blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f045e88e410fb769ce9548717c356b5dfbda0e11 | aa5be4f335d040394e6282b4cfe4612c4a0db65d | /polygontools/pt_modules/pt_tools_func.py | 28d8e5d2f1fcb56f687b07f88e01ef8e22b6fb3d | [
"MIT"
] | permissive | Ivanou-Dzmitry/polygon_tools_2_max | eaac4b5ded9c10b381e577c3982b19d47c4f8022 | eaccfaef92207a2114c9d0ec5152aaab28e8eb9d | refs/heads/main | 2023-03-18T12:10:49.463029 | 2021-02-28T16:32:49 | 2021-02-28T16:32:49 | 343,153,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,053 | py | # This Python file uses the following encoding: utf-8
#******************************************************************************************************
# Created: polygon.by
# Last Updated: 12 may 2020
# Version: 2.0.1
#
# Authors:
# Dzmitry Ivanou
# Dzmitry Dzrynou
#
# Many thanks to Yury Ruskevich, CGCode Telegram Channel and Alexander Plechkov for some good ideas and support.
#
#******************************************************************************************************
# MODIFY THIS AT YOUR OWN RISK
import os
import sys
import math
import pymxs
import random
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
#from PySide2.QtUiTools import *
# Project modules. reload() (Python 2 builtin) keeps 3ds Max's long-lived
# interpreter session in sync with edits on disk.
# FIX: pt_conclusion was imported and reload()ed twice; duplicate removed.
import pt_conclusion as conclusion
reload(conclusion)
import pt_gen_func as gen_func
reload(gen_func)
import pt_uv_func as uvf
reload(uvf)
# make the package root importable for the config loader
RootDir = ".."
if RootDir not in sys.path:
    sys.path.append( RootDir )
import pt_config_loader as cfgl
reload(cfgl)
#GUI
class PT_Toools_Tab (QWidget):
    """The "Tools" tab of the PolygonTools window (3ds Max / PySide2).

    Bundles material helpers (delete / gloss / mate / normal-map), FBX
    export, a LOD-layer check with a distance-driven layer switcher, an
    open-edge intersection check, a render-preview shortcut, and a
    rich-text "conclusion" browser for operation feedback.
    """
    def __init__(self, parent=None):
        """Build the tab's widgets, wire up signals, and restore saved state."""
        QWidget.__init__(self, parent=parent)
        # root layout
        self.tabTool_v_layout = QVBoxLayout(self)
        self.tabTool_v_layout.setAlignment(Qt.AlignTop)
        MaxWidth = 370
        # icons live next to this module in pt_modules/icons
        CurrentDir = os.path.dirname(__file__)
        IconPath = (CurrentDir + "\icons")
        try:
            iconFBXExport = QPixmap(IconPath + "/fbxexport_icon.png")
            iconDelMat = QPixmap(IconPath + "/delete_mat_icon.png")
            iconMateMat = QPixmap(IconPath + "/mate_mat_icon.png")
            iconGlossMat = QPixmap(IconPath + "/gloss_mat_icon.png")
            iconIntersect = QPixmap(IconPath + "/intersection_icon.png")
            iconRetop = QPixmap(IconPath + "/retop_icon.png")
            iconLod = QPixmap(IconPath + "/lod_check_icon.png")
            iconRendPrev = QPixmap(IconPath + "/render_prev_icon.png")
            iconNMMat = QPixmap(IconPath + "/nm_mat_icon.png")
        except:
            # FIX: the original called cmds.warning() here -- a Maya-only API
            # that is undefined in this 3ds Max tool and raised NameError.
            # Also fall back to empty pixmaps so the setIcon() calls below
            # don't crash on the then-unbound icon names.
            print("PolygonTools: Can't load icons for Tools Tab! Check icon files in pt_modules/icons directory.")
            iconFBXExport = iconDelMat = iconMateMat = iconGlossMat = iconIntersect = QPixmap()
            iconRetop = iconLod = iconRendPrev = iconNMMat = QPixmap()
        # label for status/info messages (recolored by showInfo)
        self.lblInfo_01 = QLabel("Select an object and click on the necessary tool.")
        self.lblInfo_01.setMargin(2)
        # Materials tools group
        self.gboxMats = QGroupBox("Materials")
        self.gboxMats.setMaximumWidth(MaxWidth)
        self.gboxMats.setMaximumHeight(100)
        self.gboxMats_h_layout = QHBoxLayout()
        self.btnDelMat = QPushButton("Delete")
        self.btnDelMat.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnDelMat.setMinimumWidth(35)
        self.btnDelMat.setMinimumHeight(35)
        self.btnDelMat.setIcon(iconDelMat)
        self.btnGlossMat = QPushButton("Gloss")
        self.btnGlossMat.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnGlossMat.setMinimumWidth(35)
        self.btnGlossMat.setMinimumHeight(35)
        self.btnGlossMat.setIcon(iconGlossMat)
        self.btnMateMat = QPushButton("Mate")
        self.btnMateMat.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnMateMat.setMinimumWidth(35)
        self.btnMateMat.setMinimumHeight(35)
        self.btnMateMat.setIcon(iconMateMat)
        self.btnNMMat = QPushButton("NM")
        self.btnNMMat.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnNMMat.setMinimumWidth(35)
        self.btnNMMat.setMinimumHeight(35)
        self.btnNMMat.setIcon(iconNMMat)
        # add elements
        self.gboxMats_h_layout.addWidget(self.btnDelMat)
        self.gboxMats_h_layout.addWidget(self.btnGlossMat)
        self.gboxMats_h_layout.addWidget(self.btnMateMat)
        self.gboxMats_h_layout.addWidget(self.btnNMMat)
        # set layout
        self.gboxMats.setLayout(self.gboxMats_h_layout)
        # Export tools group
        self.gboxExport = QGroupBox("Export")
        self.gboxExport.setMaximumWidth(MaxWidth)
        self.gboxExport.setMaximumHeight(100)
        self.gboxExport_h_layout = QHBoxLayout()
        self.gboxExport_h_layout.setAlignment(Qt.AlignLeft)
        self.btnFBXExp = QPushButton("To FBX")
        self.btnFBXExp.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnFBXExp.setMinimumWidth(75)
        self.btnFBXExp.setMaximumWidth(75)
        self.btnFBXExp.setMinimumHeight(35)
        self.btnFBXExp.setIcon(iconFBXExport)
        self.gboxExport_h_layout.addWidget(self.btnFBXExp)
        self.gboxExport.setLayout(self.gboxExport_h_layout)
        # LOD group
        self.gboxLOD = QGroupBox("LOD")
        self.gboxLOD.setMaximumWidth(MaxWidth)
        self.gboxLOD.setMaximumHeight(220)
        self.gboxLOD.setMinimumHeight(220)
        self.gboxLOD_h_layout = QHBoxLayout()
        self.gboxLOD_h_layout.setSizeConstraint(QLayout.SetMinimumSize)
        # left column
        self.gboxLOD_v_layout1 = QVBoxLayout()
        self.gboxLOD_v_layout1.setAlignment(Qt.AlignLeft)
        self.gboxLOD_v_layout1.setAlignment(Qt.AlignTop)
        # right column
        self.gboxLOD_v_layout2 = QVBoxLayout()
        self.gboxLOD_v_layout2.setAlignment(Qt.AlignLeft)
        self.gboxLOD_v_layout2.setAlignment(Qt.AlignTop)
        # horizontal row for the distance emulator slider
        self.gboxLOD_h_layout1 = QHBoxLayout()
        self.gboxLOD_h_layout1.setAlignment(Qt.AlignLeft)
        # horizontal row for the LOD check button
        self.gboxLOD_h_layout2 = QHBoxLayout()
        self.gboxLOD_h_layout2.setAlignment(Qt.AlignRight)
        self.gboxLOD_h_layout2.setContentsMargins(0,66,0,0)
        self.lblDist = QLabel("Switch range")
        self.lblLOD0 = QLabel("LOD0")
        self.lblLOD4 = QLabel("LOD4")
        self.lblDistEmulation = QLabel("Virtual Distance")
        # slider lets the user preview a LOD level by hand (0..4)
        self.sldLOD = QSlider()
        self.sldLOD.setOrientation(Qt.Horizontal)
        self.sldLOD.setMinimumHeight(20)
        self.sldLOD.setMinimum(0)
        self.sldLOD.setMaximum(4)
        self.sldLOD.setTickInterval(1)
        self.sldLOD.setValue(0)
        self.sldLOD.setEnabled(False)
        self.lblLodDist = QLabel("Distance:")
        self.lblLodDist.setMinimumWidth(100)
        self.btnLODcheck = QToolButton()
        self.btnLODcheck.setText("LOD Check")
        self.btnLODcheck.setIcon(iconLod)
        self.btnLODcheck.setMinimumWidth(75)
        self.btnLODcheck.setCheckable(True)
        # switch distances (metres) for LOD1..LOD4
        self.spnLOD1 = QSpinBox()
        self.spnLOD1.setFixedWidth(90)
        self.spnLOD1.setMinimum(5)
        self.spnLOD1.setMaximum(200)
        self.spnLOD1.setValue(10)
        self.spnLOD1.setSingleStep(5)
        self.spnLOD1.setPrefix("LOD1: ")
        self.spnLOD1.setSuffix("m")
        self.spnLOD2 = QSpinBox()
        self.spnLOD2.setFixedWidth(90)
        self.spnLOD2.setMinimum(10)
        self.spnLOD2.setMaximum(400)
        self.spnLOD2.setValue(20)
        self.spnLOD2.setSingleStep(10)
        self.spnLOD2.setPrefix("LOD2: ")
        self.spnLOD2.setSuffix("m")
        self.spnLOD3 = QSpinBox()
        self.spnLOD3.setFixedWidth(90)
        self.spnLOD3.setMinimum(20)
        self.spnLOD3.setMaximum(600)
        self.spnLOD3.setValue(30)
        self.spnLOD3.setSingleStep(10)
        self.spnLOD3.setPrefix("LOD3: ")
        self.spnLOD3.setSuffix("m")
        self.spnLOD4 = QSpinBox()
        self.spnLOD4.setFixedWidth(90)
        self.spnLOD4.setMinimum(30)
        self.spnLOD4.setMaximum(800)
        self.spnLOD4.setValue(40)
        self.spnLOD4.setSingleStep(10)
        self.spnLOD4.setPrefix("LOD4: ")
        self.spnLOD4.setSuffix("m")
        self.gboxLOD_v_layout1.addWidget(self.lblLodDist)
        self.gboxLOD_v_layout1.addWidget(self.lblDist)
        self.gboxLOD_v_layout1.addWidget(self.spnLOD1)
        self.gboxLOD_v_layout1.addWidget(self.spnLOD2)
        self.gboxLOD_v_layout1.addWidget(self.spnLOD3)
        self.gboxLOD_v_layout1.addWidget(self.spnLOD4)
        self.gboxLOD_v_layout2.addWidget(self.lblDistEmulation)
        self.gboxLOD_h_layout1.addWidget(self.lblLOD0)
        self.gboxLOD_h_layout1.addWidget(self.sldLOD)
        self.gboxLOD_h_layout1.addWidget(self.lblLOD4)
        self.gboxLOD_h_layout2.addWidget(self.btnLODcheck)
        self.gboxLOD_v_layout2.addLayout(self.gboxLOD_h_layout1)
        self.gboxLOD_v_layout2.addLayout(self.gboxLOD_h_layout2)
        self.gboxLOD_h_layout.addLayout(self.gboxLOD_v_layout1)
        self.gboxLOD_h_layout.addLayout(self.gboxLOD_v_layout2)
        self.gboxLOD.setLayout(self.gboxLOD_h_layout)
        # Intersection group
        self.gboxIntersect = QGroupBox("Check Intersection")
        self.gboxIntersect.setMaximumWidth(MaxWidth)
        self.gboxIntersect_h_layout = QHBoxLayout()
        self.gboxIntersect_h_layout.setAlignment(Qt.AlignLeft)
        # Common tools group
        self.gboxCommon = QGroupBox("Common")
        self.gboxCommon.setMaximumWidth(MaxWidth)
        self.gboxCommon.setMaximumHeight(100)
        self.gboxCommon_v_layout = QVBoxLayout()
        self.btnPrevRend = QPushButton("Render Preview")
        self.btnPrevRend.setStyleSheet("color:#000000;background-color:#E1E1E1;")
        self.btnPrevRend.setIcon(iconRendPrev)
        self.btnPrevRend.setMinimumWidth(110)
        self.btnPrevRend.setMaximumWidth(110)
        # intersect gui (depth field is millimetres)
        self.lblFlyDist = QLabel("Depth (mm): ")
        self.lblFlyDist.setMaximumWidth(65)
        self.edtFlyDist = QLineEdit()
        self.edtFlyDist.setMaxLength(2)
        self.edtFlyDist.setMaximumWidth(40)
        self.btnCheckFly = QToolButton()
        self.btnCheckFly.setText("Check")
        self.btnCheckFly.setIcon(iconIntersect)
        self.btnCheckFly.setMaximumWidth(65)
        self.btnCheckFly.setCheckable(True)
        self.com_h_layout_01 = QHBoxLayout()
        self.com_h_layout_01.setAlignment(Qt.AlignLeft)
        self.com_h_layout_01.setContentsMargins(0,0,0,0)
        self.com_h_layout_01.setSpacing(10)
        self.gboxCommon_v_layout.addWidget(self.btnPrevRend)
        self.gboxIntersect_h_layout.addWidget(self.lblFlyDist)
        self.gboxIntersect_h_layout.addWidget(self.edtFlyDist)
        self.gboxIntersect_h_layout.addWidget(self.btnCheckFly)
        self.gboxCommon_v_layout.addLayout(self.com_h_layout_01)
        self.gboxCommon.setLayout(self.gboxCommon_v_layout)
        self.gboxIntersect.setLayout(self.gboxIntersect_h_layout)
        self.gboxToolConclusion = QGroupBox("Conclusion")
        self.gboxToolConclusion.setMaximumWidth(MaxWidth)
        self.gboxToolConclusion.setMinimumHeight(170)
        self.gboxToolConclusion_v_layout = QVBoxLayout()
        # conclusion text goes here
        self.txtbrowToolConclusion = QTextBrowser()
        self.txtbrowToolConclusion.setStyleSheet("color:#ffffff; background-color:#2b2b2b;")
        self.txtbrowToolConclusion.setHtml("")
        self.gboxToolConclusion_v_layout.addWidget(self.txtbrowToolConclusion)
        # add base elements
        self.tabTool_v_layout.addWidget(self.lblInfo_01)
        self.tabTool_v_layout.addWidget(self.gboxMats)
        self.tabTool_v_layout.addWidget(self.gboxExport)
        self.tabTool_v_layout.addWidget(self.gboxLOD)
        self.tabTool_v_layout.addWidget(self.gboxIntersect)
        #self.tabTool_v_layout.addWidget(self.gboxRetopo)
        self.tabTool_v_layout.addWidget(self.gboxCommon)
        # conclusion
        self.gboxToolConclusion.setLayout(self.gboxToolConclusion_v_layout)
        # conclusion area
        self.tabTool_v_layout.addWidget(self.gboxToolConclusion)
        # SIGNALS
        self.btnDelMat.clicked.connect(self.btnDelMatClicked)
        self.btnGlossMat.clicked.connect(self.btnGlossMatClicked)
        self.btnMateMat.clicked.connect(self.btnMateMatClicked)
        self.btnNMMat.clicked.connect(self.btnNMMatClicked)
        self.btnFBXExp.clicked.connect(self.btnFBXExpClicked)
        self.btnPrevRend.clicked.connect(self.btnPrevRendClicked)
        self.btnCheckFly.clicked.connect(self.btnCheckFlyClicked)
        self.edtFlyDist.editingFinished.connect(self.saveIntersetValue)
        # LOD
        self.spnLOD1.editingFinished.connect(self.lod1FinEdit)
        self.spnLOD2.editingFinished.connect(self.lod2FinEdit)
        self.spnLOD3.editingFinished.connect(self.lod3FinEdit)
        self.spnLOD4.editingFinished.connect(self.lod4FinEdit)
        self.btnLODcheck.clicked.connect(self.btnLODcheckClicked)
        # LOD slider
        self.sldLOD.sliderReleased.connect(self.currentLOD)
        self.sldLOD.valueChanged.connect(self.lodSwitcher)
        self.sldLOD.sliderPressed.connect(self.lodsldPressed)
        # restore persisted state
        # intro text
        current_languge = cfgl.configLoader()[14]
        self.txtbrowToolConclusion.setHtml( conclusion.toolTabIntroConclusion(current_languge) )
        self.checkToolsValues()
        # reflect an intersection check left over from a previous session
        scene_data = getSceneObjects("pt_spline")
        if scene_data[1] == True:
            self.btnCheckFly.setChecked(True)
        else:
            self.btnCheckFly.setChecked(False)
        self.LODDistance = 0
        self.sldPressed = False
        # checkLODLayerStructure() == True means the structure is damaged
        if checkLODLayerStructure() == True:
            self.lodDisable()
        else:
            self.checkLODValues()
            self.btnLODcheck.setChecked(True)
            self.sldLOD.setEnabled(True)
def lodsldPressed(self):
#set slider status
self.sldPressed = True
def showInfo(self, info_type, info_text):
#trim lables
if len(info_text) > 100:
short_info_text = info_text[:100] + "..."
else:
short_info_text = info_text
if info_type=="info":
self.lblInfo_01.setText(short_info_text)
self.lblInfo_01.setStyleSheet("background-color:#3D523D;")
print "PolygonTools:", info_text
if info_type=="warn":
self.lblInfo_01.setText(short_info_text)
self.lblInfo_01.setStyleSheet("background-color:#916666;")
print( "PolygonTools: " + info_text )
if info_type=="lod":
self.lblInfo_01.setText(short_info_text)
self.lblInfo_01.setStyleSheet("background-color:#3D523D;")
if info_type=="fin":
self.lblInfo_01.setText(short_info_text)
self.lblInfo_01.setStyleSheet("background-color:#9E557A;")
print "PolygonTools:", info_text
#delete material
def btnDelMatClicked (self):
current_languge = cfgl.configLoader()[14]
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
if len(sel_editable_poly_objects) > 0:
deleteMaterial(sel_editable_poly_objects)
self.showInfo ("info", "All materials removed from the object!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "DelMat") )
else:
conclusion_text = conclusion.noSelection(current_languge, "del_mat")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select something for delete. Mesh object for example..")
#assign gloss
def btnGlossMatClicked (self):
current_languge = cfgl.configLoader()[14]
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
if len(sel_editable_poly_objects) > 0:
createGlossMaterial(sel_editable_poly_objects)
self.showInfo ("info", "Gloss shader was asigned!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "GlossMat") )
else:
conclusion_text = conclusion.noSelection(current_languge, "gloss_mat")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select something for assign. Mesh object for example..")
#assign Mate
def btnMateMatClicked (self):
current_languge = cfgl.configLoader()[14]
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
if len(sel_editable_poly_objects) > 0:
createMateMaterial( sel_editable_poly_objects )
self.showInfo ("info", "Mate shader was asigned!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "MateMat") )
else:
conclusion_text = conclusion.noSelection(current_languge, "mate_mat")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select something for assign. Mesh object for example..")
def btnNMMatClicked (self):
current_languge = cfgl.configLoader()[14]
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
if len(sel_editable_poly_objects) > 0:
createNMMaterial(sel_editable_poly_objects)
self.showInfo ("info", "NormalMap material was asigned!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "NMMat") )
else:
conclusion_text = conclusion.noSelection(current_languge, "mate_mat")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select something for assign. Mesh object for example..")
#exp to fbx
def btnFBXExpClicked (self):
current_languge = cfgl.configLoader()[14]
rt = pymxs.runtime
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
FileDirectory = rt.maxFilePath
CurrentSceneFileName = rt.execute ("getFilenameFile maxFileName")
FullPathToFBXfile = FileDirectory + CurrentSceneFileName
PathToSave = FileDirectory
if len(sel_editable_poly_objects) > 0:
if len(FullPathToFBXfile) == 0:
self.showInfo ("warn", "Please save current scene before Export!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "FBXExpProblem") )
else:
if fbxExport(FullPathToFBXfile) == True:
self.showInfo ("info", "Export Complete! Path to FBX file: \n" + FileDirectory)
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "FBXExp") )
else:
self.showInfo ("warn", "Problems with export. Try exporting manually.")
else:
conclusion_text = conclusion.noSelection(current_languge, "fbx_exp")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select something for export. Editable Poly object for example..")
#render Preview
def btnPrevRendClicked(self):
rt = pymxs.runtime
current_languge = cfgl.configLoader()[14]
#get dir
FileDirectory = rt.maxFilePath
#get scene name
CurrentSceneFileName = rt.execute ("getFilenameFile maxFileName")
#full path
FullPathToJPGfile = FileDirectory + CurrentSceneFileName
if len(FullPathToJPGfile) == 0:
self.showInfo ("warn", "For Render Preview first of all please save scene!")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "RenderPreviewProblem") )
else:
renderResult = renderPreview(FullPathToJPGfile)
if renderResult == True:
self.showInfo ("info", "Preview successfully saved to: \n" + FileDirectory)
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "RenderPreview") )
else:
self.showInfo ("warn", 'Can\'t save preview!')
def btnCheckFlyClicked (self):
rt = pymxs.runtime
current_languge = cfgl.configLoader()[14]
#getselection
try:
selection_array = gen_func.checkSelection()
sel_objects = selection_array[0]
sel_editable_poly_objects = selection_array[1]
except:
print "Please select something. Editable Poly object for example..."
PTSplineInScene = False
scene_data = getSceneObjects("pt_spline")
#print scene_data
PTSplineInScene = scene_data[1]
# button not pressed - no obj
if (self.btnCheckFly.isChecked() == True) and (PTSplineInScene == False):
if len(sel_editable_poly_objects) > 0:
try:
#get radius and div 2
IntersectionRadius = ((float(self.edtFlyDist.text()))/1000)
except:
self.edtFlyDist.setText("10")
IntersectionRadius = 0.01
print ("Invalid value out of range (1-99)! Value set to default - 10mm")
self.saveIntersetValue()
ExtrudeResult = openEdgesExtrude(sel_editable_poly_objects, IntersectionRadius)
if ExtrudeResult == True:
self.showInfo ("info", "If you see the red lines - check the intersection between objects.")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "CheckIntersect") )
else:
self.showInfo ("warn", "There are NO Open Edges on the model!")
#conclusion
self.txtbrowToolConclusion.setHtml( conclusion.variousConclusion(current_languge, "OpenEdges") )
self.btnCheckFly.setChecked(False)
else: #no selection
conclusion_text = conclusion.noSelection(current_languge, "check_intersect")
self.txtbrowToolConclusion.setHtml(conclusion_text)
self.showInfo ("warn", "Please select Editable Poly object(s)!")
self.btnCheckFly.setChecked(False)
# button pressed - obj present
elif (self.btnCheckFly.isChecked() == False) and (PTSplineInScene == True):
array_to_delete = []
for i in range(len(scene_data[0])):
if "pt_spline" in scene_data[0][i].name:
array_to_delete.append(scene_data[0][i])
#select and delete
rt.select(array_to_delete)
rt.execute("delete $")
self.showInfo ("info", "Previous intersection check was cleaned.")
rt.execute ("redrawViews()")
rt.subObjectLevel = 0
self.txtbrowToolConclusion.setHtml( conclusion.variousConclusion(current_languge, "IntersectClear") )
# button pressed - no obj (alredy deleted)
elif (self.btnCheckFly.isChecked() == False) and (PTSplineInScene == False):
self.showInfo ("info", "Previous intersection check already cleaned.")
rt.execute ("redrawViews()")
rt.subObjectLevel = 4
self.txtbrowToolConclusion.setHtml( conclusion.variousConclusion(current_languge, "IntersectAlreadyClear") )
#save value
def saveIntersetValue(self):
try:
int(self.edtFlyDist.text())
path_config = cfgl.configLoader()[99:101]
current_intersection_depth = self.edtFlyDist.text()
cfgl.ConfigWriter('Tools', 'intersection_depth', current_intersection_depth, path_config[0], path_config[1])
except:
cmds.warning("intersection_depth: Invalid value or value out of range (1-99)! Value set to default - 10mm")
path_config = cfgl.configLoader()[99:101]
current_intersection_depth = '10'
self.edtFlyDist.setText(current_intersection_depth)
cfgl.ConfigWriter('Tools', 'intersection_depth', current_intersection_depth, path_config[0], path_config[1])
def checkToolsValues(self):
current_intersection_depth = self.edtFlyDist.text()
#load data from config
data_from_config = cfgl.configLoader()[12:14]
#For Intersection
try:
config_intersection_depth = data_from_config[0]
int(config_intersection_depth)
#set intersect depth
if current_intersection_depth != config_intersection_depth:
self.edtFlyDist.setText(config_intersection_depth)
except:
self.saveIntersetValue()
#run LOD check
def btnLODcheckClicked (self):
current_languge = cfgl.configLoader()[14]
rt = pymxs.runtime
rt.clearSelection()
#check
if (self.btnLODcheck.isChecked() == True):
createLODStructure()
rt.registerRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
self.sldLOD.setEnabled(True)
self.checkLODVis()
self.showInfo ("info", "LOD layers created. Put LODs geometry to the appropriate Layers.")
self.txtbrowToolConclusion.setHtml( conclusion.toolOperationConclusion(current_languge, "LOD") )
if self.btnLODcheck.isChecked() == False:
deleteLODStructure()
self.showInfo ("fin", "LOD layers was deleted.")
rt.unregisterRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
self.lodDisable()
def lodDisable(self):
self.sldLOD.setValue(0)
self.sldLOD.setEnabled(False)
self.spnLOD1.setStyleSheet("")
self.spnLOD2.setStyleSheet("")
self.spnLOD3.setStyleSheet("")
self.spnLOD4.setStyleSheet("")
self.btnLODcheck.setChecked(False)
self.lblLodDist.setText ("Distance: ")
#edit lod values
def lod1FinEdit(self):
#write new values to config
path_config = cfgl.configLoader()[99:101]
currentlod1val = str(self.spnLOD1.value())
cfgl.ConfigWriter('LOD_distance', 'lod1', currentlod1val, path_config[0], path_config[1])
self.distChecker()
def lod2FinEdit(self):
#write new values to config
path_config = cfgl.configLoader()[99:101]
currentlod2val = str(self.spnLOD2.value())
cfgl.ConfigWriter('LOD_distance', 'lod2', currentlod2val, path_config[0], path_config[1])
self.distChecker()
def lod3FinEdit(self):
#write new values to config
path_config = cfgl.configLoader()[99:101]
currentlod3val = str(self.spnLOD3.value())
cfgl.ConfigWriter('LOD_distance', 'lod3', currentlod3val, path_config[0], path_config[1])
self.distChecker()
def lod4FinEdit(self):
#write new values to config
path_config = cfgl.configLoader()[99:101]
currentlod4val = str(self.spnLOD4.value())
cfgl.ConfigWriter('LOD_distance', 'lod4', currentlod4val, path_config[0], path_config[1])
self.distChecker()
#LOD dist checker
    def distChecker(self):
        """Enforce strictly increasing LOD distances (LOD1 < LOD2 < LOD3 < LOD4).

        Each violated bound is pushed 10 m past its predecessor; every
        adjusted value is written straight back to the config file.
        """
        #get start values
        lod1val = self.spnLOD1.value()
        lod2val = self.spnLOD2.value()
        lod3val = self.spnLOD3.value()
        lod4val = self.spnLOD4.value()
        path_config = cfgl.configLoader()[99:101]
        #compare values and Write to File if they different
        if lod1val >= lod2val:
            self.spnLOD2.setValue(lod1val + 10) #next lod cant less then previous
            currentlod2val = str(self.spnLOD2.value())
            cfgl.ConfigWriter('LOD_distance', 'lod2', currentlod2val, path_config[0], path_config[1])
            # re-read after setValue: the spinbox may clamp to its maximum
            lod2val = self.spnLOD2.value()
        if lod2val >= lod3val:
            self.spnLOD3.setValue(lod2val + 10)
            currentlod3val = str(self.spnLOD3.value())
            cfgl.ConfigWriter('LOD_distance', 'lod3', currentlod3val, path_config[0], path_config[1])
            # re-read after setValue (see above)
            lod3val = self.spnLOD3.value()
        if lod3val >= lod4val:
            self.spnLOD4.setValue(lod3val + 10)
            currentlod4val = str(self.spnLOD4.value())
            cfgl.ConfigWriter('LOD_distance', 'lod4', currentlod4val, path_config[0], path_config[1])
def LODSwitcherONOFF (self, LODOn, LODa, LODb, LODc, LODd):
rt = pymxs.runtime
if checkLODLayerStructure() == False:
try:
rt.LayerManager.getLayerFromName(LODOn).on = True
rt.LayerManager.getLayerFromName(LODa).on = False
rt.LayerManager.getLayerFromName(LODb).on = False
rt.LayerManager.getLayerFromName(LODc).on = False
rt.LayerManager.getLayerFromName(LODd).on = False
rt.redrawViews()
except:
self.lodDisable()
print "ERROR! The structure for checking lods is Damaged! Check function will be Disabled."
rt.unregisterRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
else:
rt.unregisterRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
print "ERROR! The structure for checking lods is Dаmaged! Check function will be Disabled."
self.lodDisable()
    def ShowDistanceToLODSInViewports(self):
        """Redraw-views callback: overlay the view distance and auto-switch LOD layers.

        Reads the view transform via MAXScript globals, draws the absolute
        distance into the active viewport, and -- unless the slider is being
        dragged -- shows the LOD layer matching that distance.
        Returns the clamped distance (int, >= 0).
        """
        rt = pymxs.runtime
        #get start values
        LOD1Val = int(self.spnLOD1.value())
        LOD2Val = int(self.spnLOD2.value())
        LOD3Val = int(self.spnLOD3.value())
        LOD4Val = int(self.spnLOD4.value())
        # NOTE(review): distance comes from getViewTM() row4.z, sign-flipped
        # below -- presumably the camera's Z offset; confirm in MAXScript docs.
        rt.execute ("DistMatrix = getViewTM()")
        rt.execute("DistToLOD = DistMatrix.row4.z")
        rt.execute("gw.updateScreen()")
        rt.execute ("WinWidth = (gw.getWinSizeX() / 2)")
        rt.execute(" gw.wText [WinWidth, 40, 0] (\"Distance: \" + (abs(DistToLOD)) as String) color: white")
        self.LODDistance = int(rt.DistToLOD)*-1
        if self.LODDistance < 0:
            self.LODDistance = 0
        # False means the lod0..lod4 layer structure is intact
        if checkLODLayerStructure() == False:
            # cascading ifs (not elif): the last matching range wins, so the
            # highest threshold below the current distance picks the layer
            if self.LODDistance < LOD1Val and (self.sldPressed == False):
                self.LODSwitcherONOFF("lod0", "lod1", "lod2", "lod3", "lod4")
            if self.LODDistance > LOD1Val and (self.sldPressed == False):
                self.LODSwitcherONOFF("lod1", "lod0", "lod2", "lod3", "lod4")
            if self.LODDistance > LOD2Val and (self.sldPressed == False):
                self.LODSwitcherONOFF("lod2", "lod1", "lod0", "lod3", "lod4")
            if self.LODDistance > LOD3Val and (self.sldPressed == False):
                self.LODSwitcherONOFF("lod3", "lod1", "lod2", "lod0", "lod4")
            if self.LODDistance > LOD4Val and (self.sldPressed == False):
                self.LODSwitcherONOFF("lod4", "lod1", "lod2", "lod3", "lod0")
            self.lblLodDist.setText ("Distance: " + str(self.LODDistance ))
            self.checkLODVis()
        else:
            # damaged structure: stop the callback and disable the LOD UI
            rt.unregisterRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
            print "ERROR! The structure for checking lods is damaged! Check function will be disabled."
            self.lodDisable()
        return self.LODDistance
#check lod vis
    def checkLODVis(self):
        """Highlight the spinbox of the visible LOD layer and sync the slider.

        Reads the .on state of layers lod0..lod4. NOTE: when the layer
        structure is damaged, the lodNvis names below stay unbound and their
        first use raises NameError, which the outer bare except swallows --
        the UI is then simply left untouched.
        """
        rt = pymxs.runtime
        try:
            #get lod visibility
            if checkLODLayerStructure() == False:
                try:
                    lod0vis = rt.LayerManager.getLayerFromName("lod0").on
                    lod1vis = rt.LayerManager.getLayerFromName("lod1").on
                    lod2vis = rt.LayerManager.getLayerFromName("lod2").on
                    lod3vis = rt.LayerManager.getLayerFromName("lod3").on
                    lod4vis = rt.LayerManager.getLayerFromName("lod4").on
                except:
                    pass
            else:
                pass
            self.lblLodDist.setText("Distance: " + str(self.LODDistance))
            #change color and slider position
            if lod0vis == True:
                self.spnLOD1.setStyleSheet("")
                self.spnLOD2.setStyleSheet("")
                self.spnLOD3.setStyleSheet("")
                self.spnLOD4.setStyleSheet("")
                self.sldLOD.setValue(0)
                self.showInfo ("lod", "LOD 0 is displayed.")
            if lod1vis == True:
                self.spnLOD1.setStyleSheet("background-color:#005826;")
                self.spnLOD2.setStyleSheet("")
                self.spnLOD3.setStyleSheet("")
                self.spnLOD4.setStyleSheet("")
                self.sldLOD.setValue(1)
                self.showInfo ("lod", "LOD 1 is displayed.")
            if lod2vis == True:
                self.spnLOD1.setStyleSheet("")
                self.spnLOD2.setStyleSheet("background-color:#005826;")
                self.spnLOD3.setStyleSheet("")
                self.spnLOD4.setStyleSheet("")
                self.sldLOD.setValue(2)
                self.showInfo ("lod", "LOD 2 is displayed.")
            if lod3vis == True:
                self.spnLOD1.setStyleSheet("")
                self.spnLOD2.setStyleSheet("")
                self.spnLOD3.setStyleSheet("background-color:#005826;")
                self.spnLOD4.setStyleSheet("")
                self.sldLOD.setValue(3)
                self.showInfo ("lod", "LOD 3 is displayed.")
            if lod4vis == True:
                self.spnLOD1.setStyleSheet("")
                self.spnLOD2.setStyleSheet("")
                self.spnLOD3.setStyleSheet("")
                self.spnLOD4.setStyleSheet("background-color:#005826;")
                self.sldLOD.setValue(4)
                self.showInfo ("lod", "LOD 4 is displayed.")
        except:
            pass
#current lod is
def currentLOD (self):
rt = pymxs.runtime
self.sldPressed = False
if checkLODLayerStructure() == False:
#set normal LOD values and reset styles
try:
rt.LayerManager.getLayerFromName("lod0").on = True
rt.LayerManager.getLayerFromName("lod1").on = True
self.spnLOD1.setStyleSheet("")
rt.LayerManager.getLayerFromName("lod2").on = True
self.spnLOD2.setStyleSheet("")
rt.LayerManager.getLayerFromName("lod3").on = True
self.spnLOD3.setStyleSheet("")
rt.LayerManager.getLayerFromName("lod4").on = True
self.spnLOD4.setStyleSheet("")
self.checkLODVis()
except:
pass
else:
print "ERROR! The structure for checking lods is Damaged! Check function will be Disabled.1"
def lodSwitcher(self):
rt = pymxs.runtime
LOD = self.sldLOD.value()
#change visibility LODs only if Slider pressed
if (LOD == 0) and (self.sldPressed == True):
try:
rt.LayerManager.getLayerFromName("lod0").on = True
rt.LayerManager.getLayerFromName("lod1").on = False
rt.redrawViews()
except:
pass
if (LOD == 1) and (self.sldPressed == True):
try:
rt.LayerManager.getLayerFromName("lod0").on = False
rt.LayerManager.getLayerFromName("lod1").on = True
rt.LayerManager.getLayerFromName("lod2").on = False
rt.redrawViews()
except:
pass
if (LOD == 2) and (self.sldPressed == True):
try:
rt.LayerManager.getLayerFromName("lod1").on = False
rt.LayerManager.getLayerFromName("lod2").on = True
rt.LayerManager.getLayerFromName("lod3").on = False
rt.redrawViews()
except:
pass
if (LOD == 3) and (self.sldPressed == True):
try:
rt.LayerManager.getLayerFromName("lod2").on = False
rt.LayerManager.getLayerFromName("lod3").on = True
rt.LayerManager.getLayerFromName("lod4").on = False
rt.redrawViews()
except:
pass
if (LOD == 4) and (self.sldPressed == True):
try:
rt.LayerManager.getLayerFromName("lod3").on = False
rt.LayerManager.getLayerFromName("lod4").on = True
rt.redrawViews()
except:
pass
#lod val from file
def checkLODValues (self):
rt = pymxs.runtime
#get start values
currentlod1val = self.spnLOD1.value()
currentlod2val = self.spnLOD2.value()
currentlod3val = self.spnLOD3.value()
currentlod4val = self.spnLOD4.value()
data_from_config = cfgl.configLoader()[5:9]
#get config values
configlod1val = data_from_config[0]
configlod2val = data_from_config[1]
configlod3val = data_from_config[2]
configlod4val = data_from_config[3]
#set config values
if currentlod1val != configlod1val:
self.spnLOD1.setValue(int(configlod1val))
if currentlod2val != configlod2val:
self.spnLOD2.setValue(int(configlod2val))
if currentlod3val != configlod3val:
self.spnLOD3.setValue(int(configlod3val))
if currentlod4val != configlod4val:
self.spnLOD4.setValue(int(configlod4val))
if checkLODLayerStructure() == False:
rt.registerRedrawViewsCallback(self.ShowDistanceToLODSInViewports)
self.checkLODVis()
def deleteMaterial(sel_editable_poly_objects):
    """Strip the material from every named object and give it a random wirecolor."""
    rt = pymxs.runtime
    for node_name in sel_editable_poly_objects:
        rt.execute ("$" + node_name + ".material = undefined")
        rt.execute ("$" + node_name + ".wirecolor = color (random 1 255) (random 1 255) (random 1 255)")
    # replay the original actionMan call, then force a viewport redraw
    rt.execute ("actionMan.executeAction -844228238 \"13\"")
    rt.execute ("redrawViews()")
def createMateMaterial(sel_editable_poly_objects):
    """Create the PT_Matte_Material once (if missing) and assign it to the given objects."""
    rt = pymxs.runtime
    material_data = uvf.checkShaderIntegrity()
    #print material_data[10]
    # index 10 flags whether PT_Matte_Material already exists
    if material_data[10] == False:
        #mat for checker
        rt.execute ("PT_Matte_Material = Standard()")
        rt.PT_Matte_Material.name = "PT_Matte_Material"
        rt.PT_Matte_Material.glossiness = 0
        #find free slot
        Slot = 0
        for i in range(0, 24):
            SlotName = str(rt.meditMaterials[i])
            # an unused material-editor slot still carries the default "- Default:" name
            if "- Default:" in SlotName:
                Slot = i
                break
            else:
                Slot = 0
        rt.meditmaterials[Slot] = rt.PT_Matte_Material
        print ("PolygonTools. Mate shader was created.")
    for i in range(len(sel_editable_poly_objects)):
        # the material may live in the editor or only in the scene; try both sources
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = meditmaterials[\"PT_Matte_Material\"]")
        except:
            pass
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = scenematerials[\"PT_Matte_Material\"]")
        except:
            pass
    rt.execute ("actionMan.executeAction -844228238 \"13\"")
    rt.execute ("redrawViews()")
def createGlossMaterial(sel_editable_poly_objects):
    """Create the PT_Gloss_Material once (if missing) and assign it to the given objects."""
    rt = pymxs.runtime
    material_data = uvf.checkShaderIntegrity()
    #print material_data[11]
    # index 11 flags whether PT_Gloss_Material already exists
    if material_data[11] == False:
        #mat for checker
        rt.execute ("PT_Gloss_Material = Standard()")
        rt.PT_Gloss_Material.name = "PT_Gloss_Material"
        rt.PT_Gloss_Material.glossiness = 50
        rt.PT_Gloss_Material.specularLevel = 100
        #find free slot
        Slot = 0
        for i in range(0, 24):
            SlotName = str(rt.meditMaterials[i])
            # an unused material-editor slot still carries the default "- Default:" name
            if "- Default:" in SlotName:
                Slot = i
                break
            else:
                Slot = 0
        rt.meditmaterials[Slot] = rt.PT_Gloss_Material
        print ("PolygonTools. Gloss shader was created.")
    for i in range(len(sel_editable_poly_objects)):
        # the material may live in the editor or only in the scene; try both sources
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = meditmaterials[\"PT_Gloss_Material\"]")
        except:
            pass
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = scenematerials[\"PT_Gloss_Material\"]")
        except:
            pass
    rt.execute ("actionMan.executeAction -844228238 \"13\"")
    rt.execute ("redrawViews()")
def createNMMaterial(sel_editable_poly_objects):
    """Create the PT_NM_Material (Standard + Normal_Bump with a bundled dummy
    normal map) once if missing, then assign it to the given objects."""
    rt = pymxs.runtime
    #get data
    material_data = uvf.checkShaderIntegrity()
    #if not created
    # index 12 flags whether PT_NM_Material already exists
    if material_data[12] == False:
        #mat for checker
        rt.execute ("PT_NM_Material = Standard()")
        rt.PT_NM_Material.name = "PT_NM_Material"
        rt.PT_NM_Material.glossiness = 50
        rt.PT_NM_Material.specularLevel = 100
        rt.PT_NM_Material.bumpMapEnable = True
        rt.execute ("PT_NM_Material.bumpMap = Normal_Bump ()")
        rt.PT_NM_Material.bumpMapAmount = 100
        rt.PT_NM_Material.bumpMap.flipgreen = True
        rt.PT_NM_Material.bumpMap.method = 0
        #get root script dir
        rt.execute ("UserScriptsDir = getDir #userScripts")
        TempGetDirPath = rt.UserScriptsDir
        #change symbols
        GetDirPath = TempGetDirPath.replace ("\\", "/") + "/polygontools/pt_modules/"
        #create full name
        PathToNormalMapFile = GetDirPath + "pt_dummy_normal_map.png"
        try:
            rt.execute ("PT_NM_Material.bumpMap.normal_map = Bitmaptexture fileName:\"" + PathToNormalMapFile + "\"")
        except:
            print ("PolygonTools. Normal Map texture pt_dummy_normal_map.png not exist. Check files or try to re-install PolygonTools.")
            print "Full Path to texture must be:", PathToNormalMapFile
        #find free slot if possible
        Slot = 0
        for i in range(0, 24):
            SlotName = str(rt.meditMaterials[i])
            # an unused material-editor slot still carries the default "- Default:" name
            if "- Default:" in SlotName:
                Slot = i
                break
            else:
                Slot = 0
        rt.meditmaterials[Slot] = rt.PT_NM_Material
        print ("PolygonTools. NM shader was created.")
        # NOTE(review): indentation lost on import; these three viewport actions
        # are assumed to belong to the one-time creation branch -- confirm.
        rt.execute ("actionMan.executeAction -844228238 \"12\"")
        rt.execute ("actionMan.executeAction -844228238 \"0\"")
        rt.execute ("actionMan.executeAction -844228238 \"5\"")
    #assign material
    for i in range(len(sel_editable_poly_objects)):
        # the material may live in the editor or only in the scene; try both sources
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = meditmaterials[\"PT_NM_Material\"]")
        except:
            pass
        try:
            rt.execute ("$" + sel_editable_poly_objects[i] + ".material = scenematerials[\"PT_NM_Material\"]")
        except:
            pass
    rt.execute ("redrawViews()")
    rt.execute ("actionMan.executeAction 0 \"63547\"")
def fbxExport(PathToFBXfile):
    """Export the current selection to an FBX file next to the given .max path.

    Configures the FBX exporter (static geometry only: no animation, cameras,
    lights or embedded media; ASCII FBX 2018), then exports silently.
    Returns True on success, False if the export raised.
    """
    rt = pymxs.runtime
    FullPathToFBXfile = PathToFBXfile.replace ("\\", "/")
    # exportFile adds its own extension; strip ".max" to reuse the scene name
    PathWithoutExtension = FullPathToFBXfile.replace (".max", "")
    #print PathWithoutExtension
    #--Geometry------------------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"SmoothingGroups\" true")
    rt.execute ("FBXExporterSetParam \"NormalsPerPoly\" false")
    rt.execute ("FBXExporterSetParam \"TangentSpaceExport\" true")
    rt.execute ("FBXExporterSetParam \"SmoothMeshExport\" false")
    rt.execute ("FBXExporterSetParam \"Preserveinstances\" false")
    rt.execute ("FBXExporterSetParam \"SelectionSetExport\" false")
    rt.execute ("FBXExporterSetParam \"GeomAsBone\" false")
    rt.execute ("FBXExporterSetParam \"ColladaTriangulate\" true")
    rt.execute ("FBXExporterSetParam \"PreserveEdgeOrientation\" true")
    #--Animation------------------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"Animation\" false")
    #--Cameras------------------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"Cameras\" false")
    #--Lights------------------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"Lights\" false")
    #--Embed Media--------------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"EmbedTextures\" false")
    #--Units----------------------------------------------------------------------------
    #--Axis Conversion-----------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"AxisConversionMethod\" \"Fbx_Root\"")
    rt.execute ("FBXExporterSetParam \"UpAxis\" \"Y\" ")
    #--UI----------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"ShowWarnings\" true")
    rt.execute ("FBXExporterSetParam \"GenerateLog\" false")
    #--FBX File Format----------------------------------------------------------------
    rt.execute ("FBXExporterSetParam \"ASCII\" true")
    rt.execute ("FBXExporterSetParam \"FileVersion\" \"FBX201800\"")
    try:
        rt.execute ("exportFile \"" + PathWithoutExtension + "\" #noPrompt selectedOnly:true using:FBXEXP")
        rt.execute ("messagebox \"Export to FBX successfully complete!\" title:\"Polygon Tools 2\" ")
        return True
    except:
        return False
def openEdgesExtrude(sel_editable_poly_objects, IntersectionRadius):
    """Visualise open (border) edges of each object as a red renderable spline.

    For every object that has open edges, a "<name>_pt_spline" shape is created
    from the selected edges and turned into a red, self-illuminated poly object
    of thickness `IntersectionRadius`.  Returns False only when a single object
    was tested and it had no open edges; True otherwise.
    """
    rt = pymxs.runtime
    rt.execute ("max modify mode")
    #get selection
    SelectedNodes = rt.selection
    ObjWithoutOpenEdges = False
    for i in range(len(sel_editable_poly_objects)):
        rt.execute ("select $" + sel_editable_poly_objects[i])
        # edge sub-object level
        rt.execute ("subobjectLevel = 3")
        # NOTE(review): action 40021 is assumed to select the open/border edges
        # here -- confirm against the 3ds Max actionMan tables.
        rt.execute ("actionMan.executeAction 0 \"40021\"")
        rt.execute ("SelectedEdgesCount = $" + sel_editable_poly_objects[i] + ".EditablePoly.GetSelection #Edge; SelectedEdgesCount = SelectedEdgesCount as Array")
        if len(rt.SelectedEdgesCount) > 0:
            SplineName = sel_editable_poly_objects[i] + "_pt_spline"
            rt.execute ("select $" + sel_editable_poly_objects[i])
            rt.execute ("$.EditablePoly.createShape \"" + SplineName + "\" off $")
            rt.execute ("select $" + SplineName)
            rt.execute ("$" + SplineName + ".render_displayRenderMesh = true")
            rt.execute ("$" + SplineName + ".render_thickness = " + str(IntersectionRadius) )
            rt.execute ("$" + SplineName + ".render_sides = 4")
            rt.execute ("$" + SplineName + ".wirecolor = color 255 0 0")
            rt.execute ("$" + SplineName + ".material = Standard ()")
            rt.execute ("$" + SplineName + ".material.name = \"PT_Spline\"")
            rt.execute ("$" + SplineName + ".material.selfIllumAmount = 100")
            rt.execute ("$" + SplineName + ".showVertexColors = on")
            rt.execute ("$" + SplineName + ".vertexColorsShaded = on")
            rt.execute ("convertto $" + SplineName + " editable_poly")
            # paint every face of the marker red via vertex colors
            rt.execute ("subobjectLevel = 4")
            rt.execute ("actionMan.executeAction 0 \"40021\"")
            rt.execute ("$" + SplineName + ".SetFaceColor (color 255 0 0) #VertexColor")
            rt.execute ("subobjectLevel = 0")
            rt.execute ("actionMan.executeAction 0 \"550\"")
        else:
            print sel_editable_poly_objects[i], "There are no open edges on the model."
            ObjWithoutOpenEdges = True
    #return selection
    rt.select(SelectedNodes)
    if (len(sel_editable_poly_objects) == 1) and (ObjWithoutOpenEdges == True):
        return False
    else:
        return True
#get all scene objects
def getSceneObjects (Name):
    """Return (all scene objects, True if any object name contains *Name*).

    Objects whose .name cannot be read are auto-renamed to
    "pt_renamed_object_<i>" first, then the object list is re-fetched.
    """
    rt = pymxs.runtime
    #get all scene ojects Before Rename
    SceneObjects = rt.objects
    #renamer
    for i in range(len(SceneObjects)):
        try:
            # reading the name may raise on broken/invalid nodes
            ObjectClass = str(SceneObjects[i].name)
        except:
            SceneObjects[i].name = "pt_renamed_object_" + str(i)
            print '\t', "Name Errors Detected! Auto-renamed object:", SceneObjects[i].name
    #get all scene ojects After Rename
    SceneObjects = rt.objects
    NameInScenePresence = False
    #finde specify name
    # substring match: "lod" would also match "my_lodge"
    for i in range(len(SceneObjects)):
        if Name in SceneObjects[i].name:
            NameInScenePresence = True
    return SceneObjects, NameInScenePresence
#LOD layers creator
def createLODStructure():
    """Create the lod0..lod4 layers used to preview LOD switching (skips existing ones)."""
    rt = pymxs.runtime
    for i in range(0, 5):
        LayerName = "lod" + str(i)
        LayerObjectName = "LOD" + str(i)
        if rt.LayerManager.getLayerFromName(LayerName) == None:
            LayerObjectName = rt.LayerManager.newLayer()
            LayerObjectName.setname(LayerName)
        else:
            print LayerName, "layer already created!"
    print "\nPolygonTools. LOD layers structure was sucessfyly created on scene.\n"
#delete LOD layer structure
def deleteLODStructure():
    """Delete the lod0..lod4 layer hierarchies; warn if the structure is damaged."""
    rt = pymxs.runtime
    try:
        for i in range(0, 5):
            LayerName = "lod" + str(i)
            rt.LayerManager.deleteLayerHierarchy(LayerName, forceDelete = True)
    except:
        print '\n', "PolygonTools. The structure for checking LOD's is damaged. Please delete layers manually!"
def checkLODLayerStructure():
    """Return True when any of the lod0..lod4 layers is missing, False when all exist."""
    rt = pymxs.runtime
    LayerError = False
    try:
        for i in range(0, 5):
            LayerName = "lod" + str(i)
            # getLayerFromName returns None for a missing layer, so accessing
            # .on raises and flags the structure as broken
            rt.LayerManager.getLayerFromName(LayerName).on
    except:
        LayerError = True
    return LayerError
def renderPreview(Path):
    """Save a perspective viewport grab of the selection as "<scene>_preview.jpg".

    *Path* is the scene's .max path; the preview is written next to it.
    Returns True on success, False on any error.
    """
    rt = pymxs.runtime
    try:
        FullPath = Path.replace ("\\", "/")
        PathWithoutExtension = FullPath.replace (".max", "")
        rt.execute ("actionMan.executeAction 0 \"550\"")
        # frame the selection in a user-perspective view before grabbing it
        rt.execute ("max zoomext sel all")
        rt.execute ("viewport.setType #view_persp_user")
        rt.execute ("img = gw.getViewportDib()")
        rt.execute ("img.filename = \"" + PathWithoutExtension + "_preview.jpg\"; save img")
        rt.execute ("messagebox \"Render preview complete.\" title:\"Polygon Tools 2\"")
        return True
    except:
        return False
"noreply@github.com"
] | Ivanou-Dzmitry.noreply@github.com |
51ff98de2341afa7e968bbb9daed055ef27593eb | 6732f5aba14fcce584e8803db7ccf3c6b6a4dd8f | /test/unit_test/test_json_api_request.py | f49b2259cfe32cc0de09c0a3820d2c66dbf3bfd3 | [] | no_license | ventaquil/json-api-smart | 7dfdf23ac91fb4f848121a46566d754cc2457aac | 7dc664c18479791a0556eec565ee9e937482c580 | refs/heads/master | 2022-11-23T23:12:05.820378 | 2020-08-01T14:54:08 | 2020-08-01T14:54:08 | 284,279,866 | 0 | 0 | null | 2020-08-01T14:51:41 | 2020-08-01T14:51:41 | null | UTF-8 | Python | false | false | 2,718 | py | from unittest.mock import Mock, patch
import requests_mock
from src.json_api_request import JsonApiRequest
import json
@patch("src.json_api_request.JsonApiCallContext")
def test_find_returns_build_type_from_request(json_api_call_context_mock):
    """find(id) must GET /articles/<id> and build the resource from the parsed call context."""
    with requests_mock.Mocker() as m:
        m.get("http://base_url.de/articles/1", text=json.dumps({"data": "huhu"}))
        # the resource class is mocked; JsonApiRequest reads its name and base URL
        type_mock = Mock(return_value=Mock())
        type_mock.resource_name.return_value = "articles"
        type_mock.base_url.return_value = "http://base_url.de"
        test_json_api_url = JsonApiRequest(type_mock)
        result = test_json_api_url.find(1)
        assert result == type_mock.return_value
        # the resource class is instantiated exactly once, with the call context
        type_mock.assert_called_once_with(json_api_call_context_mock.return_value)
@patch("src.json_api_request.JsonApiCallContext")
def test_all_returns_build_types_from_request(json_api_call_context_mock):
    """all() must GET /articles and build one resource instance per "data" entry."""
    with requests_mock.Mocker() as m:
        m.get(
            "http://base_url.de/articles", text=json.dumps({"data": ["huhu", "haha"]})
        )
        type_mock = Mock(return_value=Mock())
        type_mock.resource_name.return_value = "articles"
        type_mock.base_url.return_value = "http://base_url.de"
        test_json_api_url = JsonApiRequest(type_mock)
        result = test_json_api_url.all()
        assert result == [type_mock.return_value, type_mock.return_value]
        # two data entries -> the resource class is constructed twice
        assert type_mock.call_count == 2
        type_mock.assert_any_call(json_api_call_context_mock.return_value)
        type_mock.assert_any_call(json_api_call_context_mock.return_value)
def test_all_returns_build_types_with_filter_parameter():
    """where() arguments must be encoded as JSON:API filter[...] query parameters."""
    with requests_mock.Mocker() as mocker:
        mocker.get(
            "http://base_url.de/articles?filter[type]=some_type",
            text=json.dumps({"data": ["huhu"]}),
        )
        resource_cls = Mock(return_value=Mock())
        resource_cls.resource_name.return_value = "articles"
        resource_cls.base_url.return_value = "http://base_url.de"
        request = JsonApiRequest(resource_cls)
        assert request.where(type="some_type").all() == [resource_cls.return_value]
def test_all_returns_build_types_with_query_parameter():
    """with_params() arguments must be appended verbatim as plain query parameters."""
    with requests_mock.Mocker() as mocker:
        mocker.get(
            "http://base_url.de/articles?type=some_type",
            text=json.dumps({"data": ["huhu"]}),
        )
        resource_cls = Mock(return_value=Mock())
        resource_cls.resource_name.return_value = "articles"
        resource_cls.base_url.return_value = "http://base_url.de"
        request = JsonApiRequest(resource_cls)
        assert request.with_params(type="some_type").all() == [resource_cls.return_value]
| [
"peter.laudel@gmail.com"
] | peter.laudel@gmail.com |
8a212c64dfea9859cd41cea1386e1802076720e3 | 2e3420550bbc78509e53abb2e4ecec534a8600ee | /Questao10/richardson.py | de3919c62e33c300f704839a71afda9eb596243a | [] | no_license | EMachad0/P1ANN | 15532236f1f95d9dab0e3f0bcee27b82c4b2c562 | f3ef0820f321e0d6fbffedfd14e0c52f907807f0 | refs/heads/master | 2022-12-07T02:11:57.918980 | 2020-08-19T20:26:40 | 2020-08-19T20:26:40 | 288,514,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from math import *
def fk(x, h, n, p):
    """n-level Richardson extrapolation of the finite difference df(x, h).

    Assumes the leading error term of the module-level df is O(h**p); each
    extrapolation level halves h and cancels one more term.  Level 1 is df
    itself.
    """
    if n == 1:
        return df(x, h)
    level = n - 1
    weight = 2 ** (level * p)
    return (weight * fk(x, h / 2, level, p) - fk(x, h, level, p)) / (weight - 1)
# Test functions, as strings eval'd inside f() below (x is the free variable).
fs = ["cos(x**x)",
      "sin(x)",
      "x**(cos(x))",
      "e**(-x**2)"]
# Finite-difference formulas of order O(h), O(h^2) and O(h^4); eval'd inside
# df() below with p = evaluation point and h = step size.
dfs = ["(f(p)-f(p-h))/h",
       "(f(p+h)-f(p-h))/(2*h)",
       "(f(p-2*h)-8*f(p-h)+8*f(p+h)-f(p+2*h))/(12*h)"]
hs = [0.1, 0.05, 0.025, 0.0125]
ns = [4, 2, 1] # ordem do erro (extrapolation levels matched to each formula)
x0 = 1 # ponto
p = 1 # b ???????????  NOTE(review): p is the assumed error order passed to fk -- confirm
for q in range(len(fs)):
    # f closes over the loop variable q (late binding is fine: used immediately)
    def f(x):
        return eval(fs[q])
    print(f"### Questão {q+1}:")
    # NOTE(review): despite the "erro" headers, the printed values are the
    # extrapolated derivative approximations themselves, not errors.
    print("|h|erro O(h)|erro O(h^2)|erro O(h^4)|")
    print("|--|--|--|--|")
    for h in hs:
        r = []
        for i in range(len(dfs)):
            # df's parameter p shadows the global p inside the eval'd formula
            def df(p, h):
                return eval(dfs[i])
            r.append(fk(x0, h, ns[i], p))
        print("", h, *r, "", sep="|")
    print()
# import sympy as sy
# x = sy.Symbol('x')
# f = sy.sympify('x ** x')
# df = sy.diff(f, x, 2).subs(x, x0).evalf()
# print('exact:', df)
"elitonmahcadod200@gmail.com"
] | elitonmahcadod200@gmail.com |
6177c3e5145dab0ebb77f902ac0a558478083544 | 1ee2cd179e9eb2ec7541dec5b14ce993624181b8 | /openrasp_iast/plugin/scanner/directory_basic.py | 4883ffe987a3f1031767825e28eed46ad47c6f17 | [
"Apache-2.0"
] | permissive | Ze4lfRoG/openrasp-iast | 0c8492d5c9fbe0c5b3d994f8aa703628361dd405 | 0fd4cdaae642a759cffe214de51c392b75aa828e | refs/heads/master | 2020-10-01T09:05:36.359241 | 2019-12-11T12:06:43 | 2019-12-11T12:06:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Copyright 2017-2019 Baidu Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from core.components.plugin import scan_plugin_base
class ScanPlugin(scan_plugin_base.ScanPluginBase):
    """Basic directory-traversal scanner plugin for OpenRASP-IAST."""

    # show_name / description are user-facing strings and intentionally kept as-is
    plugin_info = {
        "name": "directory_basic",
        "show_name": "目录遍历检测插件",
        "description": "基础目录遍历漏洞检测插件"
    }

    def mutant(self, rasp_result_ins):
        """
        Generate mutated test requests (test-vector generation).

        Yields lists of request-data objects, one list per payload, only for
        parameters whose value actually reaches a "directory" hook.
        """
        # only requests that triggered a directory hook are worth mutating
        if not rasp_result_ins.has_hook_type("directory"):
            return
        # each entry is (payload to inject, feature expected in the hooked path)
        linux_payload_list = [
            ("../../../../../../../../../../../../../../../../../../../../etc", "/etc"),
            ("../../../../etc", "/etc"),
            ("/etc", "/etc")
        ]
        windows_payload_list = [
            ("..\\..\\..\\..\\..\\..\\..\\..\\..\\openrasp_dir", ":/openrasp_dir"),
            ("file://c:\\openrasp_dir", "c:\\openrasp_dir")
        ]
        mac_payload_list = [
            ("../../../../../../../../../../../../../../../../../../../../private/etc", "/private/etc"),
            ("../../../private/etc", "/private/etc"),
            ("/private/etc", "/private/etc")
        ]
        server_os = rasp_result_ins.get_server_info()["os"]
        if server_os == "Windows":
            payload_list = windows_payload_list
        elif server_os == "Mac":
            payload_list = mac_payload_list
        else:
            payload_list = linux_payload_list
        # collect every candidate parameter from all supported positions
        request_data_ins = self.new_request_data(rasp_result_ins)
        test_params = self.mutant_helper.get_params_list(
            request_data_ins, ["get", "post", "json", "headers", "cookies"])
        for param in test_params:
            # skip parameters whose (trailing-slash-stripped) value is not
            # concatenated into the directory hook argument
            if not request_data_ins.is_param_concat_in_hook("directory", param["value"].rstrip("/\\")):
                continue
            payload_seq = self.gen_payload_seq()
            for payload in payload_list:
                request_data_ins = self.new_request_data(
                    rasp_result_ins, payload_seq, payload[1])
                request_data_ins.set_param(
                    param["type"], param["name"], payload[0])
                request_data_list = [request_data_ins]
                yield request_data_list

    def check(self, request_data_list):
        """
        Check the result of one mutated request.

        Returns a vulnerability description string when the injected feature
        shows up in the directory hook, otherwise None.
        """
        request_data_ins = request_data_list[0]
        feature = request_data_ins.get_payload_info()["feature"]
        rasp_result_ins = request_data_ins.get_rasp_result()
        # no RASP feedback for this request -> nothing to judge
        if rasp_result_ins is None:
            return None
        if self.checker.check_concat_in_hook(rasp_result_ins, "directory", feature):
            return "读取的目录可被用户输入控制"
        else:
            return None
| [
"350905402@qq.com"
] | 350905402@qq.com |
3fb18c3471e9347c1454d23b38bb0318930d0646 | 625a24dd7e5955a6aee5f079104643c702d1ccf0 | /test_rpn.py | 8f6ab44c695e937b0e82bb0e74fd40c8a26c4ad6 | [] | no_license | issaelam/csprag-f19-rpn | 867ecb844552b789cb38c868ceaffcdcec4693ae | 01949f414b90506f8d9feb6dee567dbadac47b20 | refs/heads/master | 2020-09-02T11:54:35.681251 | 2019-11-03T14:15:48 | 2019-11-03T14:15:48 | 219,215,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import unittest
import rpn
class TestBasics(unittest.TestCase):
    """Smoke tests covering the five binary operators of the RPN calculator."""

    def test_add(self):
        self.assertEqual(2, rpn.calculate("1 1 +"))

    def test_subtract(self):
        self.assertEqual(2, rpn.calculate("5 3 -"))

    def test_multiply(self):
        self.assertEqual(15, rpn.calculate("5 3 *"))

    def test_divide(self):
        self.assertEqual(2, rpn.calculate("6 3 /"))

    def test_exponent(self):
        self.assertEqual(125, rpn.calculate("5 3 ^"))
| [
"issaelam@umich.edu"
] | issaelam@umich.edu |
22344bf97357534acdd377ef55d44b05eb0b9b1e | 533e4099e396c2d1135b08477e0c3c46aa1847df | /learning_log/learning_logs/admin.py | 7175813b5fb9d4ff6a56f5abc8f8c8b824a6c1cc | [] | no_license | Lancelot0902/Python-program | 1bbe4b549525a724a61a5e113337edc890cd071f | 84a07af3f450fa21edd5f68378d171dd3cc84928 | refs/heads/master | 2020-08-12T14:50:54.662820 | 2019-11-19T13:08:37 | 2019-11-19T13:08:37 | 214,785,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib import admin

# NOTE(review): Topic is imported twice below; the second import is redundant
# (kept here to avoid touching file-level imports).
from learning_logs.models import Topic,Entry
from learning_logs.models import Topic

# Expose the learning-log models in the Django admin site.
admin.site.register(Topic)
admin.site.register(Entry)
# Register your models here.
| [
"877675145@qq.com"
] | 877675145@qq.com |
420e1eeb908c981764e313bb26175b98853cb34a | e2d7d1c41e37f0b4bdb95933b3ba23aba9fbaa27 | /Notes.txt | a38bee18e1349cfcd1f04dcfbaf279dd300780cc | [] | no_license | Keshav-Bachu/Reinforcement_Learning_Model | 6d9f3563bbffea0d4a944303f26aba70d7825dd6 | d577edac20dbfde91eb60298fdd5105bd353580f | refs/heads/master | 2020-03-29T07:46:01.178797 | 2018-12-03T06:04:49 | 2018-12-03T06:04:49 | 149,678,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | txt | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 08:44:54 2018
@author: keshavbachu
"""
class Qnetwork():
    # TF1-style Q-network sketch: four conv layers over an 84x84x3 game frame
    # feeding a single linear head with one Q-value per action, trained with a
    # squared TD-error loss and Adam.
    def __init__(self,h_size):
        #The network recieves a frame from the game, flattened into an array.
        #It then resizes it and processes it through four convolutional layers.
        self.scalarInput = tf.placeholder(shape=[None,21168],dtype=tf.float32)  # 21168 = 84*84*3
        self.imageIn = tf.reshape(self.scalarInput,shape=[-1,84,84,3])
        self.conv1 = slim.conv2d( \
            inputs=self.imageIn,num_outputs=32,kernel_size=[8,8],stride=[4,4],padding='VALID', biases_initializer=None)
        self.conv2 = slim.conv2d( \
            inputs=self.conv1,num_outputs=64,kernel_size=[4,4],stride=[2,2],padding='VALID', biases_initializer=None)
        self.conv3 = slim.conv2d( \
            inputs=self.conv2,num_outputs=64,kernel_size=[3,3],stride=[1,1],padding='VALID', biases_initializer=None)
        self.conv4 = slim.conv2d( \
            inputs=self.conv3,num_outputs=h_size,kernel_size=[7,7],stride=[1,1],padding='VALID', biases_initializer=None)
        #We take the output from the final convolutional layer and split it into separate advantage and value streams.
        # NOTE(review): despite the comment above, only a single stream
        # (streamQ) is built below -- this is a plain DQN head, not dueling.
        self.streamQ = slim.flatten(self.conv4)
        xavier_init = tf.contrib.layers.xavier_initializer()
        self.QW = tf.Variable(xavier_init([h_size,env.actions]))
        self.Qout = tf.matmul(self.streamQ,self.QW)
        #Then combine them together to get our final Q-values.
        # greedy action = argmax over Q-values
        self.predict = tf.argmax(self.Qout,1)
        #Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
        self.targetQ = tf.placeholder(shape=[None],dtype=tf.float32)
        self.actions = tf.placeholder(shape=[None],dtype=tf.int32)
        self.actions_onehot = tf.one_hot(self.actions,env.actions,dtype=tf.float32)
        # select the Q-value of the action actually taken in each transition
        self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), axis=1)
        self.td_error = tf.square(self.targetQ - self.Q)
        self.loss = tf.reduce_mean(self.td_error)
        self.trainer = tf.train.AdamOptimizer(learning_rate=0.0001)
        self.updateModel = self.trainer.minimize(self.loss)
"keshavbachu@gmail.com"
] | keshavbachu@gmail.com |
c5203e6ce03d1cd022d854fd04fea546f5d2afbb | 3a2726e08f9069bc07dc7659c353d2f1c059a6af | /nas4candle/candle/Combo/problems/problem_large_10.py | ad49db2d09d963dbda143101a71b55518ddab475 | [
"BSD-3-Clause"
] | permissive | bigwater/nas4candle | fe3b3ffeaebb46d2ad8c027f774b4873209c2aeb | 318959424cc66819c816054a87bd1cb5d426e2e7 | refs/heads/master | 2022-01-08T19:01:38.520321 | 2019-05-02T17:32:41 | 2019-05-02T17:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from nas4candle.nasapi.benchmark import Problem
from nas4candle.candle.Combo.models.candle_mlp_large import create_structure
# We create our Problem object with the Problem class, you don't have to name your Problem object 'Problem' it can be any name you want. You can also define different problems in the same module.
# NOTE(review): this rebinds the module name Problem from the imported class to
# the instance, so the class itself is no longer reachable in this module.
Problem = Problem()
# You define the create structure function. This function will return an object following the Structure interface. You can also have kwargs arguments such as 'num_cells' for this function.
Problem.add_dim('create_structure', {
    'func': create_structure
})
# You define the hyperparameters used to train your generated models during the search.
Problem.add_dim('hyperparameters', {
    'num_epochs': 1,
})
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == '__main__':
    print(Problem)
| [
"scrlnas2019@gmail.com"
] | scrlnas2019@gmail.com |
395dac819262f385d12130046669a66a9e9543b6 | 076773bd4620e3a6d48ff2feab52ebb4e75b88c9 | /sending sms to different customers at ones using a for loop(creating lists,list of dictonaries).py | de5b6ee5f352e36cdafac469822fbd47cc1eb081 | [] | no_license | marybahati/PYTHON | 24203475b3c9a9ff8865c06783841e0366f95166 | d89e3b6bec8850b6098f53383c3cba80879ca36e | refs/heads/master | 2022-12-03T09:53:44.171718 | 2019-07-23T21:09:36 | 2019-07-23T21:09:36 | 193,958,665 | 0 | 0 | null | 2022-11-22T03:56:26 | 2019-06-26T18:34:02 | Python | UTF-8 | Python | false | false | 2,212 | py | Python 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 22:20:52) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> customer1={"name":"sharon","balance":400}
>>> customer2={"name":"eva","balance":2500}
>>> customer3={"name":"mercy","balance":300}
>>> customer4={"name":"mary","balance":5500}
>>> customer5={"name":"noon","balance":4400}
>>> customers
Traceback (most recent call last):
File "<pyshell#5>", line 1, in <module>
customers
NameError: name 'customers' is not defined
>>> customer
Traceback (most recent call last):
File "<pyshell#6>", line 1, in <module>
customer
NameError: name 'customer' is not defined
>>> customers=[customer1,customer2,customer3,customer4,customer5]
>>> customers
[{'name': 'sharon', 'balance': 400}, {'name': 'eva', 'balance': 2500}, {'name': 'mercy', 'balance': 300}, {'name': 'mary', 'balance': 5500}, {'name': 'noon', 'balance': 4400}]
>>> for customer in customers:
sms="Hi{},your balance is {}".format(customer["name"],customer["balance"])
>>> print(sms)
Hinoon,your balance is 4400
>>> >>> for customer in customers:
sms="Hi {},your balance is {}".format(customer["name"],customer["balance"])
SyntaxError: invalid syntax
>>> for customer in customers:
sms="Hi {},your balance is {}".format(customer["name"],customer["balance"])
>>> print(sms)
Hi noon,your balance is 4400
>>> >>> for customer in customers:
sms="Hi{},your balance is {}".format(customer["name"],customer["balance"])
SyntaxError: invalid syntax
>>> for customer in customers:
sms="Hi{},your balance is {}".format(customer["name"],customer["balance"])
print(sms)
Hisharon,your balance is 400
Hieva,your balance is 2500
Himercy,your balance is 300
Himary,your balance is 5500
Hinoon,your balance is 4400
>>> >>> for customer in customers:
sms="Hi{},your balance is {}".format(customer["name"],customer["balance"])
SyntaxError: invalid syntax
>>> for customer in customers:
sms="Hi {},your balance is {}".format(customer["name"],customer["balance"])
print(sms)
Hi sharon,your balance is 400
Hi eva,your balance is 2500
Hi mercy,your balance is 300
Hi mary,your balance is 5500
Hi noon,your balance is 4400
>>>
| [
"bahatimary745@gmail.com"
] | bahatimary745@gmail.com |
cad384be9aede5c74227c0ca4d556d1ada8cbe9a | 772e04b18f36fe1bffb05c16ef4eff3ba765fd13 | /gcnvisualizer/test/test_visualizer.py | 853d514babfbb1580b6492b0b4ad3a106332f9ae | [
"LicenseRef-scancode-other-permissive"
] | permissive | clinfo/kGCN | 3c74f552dd9d71d470a3173012b01733a1262688 | 32328d5a41e6ed7491b3edb705ff94658fc95d3f | refs/heads/master | 2023-08-16T19:43:17.149381 | 2023-08-03T00:08:11 | 2023-08-03T00:08:11 | 194,075,235 | 110 | 38 | NOASSERTION | 2022-02-04T17:09:55 | 2019-06-27T10:31:57 | Python | UTF-8 | Python | false | false | 503 | py | import unittest
import numpy as np
import pytest
from gcnvisualizer import GCNVisualizer
def test_load_normal_pickle_file(multi_modal_profeat):
    """Every fixture file must load and expose exactly these IG-dict keys.

    `multi_modal_profeat` is a pytest fixture (defined in conftest) that
    yields filenames of pickled multi-modal/profeat results.
    """
    for filename in multi_modal_profeat:
        g = GCNVisualizer(filename, loglevel='ERROR')
        # key order matters here: the loader is expected to preserve it
        assert ['smiles', 'feature',
                'adjacency', 'check_scores',
                'feature_IG', 'adjacency_IG',
                'profeat_IG', 'vector_modal'] == (list(g.ig_dict.keys()))
if __name__ == "__main__":
    # Bug fix: `unittest.run()` does not exist (it raised AttributeError when the
    # file was executed directly), and the test above is pytest-style (it relies
    # on a fixture), so delegate to pytest when run as a script.
    pytest.main([__file__])
| [
"kojima.ryosuke.8e@kyoto-u.ac.jp"
] | kojima.ryosuke.8e@kyoto-u.ac.jp |
e45a726c5c6c4a6a720449e4eb2e4e791dea81b2 | a3b7b3c319706f6b50b9516fed851ef0d7abe971 | /.debris/2019-05-01 07.35.25.00/PgFunctions.py | 47df0c95e73d92ea640ad59cdca2881afb25854c | [] | no_license | graeca/pg-ocr | c8b46aa227dd4a76915f4718ce3ae577ea915a80 | 53ce27feb378973a3cdc77b318838c479af1805d | refs/heads/master | 2021-10-13T11:27:06.708513 | 2019-07-17T16:28:54 | 2019-07-17T16:28:54 | 182,812,117 | 0 | 0 | null | 2021-10-12T22:54:24 | 2019-04-22T15:08:33 | Jupyter Notebook | UTF-8 | Python | false | false | 12,587 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 15:19:21 2019
@author: ainok
"""
import cv2 as cv2
import numpy as np
from matplotlib import pyplot as plt
import glob, os
import re
from pathlib import Path
def createPadding(img, margin):
    """Pad *img* on all four sides with `margin` pixels of white (255).

    Bug fix: the original ignored the `margin` argument and always inserted a
    hard-coded 10-pixel border, so callers such as applyCircleErosion (which
    passes margin=20) did not get the border they asked for.

    Works for 2-D grayscale images and 3-D (H, W, C) images; channel axes are
    never padded.  Preserves the input dtype.
    """
    # pad only the first two (spatial) axes
    pad_width = [(margin, margin), (margin, margin)] + [(0, 0)] * (img.ndim - 2)
    return np.pad(img, pad_width, mode="constant", constant_values=255)
def createPadding2(query, margin):
    """Return *query* surrounded by a `margin`-pixel white (255) frame.

    Unlike createPadding, the frame is assembled with hstack/vstack from
    float arrays, so the result is promoted to float64 regardless of the
    input dtype.
    """
    rows = query.shape[0]
    # vertical white strips for the left and right sides
    side = np.full((rows, margin), 255.0)
    framed = np.hstack((side, query, side))
    # horizontal white bands for the top and bottom
    cols = framed.shape[1]
    band = np.full((margin, cols), 255.0)
    return np.vstack((band, framed, band))
def clearBorder(img):
    """Flood-fill to white (255) every black (0) region touching the image border.

    Mutates *img* in place (cv2.floodFill writes into its input) and also
    returns it for convenience.  Assumes a single-channel image where ink is 0
    and background is 255.
    """
    #img = np.array([[10, 2], [0, 4], [5, 6]])
    w=img.shape[1]
    h=img.shape[0]
    #print(w)
    #print(h)
    # left and right columns
    for row in range(h):
        if img[row, 0] == 0:
            #print(row)
            cv2.floodFill(img, None, (0, row), 255)
        if img[row, w-1] == 0:
            cv2.floodFill(img, None, (w-1, row), 255)
    # top and bottom rows
    for col in range(w):
        if img[0, col] == 0:
            cv2.floodFill(img, None, (col, 0), 255)
        if img[h-1, col] == 0:
            cv2.floodFill(img, None, (col, h-1), 255)
    return img
def cutPunctuation(img,wordname_path):
    """Try to crop trailing punctuation off a binarized word image.

    Scans the vertical projection profile from the right for the first gap
    (column of all-background); if the part to the right of the gap has very
    little ink (sum <= 90), it is treated as punctuation and cut off,
    otherwise the whole word is returned unchanged (re-inverted to the
    original polarity).

    NOTE(review): if no gap is found the loop falls through and the function
    implicitly returns None -- confirm callers handle that.  `wordname_path`
    is currently unused.  `len` shadows the builtin inside this function.
    """
    #plt.imshow(img)
    # invert so ink becomes 255, then normalise to {0, 1}
    img = cv2.bitwise_not(img)
    newX=256
    newY=256
    #plt.figure()
    #plt.imshow(img)
    #img = cv2.resize(img,(int(newX),int(newY)))
    img=img//255
    # vertical projection profile: ink pixels per column
    y=img.sum(axis=0)
    #print (y)
    #print(y.shape)
    num=np.size(img,1)
    x = np.arange(num)
    ###print(x.shape)
    ###fig, ax = plt.subplots()
    ###ax.plot(x,y)
    ###plt.show()
    ###plt.figure()
    #width=np.size(img,0)
    #xc=np.size(img,0)
    #print(width)
    #img = img[0:width, 0:250]
    #plt.imshow(img)
    #x = np.array([1,0,2,0,3,0,4,5,6,7,8])
    #print("========")
    # columns containing no ink at all
    index=np.where(y == 0)[0]
    #index=np.where(y <= 10)[0]
    #print(index)
    len=index.shape[0]
    # walk the empty columns right-to-left
    rev_index=index[::-1]
    #print(len)
    #print("========")
    width=np.size(img,1)
    height=np.size(img,0)
    #print(width)
    #print(height)
    sum_ink=0
    for i in range(0, len-1):
        #print(i,"++++")
        if rev_index[i+1]<rev_index[i]-1: #by pass the contigues zeros that exist
            #print("dif=",rev_index[i]-rev_index[i+1],"index=",i )
            #print(rev_index[i])
            #print(rev_index[i+1])
            # xc = left edge of the rightmost gap; split the word there
            xc=rev_index[i+1]
            imgF = img[0:height, 0:xc]
            imgT= img[0:height, xc:width]
            half=height/2
            #print('==========')
            #print('half',half)
            #print('xc',xc)
            # horizontal profile and total ink of the trailing fragment
            hprof=imgT.sum(axis=1)
            sumakiarea=hprof.sum()
            hprofIndex=np.where(hprof == 0)
            hprofZerosNum=np.size(hprofIndex,1)
            #print('zeros',hprofZerosNum)
            #print('==========')
            #if hprofZerosNum-3 >= half:
            # <=90 ink pixels: small enough to be punctuation
            if sumakiarea <=90:
                #cut
                imgF=255*imgF
                imgF=abs(255-imgF)
                imgReturn=imgF
                #plt.figure()
                #plt.imshow(imgF)
                return imgReturn
            else:
                #do not cut
                img=255*img
                img=abs(255-img)
                imgReturn=img
                return imgReturn
def checkDiacritics(img):
    """Heuristically decide whether a word image carries diacritics above it.

    Integrates a cubic interpolation of the (inverted, normalised) horizontal
    projection profile over the top quarter of the image; a large integral
    means ink near the top, i.e. no separate diacritic band.

    Returns False when the integral (scaled by 1000) exceeds 50 -- i.e. when
    there are NO diacritics -- and True otherwise.  Note the inverted sense.
    """
    import numpy as np
    import cv2 as cv2
    from matplotlib import pyplot as plt
    from scipy.interpolate import interp1d
    img = cv2.bitwise_not(img)
    #print(img)
    newX=256
    newY=256
    #img = cv2.resize(img,(int(newX),int(newY)))
    img=img/255
    # horizontal projection: ink per row
    y=img.sum(axis=1)
    #num=np.size(img,0)
    step=0.1
    height=np.size(img,0)
    width=np.size(img,1)
    # normalise row indices to [0, 1]; reversed so index 0 maps to the bottom
    x_data = np.arange(height)/height
    x_interp = np.arange(1,height-1,step)/height
    x_data=x_data[::-1]
    x_interp=x_interp[::-1]
    #print(y.shape)
    #print(width)
    #print(height)
    f2 = interp1d(x_data, y, kind='cubic')
    sumf=0
    for x in x_interp:
        if x>0.75: #near the first minima
            # accumulate the profile over the top band of the image
            sumf=sumf + (f2(x)*step/(height*width))
            # print(x,f2(x))
    #print(sumf)
    #########plt.plot(y/width, x_data, 'o', f2(x_interp)/width, x_interp, '+')
    #plt.plot(y/width, x_data, 'o')
    #########plt.figure()
    #########plt.imshow(img)
    sumf=sumf*1000
    if sumf>50:
        return False #if there is not diacritics
    else:
        return True
def applyCircleErosion(word_img):
    """Pad the word image with white, then erode it with a 9x9 elliptical kernel.

    NOTE(review): createPadding is called with margin=20 but (as written)
    always pads by 10 pixels -- confirm the intended margin.
    """
    word_img=createPadding(word_img, 20)
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))
    erode_img = cv2.erode(word_img, kernel, iterations=1)
    return erode_img
def xxxapplyCircleErosion2(word_img):
    """Disabled variant of applyCircleErosion (xxx prefix suggests it is dead code).

    Enlarges the image by replicating the 10-pixel border instead of padding
    with a constant, then erodes with a 9x9 elliptical kernel.
    """
    # colorvalue is ignored by BORDER_REPLICATE; it only matters for constant borders
    colorvalue = [0, 0, 0]
    enlarge_img= cv2.copyMakeBorder(word_img,10,10,10,10,cv2.BORDER_REPLICATE,value=colorvalue)
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))
    erode_img = cv2.erode(enlarge_img, kernel, iterations=1)
    return erode_img
def applyHorizontalErosion(source_img):
kernel = np.ones((1,9), np.uint8)
img_erosion = cv2.erode(source_img, kernel, iterations=1)
return img_erosion
def applyWordSegmentation(img,folder):
source_img=img.copy()
#kernel = np.ones((1,9), np.uint8)
#img_erosion = cv2.erode(source_img, kernel, iterations=1)
img_erosion=applyHorizontalErosion(source_img)
#rect_img=source_img
_, contours, hierarchy = cv2.findContours(img_erosion,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for (i, c) in enumerate(contours):
area = cv2.contourArea(c)
#do not segment the very small contours
if area < 200:
# do not save dots and small contours
continue
if area > 10000:
# do not save very big contours
continue
#print("\tSize of contour %d: %d" % (i, len(c)))
(x, y, w1, h1) = cv2.boundingRect(c)
rect_img=cv2.rectangle(source_img, (x, y), (x + w1, y + h1), (0, 255, 0), 1)
#here check for grap missing diacritics
#crop_img_erosion=img_erosion[y:y+h, x:x+w]
#cv2.imwrite('test/'+str(i)+'test_er.png',crop_img_erosion)
#crop_img = img[y:y+h, x:x+w]
###print(area)
#print('000000000000')
###res=checkDiacritics(crop_img)
###print(res)
#print('test/'+ folder +'/'+str(i)+'test.png')
###if res==True:
###crop_img = img[y-5:y+h+5, x:x+w]
#cv2.imwrite('test/'+ folder +'/'+str(i)+'test.png',crop_img)
#yxxx=0
###else:
###crop_img = img[y-10:y+h+5, x:x+w]
#yxxx=0
#cv2.imwrite('test/'+ folder +'/'+str(i)+'test.png',crop_img)
crop_imgOr = img[y-10:y+h1, x:x+w1]
crop_img=crop_imgOr.copy()
wordname=str(y)+'-'+str(x)+'-'+str(w1)+'-'+str(h1)
wordname_path='test/'+ folder +'/'+wordname+'.png'
#w2=crop_img.shape[1]
#h2=crop_img.shape[0]
#for col in range(w2):
#if crop_img[0, col] == 0:
#cv2.floodFill(crop_img, None, (col, 0), 255)
#if crop_img[h2-1, col] == 0:
#cv2.floodFill(crop_img, None, (col, h2-1), 255)
#clean_img=crop_img
#==================
#check these two functions for proper functioning
clean_img=clearBorder(crop_img)
#==================
clean_img=cutPunctuation(clean_img,wordname_path)
#clean_img=clearBorder(clean_img.astype(int))
#print('test/'+ folder +'/'+wordname+'.png')
#cv2.imwrite('test/'+ folder +'/'+str(i)+'test.png',clean_img)
cv2.imwrite('test/'+ folder +'/'+wordname+'.png',clean_img)
#rspimg = cv2.resize(oriimg,(int(newX),int(newY)))
cv2.imwrite('results/'+folder+'___rects.png',rect_img)
cv2.imwrite('results/'+folder+'___erosion.png',img_erosion)
contours_img=cv2.drawContours(img, contours, -1, (0,255,0), 1)
cv2.imwrite('results/'+folder+'___contours.png',contours_img)
def searchData(query,folder):
page=[]
#query=createPadding(query, 20)
erodeimg=applyCircleErosion(query)
#Get the contour of query image
_, QueryContours, _ = cv2.findContours(erodeimg, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
#queryContour=cv2.drawContours(query, contours, 1, (0,255,0), 1)
count=0
for filename in glob.glob("test/"+folder+"/*.png"):
#print(Path(filename))
data = cv2.imread(filename,0)
#Apply erosion to image file
#data=createPadding(data, 20)
erodedata=applyCircleErosion(data)
#Get the contour of image file
_, DataContours, _ = cv2.findContours(erodedata, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
#cnt = sorted(DataContours, key=cv2.contourArea)
#Check if contours are more than one, we need the second largest contour
if len(DataContours)>1:
#print(len(DataContours))
#Calculate HU moments and compare
ret = cv2.matchShapes(QueryContours[1],DataContours[1],3,0.0)
#First filtering with Hu moments
if ret<0.1:
area = cv2.contourArea(DataContours[1])
#print(area)
#print(filename)
#Calculate shape context and compare
sd = cv2.createShapeContextDistanceExtractor()
try:
abc = sd.computeDistance(QueryContours[1],DataContours[1])
except:
print(area)
print(filename)
if abc<0.06:
count=count+1
#plt.figure()
#plt.imshow(erodedata)
#plt.imshow(data)
print(filename)
print('ContoursNum:',len(DataContours))
print('ShapeMatch:',ret)
print('Area:',area)
print('Context:',abc)
(x, y, w, h) = cv2.boundingRect(DataContours[1])
print('Width:',w)
print('====================')
OSfilename=Path(filename)
page.append(OSfilename)
else:
#print(filename)
x=0
#filename='test429.png'
#oriimg = cv2.imread(filename,0)
print(count)
return page
def createView():
import re
#f=open("results/results.html", "r")
# file_contents = f.read()
# print( file_contents)
#str = "The rain in Spain"
#x = re.sub("\\", "/", file_contents)
# x = file_contents.replace('\\', '/')
#
# f.close()
f=open("results/results.html", "r")
content=''
for line in f.readlines():
print(line)
x = line.replace('\\', '/')
x = x.replace('\n', '')
#f.write(x)
print(x)
#content=content+'<a href=../'+x+'>'+x+'</a><br>'+'\n'
content=content+'<a href=../'+x+'>'+x+'</a>'+'<img src=../'+x+'><br>'+'\n'
f.close()
print(content)
f=open("results/results2.html", "w")
f.write(content)
f.close()
| [
"evagelosvar@gmail.com"
] | evagelosvar@gmail.com |
ca837dfcbf930d6469d02412264df601a0216855 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_347/ch1_2020_03_11_20_09_49_458556.py | a589fa5640321f7bdfd640839fc404d8e838e5a3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | def calcula_valor_devido (c, t, i):
M = c*((1+i)**t)
return M | [
"you@example.com"
] | you@example.com |
4941f9ebcf4458f99db5d63c738817d92fa69967 | 5e557741c8867bca4c4bcf2d5e67409211d059a3 | /.circleci/cimodel/data/pytorch_build_data.py | e2172b660e3114b5e40a445762f40a2702cc4f22 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | Pandinosaurus/pytorch | a2bb724cfc548f0f2278b5af2fd8b1d2758adb76 | bb8978f605e203fbb780f03010fefbece35ac51c | refs/heads/master | 2023-05-02T20:07:23.577610 | 2021-11-05T14:01:30 | 2021-11-05T14:04:40 | 119,666,381 | 2 | 0 | NOASSERTION | 2021-11-05T19:55:56 | 2018-01-31T09:37:34 | C++ | UTF-8 | Python | false | false | 9,147 | py | from cimodel.lib.conf_tree import ConfigNode, X, XImportant
CONFIG_TREE_DATA = [
("xenial", [
("gcc", [
("5.4", [ # All this subtree rebases to master and then build
("3.6", [
("important", [X(True)]),
]),
]),
# TODO: bring back libtorch test
]),
("cuda", [
("10.2", [
("3.6", [
# Build are needed for slow_gradcheck
('build_only', [X(True)]),
("slow_gradcheck", [
# If you update this slow gradcheck, you should
# also update docker_definitions.py to make sure
# the docker image match the config used here
(True, [
('shard_test', [XImportant(True)]),
]),
]),
# UNCOMMENT THE BELOW TO REENABLE LIBTORCH
# ("libtorch", [
# (True, [
# ('build_only', [X(True)]),
# ]),
# ]),
]),
]),
]),
]),
("bionic", [
("clang", [
("9", [
("3.6", [
("xla", [XImportant(True)]),
]),
]),
]),
# @jithunnair-amd believes Jenkins builds are sufficient
# ("rocm", [
# ("3.9", [
# ("3.6", [
# ('build_only', [XImportant(True)]),
# ]),
# ]),
# ]),
]),
]
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
self.subtree = subtree
self.init2(node_name)
def modify_label(self, label):
return label
def init2(self, node_name):
pass
def get_children(self):
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
def __init__(self, node_name, subtree):
super(TopLevelNode, self).__init__(None, node_name, subtree)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["distro_name"] = node_name
def child_constructor(self):
distro = self.find_prop("distro_name")
next_nodes = {
"xenial": XenialCompilerConfigNode,
"bionic": BionicCompilerConfigNode,
}
return next_nodes[distro]
class PyVerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["pyver"] = node_name
self.props["abbreviated_pyver"] = get_major_pyver(node_name)
if node_name == "3.9":
self.props["abbreviated_pyver"] = "py3.9"
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ExperimentalFeatureConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["experimental_feature"] = node_name
def child_constructor(self):
experimental_feature = self.find_prop("experimental_feature")
next_nodes = {
"asan": AsanConfigNode,
"xla": XlaConfigNode,
"mlc": MLCConfigNode,
"vulkan": VulkanConfigNode,
"parallel_tbb": ParallelTBBConfigNode,
"noarch": NoarchConfigNode,
"parallel_native": ParallelNativeConfigNode,
"onnx": ONNXConfigNode,
"libtorch": LibTorchConfigNode,
"important": ImportantConfigNode,
"build_only": BuildOnlyConfigNode,
"shard_test": ShardTestConfigNode,
"cuda_gcc_override": CudaGccOverrideConfigNode,
"pure_torch": PureTorchConfigNode,
"slow_gradcheck": SlowGradcheckConfigNode,
}
return next_nodes[experimental_feature]
class SlowGradcheckConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_slow_gradcheck"] = True
def child_constructor(self):
return ExperimentalFeatureConfigNode
class PureTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PURE_TORCH=" + str(label)
def init2(self, node_name):
self.props["is_pure_torch"] = node_name
def child_constructor(self):
return ImportantConfigNode
class XlaConfigNode(TreeConfigNode):
def modify_label(self, label):
return "XLA=" + str(label)
def init2(self, node_name):
self.props["is_xla"] = node_name
def child_constructor(self):
return ImportantConfigNode
class MLCConfigNode(TreeConfigNode):
def modify_label(self, label):
return "MLC=" + str(label)
def init2(self, node_name):
self.props["is_mlc"] = node_name
def child_constructor(self):
return ImportantConfigNode
class AsanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Asan=" + str(label)
def init2(self, node_name):
self.props["is_asan"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ONNXConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Onnx=" + str(label)
def init2(self, node_name):
self.props["is_onnx"] = node_name
def child_constructor(self):
return ImportantConfigNode
class VulkanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Vulkan=" + str(label)
def init2(self, node_name):
self.props["is_vulkan"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelTBBConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELTBB=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "paralleltbb"
def child_constructor(self):
return ImportantConfigNode
class NoarchConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_noarch"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelNativeConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELNATIVE=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "parallelnative"
def child_constructor(self):
return ImportantConfigNode
class LibTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "BUILD_TEST_LIBTORCH=" + str(label)
def init2(self, node_name):
self.props["is_libtorch"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class CudaGccOverrideConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["cuda_gcc_override"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class BuildOnlyConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["build_only"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ShardTestConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["shard_test"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ImportantConfigNode(TreeConfigNode):
def modify_label(self, label):
return "IMPORTANT=" + str(label)
def init2(self, node_name):
self.props["is_important"] = node_name
def get_children(self):
return []
class XenialCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class BionicCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class BionicCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
44a9455dc95a0c5fc72de765d0a3d877d796be68 | e888abf88ecfd52d071bd9e8543a927adcfeb515 | /free_lenta/settings.py | 04c30232950a6dc08f365e62c7c5849be3478dc3 | [] | no_license | freelenta/free-lenta | 09431c8bd646761e7cdc5093727a6600863843ab | b393879315ef6abbffceb715da0584acd28c8bdb | refs/heads/master | 2021-01-01T05:32:32.765368 | 2014-06-13T18:16:48 | 2014-06-13T18:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | """
Django settings for free_lenta project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f5c@nb#*qrvlx0qds!opwjpk-612c)#y^--v7p!bes0v1=fqqc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news_site',
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'free_lenta.urls'
WSGI_APPLICATION = 'free_lenta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'hamlet',
'USER': 'alexey',
'PASSWORD': 'ried7eSh',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
) | [
"sokolovbiotech@gmail.com"
] | sokolovbiotech@gmail.com |
cc3c1c7ee4ce12b2a208ca471b5b1cbe7ea9dc29 | b19abe46b187cc2887bef27eedeae3af74f2e373 | /MN(+AS)/models/__init__.py | 8a3fc1aa6d2c2b455fc26f3c98d806e4ddd726f9 | [] | no_license | DeepLearnXMU/PSSAttention | 4c22a3e37d48c991017d9d9bf1d4b5cdf6167001 | 7e23d8d1b6305c2f77e526b2bcef72dbbcf29d21 | refs/heads/master | 2022-02-13T01:51:56.754195 | 2022-02-04T15:50:24 | 2022-02-04T15:50:24 | 189,739,771 | 93 | 19 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import thumt.models.BL_MN
import thumt.models.FINAL_BL_MN
def get_model(name):
if name == "BL_MN":
return thumt.models.BL_MN.BL_MN
elif name == "FINAL_BL_MN":
return thumt.models.FINAL_BL_MN.FINAL_BL_MN
else:
raise LookupError("Unknown model %s" % name)
| [
"noreply@github.com"
] | DeepLearnXMU.noreply@github.com |
a709bad42f707666ce609d13d44be5d5d8b8f734 | 0cd57b4784780098da1030f5af2f99517d0b4e1a | /pipeline/UserModel.py | 4717d9eef0521786c6d11fc41a6ab85ef681b21b | [] | no_license | TBruyn/Modeling-and-Data-Analysis-in-Complex-Networks | fe17605d2c267edd862738b33c2d3079383d78b0 | d91fd46428de4c4f7e7e00debdedf7890174c212 | refs/heads/master | 2021-01-26T00:43:03.012282 | 2020-04-21T11:40:30 | 2020-04-21T11:40:30 | 243,244,941 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | import GraphLoader
import json
import networkx as nx
import numpy as np
import random
import pprint
import os
import time
current_number_of_visitors = 991
entry_pages = json.loads(open('pipeline/entry_pages.json', 'r').read())
class UserModel:
def __init__(self, graph):
self.graph = graph
def random_walk_from_node(self, entry_point):
def choose_next_point(current_point):
p_exit = self.graph.nodes[current_point]['attr_data']['p_exit']
edges = self.graph.edges(current_point, data=True)
next_list = [v for u, v, d in edges if v != current_point]
next_list.insert(0, 'exit')
p_list = [d['p'] for u, v, d in edges if v != current_point]
p_list.insert(0, p_exit)
next_index = np.random.choice(list(range(0, len(p_list))), p=p_list)
return next_list[next_index]
# TODO implement more realistic probability function
def p_next_step(steps):
return 1
virtual_user_traffic = {
'pagelist': [self.graph.nodes[entry_point]['attr_data']],
'page_views': 1
}
next_node = entry_point
while np.random.random(1)[0] < p_next_step(virtual_user_traffic['page_views']):
next_node = choose_next_point(next_node)
if next_node == 'exit':
return virtual_user_traffic
else:
virtual_user_traffic['pagelist'].append(self.graph.nodes[next_node]['attr_data'])
virtual_user_traffic['page_views'] = len(virtual_user_traffic['pagelist'])
return virtual_user_traffic
def random_walk_from_node_list(self, nodelist=entry_pages):
return [self.random_walk_from_node(node) for node in nodelist if node in self.graph.nodes]
def random_walk_n_nodes(self, n=current_number_of_visitors):
nlist = random.choices(list(self.graph.nodes), k=n)
return self.random_walk_from_node_list(nlist)
def save_traffic_data(self, traffic_data, nametag=""):
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
print(dname)
os.chdir(dname)
timestamp = time.asctime(time.localtime(time.time())).replace(' ', '_')
with open('generated_user_traffic/generated_traffic_' + nametag + '.json', 'w', encoding='utf-8') as file:
json.dump(traffic_data, file, indent=4)
def test():
pages = json.loads(
open(
'/home/tim/Documents/Modeling-and-Data-Analysis-in-Complex-Networks/final/data/merged_traffic_array_latest.json',
'r').read())
visits = [[(visit['url'], visit['last_page_view']) for visit in p['pagelist']] for p in pages if 'pagelist' in p]
entry_pages_with_timestamp = [sorted(visit, key=lambda x: x[1])[0] for visit in visits if len(visit) > 0]
entry_pages = [p for p, t in entry_pages_with_timestamp]
with open('pipeline/entry_pages.json', 'w', encoding='utf-8') as file:
json.dump(entry_pages, file, indent=4)
test()
| [
"timtbruyn@gmail.com"
] | timtbruyn@gmail.com |
182933ad1e32acc47eb2cfc12c855e4c86b85ade | e116a28a8e4d07bb4de1812fde957a38155eb6df | /polar_test2.py | 0ee481b4a0480d2965c11c666d56aaca4fe2291a | [] | no_license | gl-coding/EasyPyEcharts | 5582ddf6be3158f13663778c1038767a87756216 | f9dbe8ad7389a6e2629643c9b7af7b9dc3bfccd5 | refs/heads/master | 2020-09-29T20:48:46.260306 | 2019-12-10T12:52:24 | 2019-12-10T12:52:24 | 227,119,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #encoding=utf-8
from pyecharts import Polar
radius =['周一', '周二', '周三', '周四', '周五', '周六', '周日']
polar =Polar("极坐标系-堆叠柱状图示例", width=1200, height=600)
polar.add("", [1, 2, 3, 4, 3, 5, 1], radius_data=radius, type='barAngle', is_stack=True)
polar.add("", [2, 4, 6, 1, 2, 3, 1], radius_data=radius, type='barAngle', is_stack=True)
polar.add("", [1, 2, 3, 4, 1, 2, 5], radius_data=radius, type='barAngle', is_stack=True)
polar.show_config()
polar.render()
| [
"1451607278@qq.com"
] | 1451607278@qq.com |
bae46eca925d4eec064cfa40ac5ad479eccddd16 | 6a01a9287a4c23c7f11b7c5399cfb96bbe42eba8 | /python/scripts/get_nearest_k_features.py | 068a702adc3492255203e07630813f3fd49b6ade | [
"MIT"
] | permissive | xguse/gmm-to-gff-transcripts-vs-snps | 3c25bf2752aee76174d5dab92060fe7269caf99f | 75337135ab8ff6d840af3cfccfe6404a06777a54 | refs/heads/master | 2021-01-19T01:50:33.473897 | 2016-08-02T20:31:18 | 2016-08-02T20:31:18 | 54,731,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | """For each SNP file, produce a bed representing the nearest k gene or mapped transcript features and its distance from the SNP."""
import pybedtools as pbt
import pandas as pd
k_number = snakemake.params.k_number
snp_beds = snakemake.input.snp_beds
gene_model_subtracted = snakemake.input.gene_model_subtracted
gene_models = snakemake.input.gene_models
nearest_features_beds = snakemake.output.nearest_features_beds
snps_in_features = snakemake.output.snps_in_features
headers = ["SNP_chrom",
"SNP_start",
"SNP_end",
"feature_set_name",
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"thickStart",
"thickEnd",
"itemRgb",
"blockCount",
"blockSizes",
"blockStarts",
"distance"
]
for snp_bed, nearest_bed, feature_hit_file in zip(snp_beds, nearest_features_beds, snps_in_features):
snp_bed = pbt.BedTool(snp_bed)
gene_model_subtracted_bed = pbt.BedTool(gene_model_subtracted)
gene_models_bed = pbt.BedTool(gene_models)
k_nearest = snp_bed.closest([gene_model_subtracted_bed.fn,
gene_models_bed.fn],
k=k_number,
names=['novel_mapped_tx', 'official_annotations'],
D='ref', # Include SIGNED distances from SNP based on the ref genome
t='all', # Return all members of a distance "tie"
mdb='each', # Return `k_number` of neighboors for each `names`
)
k_nearest.saveas(nearest_bed)
nearest_df = pd.read_csv(nearest_bed, sep="\t", names=headers)
nearest_df
in_features = nearest_df.query(""" abs(distance) <= 0 """)
in_features.to_excel(feature_hit_file, index=False)
| [
"wadunn83@gmail.com"
] | wadunn83@gmail.com |
ce4d1a0078a611ab60cd674de235313bd91706a0 | 22f539bf2a49d8134812ee6be153cc886c13b24f | /Python_3/coursera_form/coursera_form/settings.py | 6bece5dbe4ae07e747d5e076fa3a46f544867dee | [] | no_license | momsspaghettti/coursera-programming-in-python | ba21c49aa9499560942be00d92af0f1e06b01912 | e5a5445312d84ccbf3a93676162b513b265681a8 | refs/heads/master | 2021-08-08T06:26:28.573308 | 2021-06-28T17:56:52 | 2021-06-28T17:56:52 | 188,097,701 | 14 | 17 | null | 2021-06-10T21:35:14 | 2019-05-22T19:03:15 | Python | UTF-8 | Python | false | false | 3,203 | py | """
Django settings for coursera_form project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^pwvw)(b3_e%gxe3yf&_gafp4=#4($*zj0wx_ut)=u=ckveyiw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'feedback.apps.FeedbackConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'coursera_form.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'coursera_form.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login/'
| [
"ivan.samoilov.1999@yandex.ru"
] | ivan.samoilov.1999@yandex.ru |
e08d6246e043f965cb48fe53ece42737d84fe2c7 | 68036b3b5c4083b55218b678ef4eaa3818252f75 | /electrostatics.py | 7ecb30dbe19583bc3e298270bfe50a9c60bb995f | [] | no_license | pyproblems/book | 2a46688eb874133fb7033a41f46c5c02baf686b3 | 8ee638ed0bbd44549da421260fb0a6da3c32dec9 | refs/heads/master | 2023-08-03T13:16:27.048096 | 2021-09-29T18:15:34 | 2021-09-29T18:15:34 | 291,084,427 | 4 | 1 | null | 2020-10-30T17:16:03 | 2020-08-28T15:42:40 | Jupyter Notebook | UTF-8 | Python | false | false | 2,141 | py | # -*- coding: utf-8 -*-
"""This module defines some essential functions for the two notebooks."""
import time
import numpy as np
k_e = 1 / (4 * np.pi * 8.8541878128e-12)
def E(x, y, q, r):
"""Electric field.
Args:
x (float): X position(s).
y (float): Y position(s).
q (float): Charge(s).
r (iterable of float): (x, y) position(s) of the point charge(s). If an array is given,
it should be a (2, N) array where N is the number of point charges.
Returns:
float: Electric field vectors at every point in `x` and `y`. The shape of
this array is the same shape as `x` and `y` with an added initial dimension.
"""
# Calculate the distance of each requested point from the point charge.
d = ((x - r[0]) ** 2 + (y - r[1]) ** 2) ** 0.5
magnitudes = k_e * q / d ** 2
# Calculate unit vector components.
xs = (x - r[0]) / d
ys = (y - r[1]) / d
return np.concatenate(
((xs * magnitudes)[np.newaxis], (ys * magnitudes)[np.newaxis]), axis=0
)
def E_dir(x, y, q, r):
"""Electric field direction at one point (x, y).
Args:
x (float): x position.
y (float): y position.
q (float): Charge(s).
r (iterable of float): (x, y) position(s) of the point charge(s). If an array is given,
it should be a (2, N) array where N is the number of point charges.
Returns:
float: Normalised electric field vectors at every point in `x` and `y`. The shape of
this array is the same shape as `x` and `y` with an added initial dimension.
"""
E_field = np.sum(E(x, y, q, r), axis=1)
# Normalise the electric field vectors.
return E_field / np.linalg.norm(E_field)
class Timer:
"""Can be used as a context manager to time events."""
def __init__(self, name="operation"):
self.name = name
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
print(f"{self.name} took {self.interval:0.1e} seconds.")
| [
"lloyd@ltj.me.uk"
] | lloyd@ltj.me.uk |
e306115abcfd5059eac290a453f3b6fe5a1f0982 | cf4d4c2a30806105248a920f43658e65306220bb | /46Hash.py | 633fc571e3174ed37bfe99491609a47c1ab18ee5 | [] | no_license | 121121lol/python-qs | 6714dcb8b3af4dc438330e6e9f8d14901db5746b | 841783574deade0b027ba83426aa7086ce7f22fa | refs/heads/master | 2022-11-29T22:58:59.128694 | 2020-08-15T14:32:27 | 2020-08-15T14:32:27 | 287,461,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | import hashlib
def hash_file(filename):
h = hashlib.sha1()
with open(filename,'rb') as file:
chunk = 0
while chunk != b'':
chunk = file.read(1024)
h.update(chunk)
return h.hexdigest()
message = hash_file("track1.mp3")
print(message) | [
"noreply@github.com"
] | 121121lol.noreply@github.com |
bc0e60af0d5ae9372d357a3e7e260497d2f7cb40 | 4fc9ffd554298ab03e59b511d5258535972ef88f | /pms/clinical/migrations/0002_labreport_status.py | 583be7b6953f37f9f51a07237d294fd051642b93 | [] | no_license | lasith98/wecare | 83670b1034ac57ceb832ef5ecaf90b553e55debb | 4d5fb6e8028f809ed786177da66f993e44517480 | refs/heads/main | 2023-04-11T00:49:48.378576 | 2021-04-19T19:53:40 | 2021-04-19T19:53:40 | 340,083,660 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # Generated by Django 3.1.6 on 2021-04-15 11:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clinical', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='labreport',
name='status',
field=models.CharField(choices=[('Queue', 'In Queue'), ('Start Processing', 'Start Processing'), ('Complete', 'Completed')], default='Queue', max_length=20),
),
]
| [
"lasithhasana9@gmail.com"
] | lasithhasana9@gmail.com |
82a6f0b1e2bc37b275cd9f5a9eca19836159da3b | 07095cba7db2ede53214cc43c6c16ee6817c7da0 | /1-modulo-Intro-Python/datos/numpy/operador_ternario.py | 9cf2e14fa7e057567801e495220cd1501d7b91f8 | [] | no_license | carlosjoset/data-science-desafio | 39a2b07be7eee1c64634870c41bee3a55ee9fdd9 | 69a4e93c56ab81183527a3c8b362267d2b2c2987 | refs/heads/master | 2022-04-10T17:57:02.962638 | 2020-03-06T22:28:05 | 2020-03-06T22:28:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import numpy as np
# retorna un nuevo numpy array con valores reasignados
# np.where(una condiciones, se cumple, no se cumple)
notas = np.array([4.5, 6.6, 7.0, 2.0, 3.6, 4.6, 5.6, 5.8, 2.5])
notas.mean()
# Out[76]: 4.688888888888889
notas_bin = np.where(notas >= notas.mean(), 1, 0)
# Out[78]: array([0, 1, 1, 0, 0, 0, 1, 1, 0])
# notas[notas >= notas.mean()] | [
"lporras16@gmail.com"
] | lporras16@gmail.com |
4857c5fd7f25a1b346614f3d7fdb81c556a078c3 | ee6dd131bf143baa9d84521e089ca930abfa4ce4 | /src/pyfmu/fmi2/slave.py | b57fbabccc3ee1ffb7c022dcdb84133c81ece34f | [] | no_license | manasdas17/pyfmu | eed4eb40324e698918086085254e9fef6f57a130 | fc9bd0f17c0f2945a05727bab78c5225d566d8d8 | refs/heads/master | 2023-02-05T06:15:19.665417 | 2020-12-14T11:39:28 | 2020-12-14T11:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,270 | py | from __future__ import annotations
from typing import List, Tuple, Optional, Literal, Callable
from uuid import uuid4
from pyfmu.fmi2.exception import SlaveAttributeError
from pyfmu.fmi2.logging import Fmi2LoggerBase, FMI2PrintLogger
from pyfmu.fmi2.types import (
Fmi2Status,
Fmi2Value_T,
Fmi2Status_T,
Fmi2ScalarVariable,
Fmi2DataType_T,
Fmi2Variability_T,
Fmi2Causality_T,
Fmi2Initial_T,
)
class Fmi2Slave:
def __init__(
self,
model_name: str,
author: str = None,
copyright: str = None,
version: str = None,
description: str = None,
logger: Fmi2LoggerBase = None,
register_standard_log_categories=True,
):
"""Constructs a new FMI2 slave
Args:
model_name (str): [description]
author (str, optional): [description]. Defaults to None.
copyright (str, optional): [description]. Defaults to None.
version (str, optional): [description]. Defaults to None.
description (str, optional): [description]. Defaults to None.
logger (FMI2SlaveLogger, optional): [description]. Defaults to None.
"""
self.author = author
self.copyright = copyright
self.description = description
self.model_name = model_name
self.license = license
self.guid = str(uuid4())
if logger is None:
logger = FMI2PrintLogger(model_name=model_name)
self._variables: List[Fmi2ScalarVariable] = []
self._version = version
self._value_reference_counter = 0
self._used_value_references = {}
self._logger = logger
if register_standard_log_categories:
self.register_log_category(
"logStatusWarning", lambda m, c, s: c == Fmi2Status.warning
)
self.register_log_category(
"logStatusDiscard", lambda m, c, s: c == Fmi2Status.discard
)
self.register_log_category(
"logStatusError", lambda m, c, s: s == Fmi2Status.error
)
self.register_log_category(
"logStatusFatal", lambda m, c, s: s == Fmi2Status.fatal
)
self.register_log_category(
"logStatusPending", lambda m, c, s: s == Fmi2Status.pending
)
self.register_log_category("logAll", lambda m, c, s: True)
def register_input(
self,
attr_name: str,
data_type: Literal["real", "integer", "boolean", "string"] = "real",
variability: Literal["continuous", "discrete"] = "continuous",
description: str = None,
) -> None:
"""Declares a new input of the model.
This is added to the model description as a scalar variable with causality=input.
Args:
attr_name: name of the variable.
data_type: the underlying type of the variable. Defaults to "real".
variability: defines when the variable may change value with respect to time. Defaults to "continuous".
description: text added to model description, often displayed by simulation environment. Defaults to None.
"""
self._register_variable(
attr_name, data_type, "input", variability, None, description
)
def register_output(
self,
attr_name: str,
data_type: Literal["real", "integer", "boolean", "string"] = "real",
variability: Literal["constant", "discrete", "continuous"] = "continuous",
initial: Literal["approx", "calculated", "exact"] = "calculated",
description: str = None,
) -> None:
"""Declares a new output of the model
This is added to the model description as a scalar variable with causality=output.
"""
self._register_variable(
attr_name, data_type, "output", variability, initial, description
)
def register_parameter(
self,
attr_name: str,
data_type: Literal["real", "integer", "boolean", "string"] = "real",
variability: Literal["fixed", "tunable"] = "tunable",
description: str = None,
) -> None:
self._register_variable(
attr_name, data_type, "parameter", variability, "exact", description
)
def _register_variable(
self,
attr_name: str,
type: Fmi2DataType_T,
causality: Fmi2Causality_T,
variability: Fmi2Variability_T,
initial: Optional[Fmi2Initial_T],
description: str = None,
) -> None:
"""Expose an attribute of the slave as an variable of the model.
Args:
attr_name (str): name of the attribute
type (Fmi2DataType_T): [description]
causality (Fmi2Causality_T): [description]
variability (Fmi2Variability_T): [description]
initial (Optional[Fmi2Initial_T]): [description]
description (str, optional): [description]. Defaults to None.
Raises:
Fmi2SlaveError: raised if a combination of variables are provided which does not
"""
if attr_name in [v.name for v in self.variables]:
raise SlaveAttributeError(f"Attribute has already been registered.")
if initial in {"approx", "exact"} or causality == "input":
try:
start = getattr(self, attr_name)
except Exception as e:
raise SlaveAttributeError(
f"""Failed determining a start value for the variable {attr_name}. Ensure that an attribute matching the name of the registered variable has been declared."""
) from e
else:
start = None
value_reference = self._acquire_unused_value_reference()
v = Fmi2ScalarVariable(
attr_name,
type,
causality,
variability,
value_reference,
initial,
start,
description,
)
self._variables.append(v)
def register_log_category(
self, name: str, predicate: Callable[[str, str, Fmi2Status_T], bool]
):
"""Register a new log category which may be used by the envrionment to filter log messages.
A predicate function is used to determine which messages match the specified category.
Args:
name: identifier added to the model descriptions log categories.
predicate: function used to determine whether message belongs to this log category.
Examples:
Filter based on category:
>>> self.register_log_category("gui", lambda message, catergory, status: catergory == "gui")
"""
self._logger.register_new_category(name, predicate)
def do_step(
self, current_time: float, step_size: float, no_set_fmu_state_prior: bool
) -> Fmi2Status_T:
return Fmi2Status.ok
def get_xxx(self, references: List[int]) -> Tuple[List[Fmi2Value_T], Fmi2Status_T]:
raise NotImplementedError()
def set_xxx(self, references: List[int], values: List[Fmi2Value_T]) -> Fmi2Status_T:
raise NotImplementedError()
def setup_experiment(
self, start_time: float, stop_time: float = None, tolerance: float = None
) -> Fmi2Status_T:
return Fmi2Status.ok
def set_debug_logging(
self, categories: list[str], logging_on: bool
) -> Fmi2Status_T:
"""Set the active categories for which messages are passed to the evironment.
Note that a special case of "categories == [] and logging_on = True" is defined to have special significance.
This is equivalent to logging all debug messages irregardless of category, see 2.1.5 p.21.
Args:
logging_on: flag used to indicate whether the specified categories should be enabled or not
categories: list of categories to enable/disable
"""
self._logger.set_debug_logging(logging_on, categories)
return Fmi2Status.ok
def enter_initialization_mode(self) -> Fmi2Status_T:
return Fmi2Status.ok
def exit_initialization_mode(self) -> Fmi2Status_T:
return Fmi2Status.ok
def reset(self) -> Fmi2Status_T:
"""Inform the FMU to set its internal state to that match that of a freshly instantiated FMU.
Resources such as file handles and GUI windows may be reused, as long
as the state that influences the simulation is reset.
Returns:
Fmi2Status_T: status code indicating the success of the operation
"""
return Fmi2Status.ok
def terminate(self) -> Fmi2Status_T:
r"""Informs the FMU that the simulation has terminated and allows the
environment read the final values of variables.
For cleanup of managed resources it is recommended to use the regular Python pattern
of defining function \_\_del\_\_ referred to as a finalizer.
Returns:
Fmi2Status_T: status code indicating the success of the operation
"""
return Fmi2Status.ok
def _acquire_unused_value_reference(self) -> int:
""" Returns the an unused value reference
"""
while True:
vr = self._value_reference_counter
self._value_reference_counter += 1
if vr not in self._used_value_references:
return vr
def log_ok(
self, msg: str, category: str = None, exc_info=False, stack_info=False,
):
self._log(
status=Fmi2Status.ok,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def log_warning(
self,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._log(
status=Fmi2Status.warning,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def log_discard(
self,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._log(
status=Fmi2Status.discard,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def log_error(
self,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._log(
status=Fmi2Status.error,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def log_fatal(
self,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._log(
status=Fmi2Status.fatal,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def log_pending(
self,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._log(
status=Fmi2Status.pending,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
)
def _log(
self,
status: Fmi2Status_T,
msg: str,
category: str = None,
exc_info=False,
stack_info=False,
stack_level: float = None,
):
self._logger.log(
status=status,
msg=msg,
category=category,
exc_info=exc_info,
stack_info=stack_info,
stack_level=stack_level,
)
@property
def log_categories(self) -> List[str]:
"""List of available log categories.
"""
return self._logger._category_to_predicates.keys()
@property
def variables(self) -> List[Fmi2ScalarVariable]:
return self._variables
| [
"clegaard@outlook.com"
] | clegaard@outlook.com |
c8828896c602e3fc526f1c2bd72f9c19201ff0f0 | 725b019934fb9230c4039bc369b1685477bb2a35 | /family/migrations/0002_auto_20191004_1857.py | 6caafe0cdced4892326b84aedf3b02823be77fc4 | [] | no_license | IMRAN104/hams-django | cd726069ca670b9ce78a19211e214b5b8153f1cc | be12ef158850476f0a37e883447f1a34c9f3052e | refs/heads/master | 2020-07-30T07:33:12.284849 | 2020-02-18T06:11:42 | 2020-02-18T06:11:42 | 210,136,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | # Generated by Django 2.2.5 on 2019-10-04 12:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('family', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='family',
name='childname',
),
migrations.RemoveField(
model_name='family',
name='spousename',
),
migrations.AddField(
model_name='family',
name='spouseDOB',
field=models.DateField(null=True),
),
migrations.AddField(
model_name='family',
name='spouseEmail',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='family',
name='spouseMobile',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='family',
name='spouseNID',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='family',
name='spouseNIDPicture',
field=models.ImageField(null=True, upload_to='spouse_nid_pic'),
),
migrations.AddField(
model_name='family',
name='spouseName',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='family',
name='spousePicture',
field=models.ImageField(null=True, upload_to='spouse_pic'),
),
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('childName', models.CharField(blank=True, max_length=100, null=True)),
('childDOB', models.DateField(null=True)),
('childPicture', models.ImageField(null=True, upload_to='spouse_pic')),
('Family', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='family.Family')),
],
),
]
| [
"omeca13@gmail.com"
] | omeca13@gmail.com |
ae5f6a56b1dd9225cfb080b788cdc31d7483c321 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_mc1.py | fa05bea2ae6ea09939c9d07048036355cd040bf2 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | from xcp2k.inputsection import InputSection
from _avbmc1 import _avbmc1
from _move_probabilities1 import _move_probabilities1
from _move_updates1 import _move_updates1
from _max_displacements1 import _max_displacements1
class _mc1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Nstep = None
self.Iprint = None
self.Nmoves = None
self.Nswapmoves = None
self.Lbias = None
self.Lstop = None
self.Ldiscrete = None
self.Rclus = None
self.Restart = None
self.Nvirial = None
self.Ensemble = None
self.Restart_file_name = None
self.Moves_file_name = None
self.Molecules_file_name = None
self.Coordinate_file_name = None
self.Energy_file_name = None
self.Data_file_name = None
self.Cell_file_name = None
self.Max_disp_file_name = None
self.Box2_file_name = None
self.Pressure = None
self.Temperature = None
self.Virial_temps = None
self.Discrete_step = None
self.Eta = None
self.Randomtoskip = None
self.AVBMC = _avbmc1()
self.MOVE_PROBABILITIES = _move_probabilities1()
self.MOVE_UPDATES = _move_updates1()
self.MAX_DISPLACEMENTS = _max_displacements1()
self._name = "MC"
self._keywords = {'Lstop': 'LSTOP', 'Nswapmoves': 'NSWAPMOVES', 'Lbias': 'LBIAS', 'Box2_file_name': 'BOX2_FILE_NAME', 'Nvirial': 'NVIRIAL', 'Ensemble': 'ENSEMBLE', 'Temperature': 'TEMPERATURE', 'Data_file_name': 'DATA_FILE_NAME', 'Pressure': 'PRESSURE', 'Restart': 'RESTART', 'Cell_file_name': 'CELL_FILE_NAME', 'Moves_file_name': 'MOVES_FILE_NAME', 'Iprint': 'IPRINT', 'Rclus': 'RCLUS', 'Eta': 'ETA', 'Molecules_file_name': 'MOLECULES_FILE_NAME', 'Virial_temps': 'VIRIAL_TEMPS', 'Randomtoskip': 'RANDOMTOSKIP', 'Max_disp_file_name': 'MAX_DISP_FILE_NAME', 'Restart_file_name': 'RESTART_FILE_NAME', 'Coordinate_file_name': 'COORDINATE_FILE_NAME', 'Nmoves': 'NMOVES', 'Discrete_step': 'DISCRETE_STEP', 'Energy_file_name': 'ENERGY_FILE_NAME', 'Ldiscrete': 'LDISCRETE', 'Nstep': 'NSTEP'}
self._subsections = {'AVBMC': 'AVBMC', 'MOVE_UPDATES': 'MOVE_UPDATES', 'MAX_DISPLACEMENTS': 'MAX_DISPLACEMENTS', 'MOVE_PROBABILITIES': 'MOVE_PROBABILITIES'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b81ec6831b002501041bfbdfa535ca459f6e59e9 | 56fc520e02e948a72ffe1d8fa5dcdc331cc0376b | /read_plate.py | 9e6704b9f33f986cc385a33c09d3eef282a2c754 | [] | no_license | Imposter/CSCI4220U_FinalProject | 21453fcc906e2d69dd5a178fce1efbf650e008b8 | 8edec45fcaff6865edb2437069a4a3b07a24ea1c | refs/heads/master | 2020-05-05T05:57:38.127137 | 2019-04-06T00:15:51 | 2019-04-06T00:15:51 | 179,770,427 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | """
License Plate Recognition
CSCI4220U: Computer Vision - Final Project
Authors: Eyaz Rehman (100584735), Rishabh Patel (100583380)
Date: April 5th, 2019
"""
import cv2
import numpy as np
from find_plate import find_plates
from segment_characters import find_characters
from ocr import OCR
from imutils import resize
from time import time
def pad(img, pad_x=0.0, pad_y=0.0, color=(255, 255, 255)):
# Add padding around image
top = int(pad_y * img.shape[0]) # rows
bottom = top
left = int(pad_x * img.shape[1]) # columns
right = left
return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, None, color)
def read_plate(ocr, img, plate_dimensions, dimension_error, nearby_threshold, size_threshold, plate_height=64, debug=False):
results = list()
# Locate plates
plates = find_plates(img, plate_dimensions, dimension_error, nearby_threshold, size_threshold, debug=False)
for plate in plates:
plate = tuple(np.array(plate).astype(int))
p_x, p_y, p_w, p_h = plate
# Crop image
pimg = img[p_y:p_y+p_h, p_x:p_x+p_w]
# Resize image
rimg = resize(pimg, height=plate_height)
# Get characters in plate
characters = find_characters(pimg, height=plate_height, debug=debug)
# Collect characters
plate_chars = list()
for c_box, cimg_bw in characters:
# Convert float tuple to int tuple
c_box = tuple(np.array(c_box).astype(int))
c_x, c_y, c_w, c_h = c_box
# Crop image
cimg = rimg[c_y:c_y+c_h, c_x:c_x+c_w]
# Resize images
cimg = pad(cimg, pad_x=0.05, pad_y=0.0)
cimg = resize(cimg, height=32)
plate_chars.append(cimg)
# If there are no characters in the plate, skip
if not len(plate_chars):
continue
# Merge plate characters
plate_img = np.concatenate(tuple(plate_chars), axis=1)
# Debug
if debug:
cv2.imshow("Plate Characters", plate_img)
cv2.waitKey()
# Perform OCR
predictions = ocr.predict(plate_img)
# Store result
results.append(predictions)
return results | [
"scrappycocos@hotmail.com"
] | scrappycocos@hotmail.com |
a8ea7377742c365751ae429fa8b49dbf096e62aa | 06ce3845c976a589be13ac81ca2dfe8475cca2b2 | /old/test_EXSY.py | ba0a8b21e199971dd04d44a5ff7da8cfa61180e2 | [] | no_license | santimaldo/Bruker | feefe15909adbf6a506c40429bce0da5d1833e8a | a4ef677d2471c03800574fc5657ee35021e27135 | refs/heads/master | 2023-08-13T20:55:29.886359 | 2023-07-21T01:00:04 | 2023-07-21T01:00:04 | 200,155,543 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,188 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 22:00:55 2020
@author: santi
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
#------------------------------------------------------------------------------
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
#------------------------------------------------------------------------------
def derivar(array, ws=151, po=3):
"""
esta funcion deriva y suaviza
ws: window size
po: polynomial order
"""
d = np.diff(array)
d = savgol_filter(d, ws, po)
# chequeo que no se muy grande el ws
cond1 = np.abs(d[0])>=(np.abs(d).max()/20)
cond2 = np.abs(d[-1])>=(np.abs(d).max()/20)
while cond1 or cond2:
ws = ws-20
d = np.diff(array)
d = savgol_filter(d, ws, po)
cond1 = np.abs(d[0])>=(np.abs(d).max()/10)
cond2 = np.abs(d[-1])>=(np.abs(d).max()/10)
return d
#------------------------------------------------------------------------------
def find_peak(ppmDir, ppmInd, spec, x, y, delta_x=2.5, delta_y=2.5):
"""
Esta funcion busca el maximo de spec alrededeor de un punto x,y, en un
cuadrado de 2*delta_x por 2*delta_y
delta_x, delta_y sirven para definir nx, ny, esto es: cuantos pixels
definir el lugar para buscar el maximo
delta_x : ppm hacia izquierda y derecha
delta_y : ppm hacia arriba y abajo
"""
x_index = find_nearest(ppmDir, x)
y_index = find_nearest(ppmInd, y)
# cuantos ppm son un paso en cada direccion
stepDir = np.abs(ppmDir[1]-ppmDir[0])
stepInd = np.abs(ppmInd[1]-ppmInd[0])
nx = int(delta_x/stepDir)
ny = int(delta_y/stepInd)
spec_reduced = spec[y_index-ny:y_index+ny, x_index-nx:x_index+nx]
ppmDir_reduced = ppmDir[x_index-nx:x_index+nx]
ppmInd_reduced = ppmInd[y_index-ny:y_index+ny]
maximo = spec_reduced.max()
yy, xx = np.where(spec_reduced==maximo)
x = ppmDir_reduced[xx[0]]
y = ppmInd_reduced[yy[0]]
x_index = find_nearest(ppmDir, x)
y_index = find_nearest(ppmInd, y)
plt.contourf(ppmDir_reduced,ppmInd_reduced,spec_reduced)
return x_index, y_index
#------------------------------------------------------------------------------
def find_zeros(array, inicio_index, x_axis=None, N=5):
"""
el objetivo de esta funcion es darle la derivada del spec, y que desde el
punto inicio_index, recorra el array hasta que llegue a cero
"""
# cerca del centro pude pasar varias veces por el cero. A izquiuerda y
# derecha debe tener distintos signos. busco los puntos donde esto ocurre y
# desde ahi arranco.
sgn_left = np.sign(array[inicio_index-N])
sgn_right = np.sign(array[inicio_index+N])
while sgn_left == sgn_right:
sgn_left = np.sign(array[inicio_index-N])
sgn_right = np.sign(array[inicio_index+N])
N += 1
n = inicio_index + N # no arranco desde el max para evitar errores de cambio de signo
sgn_old = 1
sgn_new = 1
while sgn_old == sgn_new:
old = array[n]
sgn_old = np.sign(old)
n += 1
new = array[n]
sgn_new = np.sign(new)
#si no llega a cero pero es constante en variaciones de 1%, corta
#if np.abs((new-old)/old)<0.01:
# break
fin_index = n
n = inicio_index - N # no arranco desde el max para evitar errores de cambio de signo
sgn_old = 1
sgn_new = 1
while sgn_old == sgn_new and n>0:
sgn_old = np.sign(array[n])
n -= 1
sgn_new = np.sign(array[n])
ini_index=n
if not x_axis is None:
plt.figure(432)
plt.plot(x_axis,array*0)
plt.plot(x_axis,array)
plt.plot(x_axis[ini_index:fin_index],array[ini_index:fin_index])
plt.plot(x_axis[inicio_index+N],array[inicio_index+N], 'o')
plt.plot(x_axis[inicio_index],array[inicio_index], 'o')
plt.plot(x_axis[inicio_index-N],array[inicio_index-N], 'o')
return ini_index, fin_index
#------------------------------------------------------------------------------
def integrar(x, y, surf):
"""
integracion de superficie
x, y array 1d de coordenadas.
"""
I = np.trapz(surf, x=y, axis=0)
I = np.trapz(I, x=x)
return I
#------------------------------------------------------------------------------
plt.figure(100)
plt.contour(ppm_x, ppm_y, spec, 25, cmap='jet', vmax=5000000)
plt.show()
x = -3.0
y = 1.9
x_index, y_index = find_peak(ppmDir, ppmInd, spec, x, y, delta_x=0.8, delta_y=0.8)
spec_x = spec[y_index, :]
spec_y = spec[:, x_index]
#plt.figure(2)
#plt.plot(ppmDir, spec_x)
#plt.plot(ppmInd, spec_y)
dspec_x = derivar(spec_x)
dspec_y = derivar(spec_y)
dppmDir = ppmDir[range(dspec_x.size)]
dppmInd = ppmInd[range(dspec_y.size)]
#plt.figure(3)
#plt.plot(dppmDir, dspec_x)
#plt.plot(dppmInd, dspec_y)
ini_index_x, fin_index_x = find_zeros(dspec_x, x_index, x_axis=dppmDir)
plt.figure(4)
plt.plot(ppmDir, spec_x)
plt.plot(ppmDir[ini_index_x:fin_index_x], spec_x[ini_index_x:fin_index_x])
ini_index_y, fin_index_y = find_zeros(dspec_y, y_index, x_axis=dppmInd)
#plt.figure(5)
##plt.plot(ppmDir, spec_x)
#plt.plot(ppmInd[ini_index_y:fin_index_y], spec_y[ini_index_y:fin_index_y])
ini_index_y = 56
plt.figure(200)
plt.title('regiones a integrar')
plt.contour(ppm_x, ppm_y, spec, 25, cmap='jet', vmax=5000000)
#slices:
slice_x = ppm_x[ini_index_y:fin_index_y,ini_index_x:fin_index_x]
slice_y = ppm_y[ini_index_y:fin_index_y,ini_index_x:fin_index_x]
slice_spec = spec[ini_index_y:fin_index_y,ini_index_x:fin_index_x]
plt.contourf(slice_x, slice_y, slice_spec, cmap='jet')
plt.show()
slice_ppmDir = ppmDir[ini_index_x:fin_index_x]
slice_ppmInd = ppmInd[ini_index_y:fin_index_y]
I = integrar(slice_ppmDir, slice_ppmInd, slice_spec)
print(I)
xi_32 = ini_index_x
xf_32 = fin_index_x
yi_32 = ini_index_y
yf_32 = fin_index_y | [
"santimaldonadoochoa@gmail.com"
] | santimaldonadoochoa@gmail.com |
9fb9fc214255db4d526057385f0521a8acb7109e | a4ac13bf840b6591ce9eec224f5926181624cb55 | /crypt.py | 0a991fd3fb926e0d6117edd5becfd82e359bfacd | [] | no_license | master801/Innocent-Fille-MPK | c729d9d673e26ca0302903457d92beee90482b49 | 522a100f2a1ac79fc496669747843ccf3147845d | refs/heads/master | 2023-07-01T10:42:38.912143 | 2021-08-09T01:41:16 | 2021-08-09T01:41:16 | 394,111,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!/usr/bin/env python3
import zlib
def decrypt_entry(data: bytes):
# TODO
return data
def encrypt_entry(data: bytes):
# TODO
return data
def is_encrypted(data: bytes):
# TODO
return False
def is_compressed(data: bytes):
return data[:0x2] == b'\x78\x9C'
def decompress(data: bytes):
return zlib.decompress(data)
def compress(data: bytes):
# TODO
return zlib.compress(data, level=1)
| [
"master801@users.noreply.github.com"
] | master801@users.noreply.github.com |
01c988d748aab051fbe7f0defccb3d10859b7b1e | 488dbc496c55ad5a775d9dcbf3fe0f44b4b198b5 | /project.py | a70ff36904044ed332670947ed3f6a806582bef0 | [] | no_license | siloni07/Catalog-item-project | 804b0e2403076832f7a554af4e35a75dddfb75f7 | 2e59e8a5713556a664a47b0105a1a8977b4c881a | refs/heads/master | 2020-06-21T06:49:21.097681 | 2019-07-19T19:45:14 | 2019-07-19T19:45:14 | 197,373,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,592 | py | import requests
from flask import make_response
import json
import httplib2
from oauth2client.client import FlowExchangeError
from oauth2client.client import flow_from_clientsecrets
from database_setup import Base, Catalog, CatalogItem, User
from flask_httpauth import HTTPBasicAuth
from sqlalchemy.orm import sessionmaker,joinedload
from sqlalchemy import create_engine, asc, desc
from flask import Flask, render_template, request, redirect, jsonify, url_for, flash, session as login_session, g
import random
import string
app = Flask(__name__)
auth = HTTPBasicAuth()
# Connect to Database and create database session
engine = create_engine(
'sqlite:///catalogitem.db',
connect_args={
'check_same_thread': False})
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
CLIENT_ID = json.loads(
open('client_secret.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog WebApp"
@app.route('/login')
def showLogin():
state = ''.join(
random.choice(
string.ascii_uppercase +
string.digits) for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
print("entered in gconnect")
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secret.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print("Token's client ID does not match app's.")
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(
json.dumps('Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['provider'] = 'google'
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print("done!")
return output
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
print 'Access Token is None'
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
print 'In gdisconnect access token is %s', access_token
print 'User name is: '
print login_session['username']
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
response = make_response(
json.dumps(
'Failed to revoke token for given user.',
400))
response.headers['Content-Type'] = 'application/json'
return response
# Show all catalogs
@app.route('/')
@app.route('/catalog')
def showCatalogs():
catalog = session.query(Catalog).order_by(asc(Catalog.name))
catalogItem = session.query(CatalogItem).order_by(desc(CatalogItem.id))
if 'username' not in login_session:
return render_template(
'publiccatalogs.html',
catalog=catalog,
catalogitem=catalogItem)
else:
return render_template(
'catalogs.html',
catalog=catalog,
catalogitem=catalogItem)
#Create new Item in specific catalog
@app.route('/catalog/<int:catalog_id>/newItem/', methods=['GET', 'POST'])
def newCatalog(catalog_id):
if 'username' not in login_session:
return redirect('/login')
catalog = session.query(Catalog).filter_by(id=catalog_id).one()
if request.method == 'POST':
newCatalog = CatalogItem(
title=request.form['title'],
description=request.form['description'],
catalog_id=catalog_id)
session.add(newCatalog)
session.commit()
flash('New Menu %s Item Successfully Created' % (newCatalog.title))
return redirect(url_for('showCatalogs'))
else:
return render_template('newItem.html', catalog=catalog)
#Show all menu items for specific catalog
@app.route('/catalog/<int:catalog_id>/MenuItem', methods=['GET'])
def showMenuItem(catalog_id):
catalog = session.query(Catalog).filter_by(id=catalog_id).one()
catalogItem = session.query(CatalogItem).filter_by(
catalog_id=catalog_id).all()
creator = getUserInfo(catalog.user_id)
if 'username' not in login_session:
return render_template(
'publicmenuitem.html',
catalogItem=catalogItem,
catalog=catalog)
else:
return render_template(
'showmenuitem.html',
catalogItem=catalogItem,
catalog=catalog)
#Edit menu Item for specific catalog
@app.route(
'/catalog/<int:menu_id>/<int:catalog_id>/EditItem',
methods=[
'GET',
'POST'])
def editMenuItem(menu_id, catalog_id):
if 'username' not in login_session:
return redirect('/catalog')
catalog = session.query(Catalog).filter_by(id=catalog_id).one()
catalogItem = session.query(CatalogItem).filter_by(id=menu_id).one()
if request.method == 'POST':
if request.form['title']:
catalogItem.title = request.form['title']
if request.form['description']:
catalogItem.description = request.form['description']
if request.form['category']:
catalogItem.price = request.form['category']
session.add(catalogItem)
session.commit()
flash('Menu Item Successfully Edited')
return redirect(
url_for(
'showMenuItem',
catalog_id=catalogItem.catalog_id))
else:
return render_template(
'editmenuitem.html',
catalogItem=catalogItem,
catalog=catalog)
#Delete menu item for specific catalog
@app.route(
    '/catalog/<int:menu_id>/<int:catalog_id>/DeleteItem',
    methods=[
        'GET',
        'POST'])
def deleteMenuItem(menu_id, catalog_id):
    """Delete one menu item: GET shows a confirmation page, POST deletes."""
    if 'username' not in login_session:
        return redirect('/catalog')
    catalog = session.query(Catalog).filter_by(id=catalog_id).one()
    item = session.query(CatalogItem).filter_by(id=menu_id).one()
    if request.method != 'POST':
        # Confirmation page for GET requests.
        return render_template(
            'deletemenuitem.html', catalogItem=item, catalog=catalog)
    session.delete(item)
    session.commit()
    flash('Menu Item Successfully Deleted')
    return redirect(url_for('showMenuItem', catalog_id=item.catalog_id))
#JSON endpoints
@app.route('/catalog.json')
def catalogMenuItemJSON():
    """Serialize every catalog with its nested items as one JSON payload."""
    # joinedload avoids a per-catalog query for the items (N+1 problem).
    categories = session.query(Catalog).options(
        joinedload(Catalog.catalog_items)).all()
    payload = []
    for cat in categories:
        entry = dict(cat.serialize,
                     items=[item.serialize for item in cat.catalog_items])
        payload.append(entry)
    return jsonify(Catalog=payload)
def createUser(login_session):
    """Persist a User built from the login session and return its id."""
    user = User(
        name=login_session['username'],
        email=login_session['email'],
        picture=login_session['picture'])
    session.add(user)
    session.commit()
    # Re-query by email (unique) to obtain the freshly assigned primary key.
    created = session.query(User).filter_by(email=login_session['email']).one()
    return created.id
def getUserInfo(user_id):
    """Return the User row with the given primary key (raises if absent)."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Return the id of the User registered under *email*, or None.

    BUG FIX: this previously returned ``user.menu_id``; the sibling helper
    ``createUser`` returns ``user.id`` and callers use the value as a user
    primary key, so ``user.id`` is the intended attribute.  Also narrowed
    ``except BaseException`` (which swallows KeyboardInterrupt/SystemExit)
    to ``except Exception``.
    """
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # No such user (or lookup failed): signal "unknown" to the caller.
        return None
#To logout from gmail account
@app.route('/disconnect')
def disconnect():
    """Log the user out by clearing all OAuth keys from the session."""
    if 'provider' in login_session:
        # Same keys, same order, same KeyError semantics as individual dels.
        for key in ('gplus_id', 'access_token', 'username', 'email',
                    'picture', 'user_id', 'provider'):
            del login_session[key]
        flash("You have successfully been logged out.")
    # Logged in or not, always land back on the catalog overview.
    return redirect(url_for('showCatalogs'))
if __name__ == '__main__':
    app.secret_key = 'super_secret_key'
    # BUG FIX: Flask reads the lowercase ``debug`` attribute; the original
    # ``app.Debug = True`` only set an unused attribute, so the development
    # server never actually ran with the debugger/reloader enabled.
    app.debug = True
    app.run(host='0.0.0.0', port=8000)
| [
"silonirs@gmail.com"
] | silonirs@gmail.com |
b9d8f783de1cbe1c4088bba5d1b6822b8742917c | 4772c9d6c39b3888c939826d6f728e4a00938537 | /lfmr.py | e5f1ecc692999c6a0071acb424ff3e4718c549f1 | [] | no_license | Praveenstein/Intern_Assignment | d8603b95a3a7634cb28f7a7f62dcfcb67a0aa483 | e8fe8e0d6c26aed7951e9b0ede94af065c89d2f7 | refs/heads/master | 2022-12-06T08:21:21.858355 | 2020-08-27T13:06:53 | 2020-08-27T13:06:53 | 285,256,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from functools import reduce
def main():
    """Demonstrate lambda, filter, map and reduce on a small list."""
    # A lambda computing the square of its argument.
    square = lambda x: x ** 2
    print(square(5))
    numbers = list(range(1, 11))
    # filter(): keep only the even values.
    print(list(filter(lambda x: x % 2 == 0, numbers)))
    # map(): square every value.
    print(list(map(lambda x: x ** 2, numbers)))
    # reduce(): fold the list into its sum.
    total = reduce(lambda a, b: a + b, numbers)
    print("Sum using Reduce Function: ", total)
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | Praveenstein.noreply@github.com |
4eced8ea40a44c5213b1226f8d085f04e888c087 | fd19a87fcaed13a36c820f156df079981ef8cfd5 | /exponent.py | 7ecf48abfd73c9f203d81bc1d4ac4efecd2fe8b1 | [] | no_license | jhardingsd/python-2020 | 5ba48b1a086f35f74527b87ca806009263526e52 | ad79d2e32f63a5c86ffac8aff49836ed1f9b981a | refs/heads/master | 2022-12-15T19:39:03.217859 | 2020-09-10T01:28:42 | 2020-09-10T01:28:42 | 290,886,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | number = 2
exponent = 2
product = 1
for each_Pass in range(exponent):
product = product * number
print(product,'', end ='')
| [
"jhardingsd@gmail.com"
] | jhardingsd@gmail.com |
e9418ebaad3d4ae906853a3aff5909044e325261 | 91c45ff5a74b827ba244de20fe27408a9e4481c4 | /final_subtraction_quiz.py | 2c02004d2f2d561363eece6b105d4c8f4a0d3e05 | [] | no_license | Teddytmetalz/subtractionquiz1 | e5a1f52ce4deaca156b2ad9d00a7f91639e2e609 | 0038a87dea637f3fde210cf0aa6c35c54f507c59 | refs/heads/master | 2021-04-15T13:07:28.854546 | 2018-03-23T13:18:32 | 2018-03-23T13:18:32 | 126,487,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import random
import time
# count the number of the correct answers
correctcount = 0
# count the total number of questions
count = 0
# constant
number_of_questions = 10
# get start
startTime = time.time()
while count < number_of_questions:
#two random integers
num1 = random.randint(0, 10)
num2 = random.randint(0, 10)
#answer
count += 1
ans = int(input("What is {0} - {1} ?".format(num1, num2)))#"what" + str(num1) + "-" + str(num2) + "?")
#display results
difference = (num1 - num2)
if (difference == ans):
print("correct")
correctcount += 1
else:
print("wrong.\n", num1, "-",num2, "is", num1 - num2)
# increase the count
#count += 1
# get end
endTime = time.time()
| [
"noreply@github.com"
] | Teddytmetalz.noreply@github.com |
532c3eceacb56c836bfc5e3f90d7a178fc0c160d | d3b3e46e3985cadb09936996a6549fd6e77f3851 | /utilities.py | b99d8ce3be85a0bf32c7c4e360ec30e25a945186 | [] | no_license | Grozby/AL326G | baac7becac1d1a30219bd6ac6b962dd0d39b3177 | 30a4b3f88151e3a04c6d4ca177e169c04011e3a3 | refs/heads/master | 2021-10-27T21:04:44.439096 | 2019-04-19T19:02:57 | 2019-04-19T19:02:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | import matplotlib.pyplot as mplt
def factorize(n):
factors = []
for i in range(1, n + 1):
if int(n/i) == n/i:
factors.append([i, int(n/i)])
return factors
def showimage(image):
"""displays a single image in SciView"""
mplt.figure()
mplt.imshow(image)
mplt.show() | [
"matteo.biasielli@mail.polimi.it"
] | matteo.biasielli@mail.polimi.it |
9c1e7be9edf9942f893030ed4955e91530284ec0 | 4a065dce45e1492fd7090411e12986202f2dd44e | /image_processing.py | d041e104405d87c50a3b4b86c3d65766dbb45fd8 | [] | no_license | charuj/diab_retin | 07259b85c9771ae1f377f89fa924f89b62616007 | 6fc07b675e0697b581d553b83e64bd914dfb8110 | refs/heads/master | 2021-01-11T03:31:51.239492 | 2016-11-17T22:54:03 | 2016-11-17T22:54:03 | 68,947,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py |
'''
Mac terminal script to reduce image sizes:
-get to the directory where the images are (using cd)
sips -Z 100 *.jpeg
- sips is the command being used
-Z tells it to maintain the aspect ratio
- 100 is the new size of the image
- *.jpeg instructs the computer to downsize every image ending in jpeg
'''
import numpy as np
from numpy import genfromtxt
import cPickle as pickle
from PIL import Image
# List all files of the directory containing the images
import os
from os import listdir
from os.path import isfile, join
# Open the csv which contains the labels for the images (i.e. the rating)
# import csv
# with open('trainLabels2.csv', 'rb') as csvfile:
# filereader= csv.reader(csvfile, delimiter=' ', quotechar='|')
#
# Python 2 script: builds pickled label and flattened-image arrays for the
# diabetic-retinopathy sample set.
image_labels = genfromtxt('trainLabels2.csv', delimiter=',') # convert CSV into array
print image_labels.shape
# Drop the CSV header row.
image_labels= image_labels[1:, :]
# First ten ratings (second column) as the sample targets.
sample_labels= image_labels[:10,1]
# NOTE(review): np.delete is NOT in-place -- it returns a new array, so the
# two calls below discard their result and have no effect.  Verify whether
# rows were meant to be removed here (cf. the manual ``del`` further down).
np.delete(sample_labels, 2,0)
np.delete(sample_labels, 2, 0)
# Column vector of shape (num_samples, 1) for later matrix arithmetic.
Y_sample= sample_labels.reshape([sample_labels.shape[0],1])
pickle.dump(Y_sample, open( "sampletarget_labels.p", "wb" ))
cwd = os.getcwd() # Get the current working directory (cwd)
# NOTE: os.chdir returns None, so ``newcwd`` below is always None.
newcwd = os.chdir("/Users/charujaiswal/PycharmProjects/diab_retin/sample")
files = os.listdir("/Users/charujaiswal/PycharmProjects/diab_retin/sample") # Get all the files in that directory
print("Files in '%s': %s" % (newcwd, files))
root= "/Users/charujaiswal/PycharmProjects/diab_retin/sample"
images_list= []
for item in os.listdir("/Users/charujaiswal/PycharmProjects/diab_retin/sample"):
    if not item.startswith('.') and isfile(join(root, item)): # to get rid of the hidden file ".DS_Store"; http://stackoverflow.com/questions/15235823/how-to-ignore-hidden-files-in-python-functions
        img= Image.open(item)
        arr = np.array(img) # at this point each individual array will be 3D
        pixels1D= arr.flatten()
        pixels2D= pixels1D.reshape([pixels1D.shape[0], 1]) # turn 1D array into 2D that has shape (#, 1)... helps with later matrix mult
        pixels2D= np.transpose(pixels2D)
        images_list.append(pixels2D)
for i in range(len(images_list)):
    print images_list[i].shape
## FOR SOME REASON resizing the images down didn't give all of the same shape, which then gives issues when vstacking
# Hard-coded workaround: drop the two mis-sized rows (indices shift after
# each del, so this removes original positions 2 and 3).
del images_list[2]
del images_list[2]
print "After Deletion"
for i in range(len(images_list)):
    print images_list[i].shape
images_array = np.vstack(images_list) # build an array of all the images
print images_array.shape
pickle.dump(images_array, open( "file_images_array.p", "wb" ))
| [
"clmjaiswal@gmail.com"
] | clmjaiswal@gmail.com |
f4fd6672d70923d43d31ac47e197341fd4adff0a | 0132c46e1bcd4cbbad5eb63b6e1e3745dc16b9bf | /virtual/bin/django-admin | 3e8902e758c864f89ff09f73f6f54d26d461bd4e | [] | no_license | costamay/Bara-Blog | 17cb4115bbe7fcb2bbce0a3fccf1232f36bc2963 | 79f4598af7f5ef461bb0c9701245fabbc111f040 | refs/heads/development | 2022-11-27T13:57:03.109645 | 2020-08-05T16:25:18 | 2020-08-05T16:25:18 | 275,866,945 | 0 | 0 | null | 2020-06-29T16:34:18 | 2020-06-29T16:17:35 | Python | UTF-8 | Python | false | false | 315 | #!/home/moringa/Documents/django-projects/Bara-Blog/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip the '-script.pyw' / '.exe' suffix that setuptools console
    # scripts carry on Windows, then hand control to Django's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"costamay"
] | costamay | |
57abb1d492da22ca8039ba1f34f17f15d3e3ae53 | de626f1892619968efbaa22ea26079ee2269e799 | /test/test_Master_dialog.py | 02100573c7b18bcfb48fb78df5fb2f3d2c296df2 | [] | no_license | gerardoros/CartograficoQgisPlugin | 7e8724cec0469d0494090b3557e9d4e967935121 | 844fa1052f435478e2e946099d7dbd6b1b97c311 | refs/heads/master | 2023-04-04T08:06:30.967894 | 2021-04-07T14:15:37 | 2021-04-07T14:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Master'
__date__ = '2018-04-27'
__copyright__ = 'Copyright 2018, Master'
import unittest
from PyQt5.QtGui import QDialogButtonBox, QDialog
from Master_dialog import MasterDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class MasterDialogTest(unittest.TestCase):
    """Test dialog works."""
    # NOTE(review): QDialogButtonBox and QDialog are imported from
    # PyQt5.QtGui at the top of this module; in PyQt5 both classes live in
    # PyQt5.QtWidgets (QtGui is the PyQt4 layout) -- verify the import
    # actually resolves before trusting these tests.
    def setUp(self):
        """Runs before each test."""
        self.dialog = MasterDialog(None)
    def tearDown(self):
        """Runs after each test."""
        self.dialog = None
    def test_dialog_ok(self):
        """Test we can click OK."""
        # Clicking OK must close the dialog with the Accepted result code.
        button = self.dialog.button_box.button(QDialogButtonBox.Ok)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Accepted)
    def test_dialog_cancel(self):
        """Test we can click cancel."""
        # Clicking Cancel must close the dialog with the Rejected result.
        button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(MasterDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"roslop_1841@hotmail.com"
] | roslop_1841@hotmail.com |
a96cb429f9ca4140e68c19d023448121c5c2e1c3 | 3ac84fa46db498e914f6e1aaf2eff490a63807a1 | /devstack/python-keystoneclient/keystoneclient/tests/auth/test_identity_common.py | 4a0cf572983e9f76f36dece4f158b8966f464bf9 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | onsoku/horizon_review | 3c5f1a8f863142f3f724f59771ad39604bca4c20 | 80cca0badc61b4754ef2c10f23a0ee48cd227445 | refs/heads/master | 2020-05-20T11:11:25.625186 | 2015-01-21T01:01:59 | 2015-01-21T01:01:59 | 29,002,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,422 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import uuid
from oslo.utils import timeutils
import six
from keystoneclient import access
from keystoneclient.auth import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient import fixture
from keystoneclient import session
from keystoneclient.tests import utils
@six.add_metaclass(abc.ABCMeta)
class CommonIdentityTests(object):
    """Version-agnostic auth-plugin tests shared by the V2 and V3 suites.

    Subclasses supply :meth:`create_auth_plugin`, :meth:`get_auth_data` and
    :attr:`version`; everything else runs identically against both APIs.
    """
    TEST_ROOT_URL = 'http://127.0.0.1:5000/'
    TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
    TEST_COMPUTE_PUBLIC = 'http://nova/novapi/public'
    TEST_COMPUTE_INTERNAL = 'http://nova/novapi/internal'
    TEST_COMPUTE_ADMIN = 'http://nova/novapi/admin'
    TEST_PASS = uuid.uuid4().hex
    def setUp(self):
        super(CommonIdentityTests, self).setUp()
        # Version-specific auth/admin endpoints, e.g. .../v2.0 or .../v3.
        self.TEST_URL = '%s%s' % (self.TEST_ROOT_URL, self.version)
        self.TEST_ADMIN_URL = '%s%s' % (self.TEST_ROOT_ADMIN_URL, self.version)
        self.TEST_DISCOVERY = fixture.DiscoveryList(href=self.TEST_ROOT_URL)
        self.stub_auth_data()
    @abc.abstractmethod
    def create_auth_plugin(self, **kwargs):
        """Create an auth plugin that makes sense for the auth data.

        It doesn't really matter what auth mechanism is used but it should be
        appropriate to the API version.
        """
    @abc.abstractmethod
    def get_auth_data(self, **kwargs):
        """Return fake authentication data.

        This should register a valid token response and ensure that the compute
        endpoints are set to TEST_COMPUTE_PUBLIC, _INTERNAL and _ADMIN.
        """
    def stub_auth_data(self, **kwargs):
        # Register the subclass-provided token body with the request stubber.
        token = self.get_auth_data(**kwargs)
        self.stub_auth(json=token)
    @abc.abstractproperty
    def version(self):
        """The API version being tested."""
    def test_discovering(self):
        # Endpoint discovery document served from the compute admin URL.
        self.stub_url('GET', [],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      json=self.TEST_DISCOVERY)
        body = 'SUCCESS'
        # which gives our sample values
        self.stub_url('GET', ['path'], text=body)
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin',
                                               'version': self.version})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(body, resp.text)
        new_body = 'SC SUCCESS'
        # if we don't specify a version, we use the URL from the SC
        self.stub_url('GET', ['path'],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      text=new_body)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin'})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(new_body, resp.text)
    def test_discovery_uses_session_cache(self):
        # register responses such that if the discovery URL is hit more than
        # once then the response will be invalid and not point to COMPUTE_ADMIN
        resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
        self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], text=body)
        # now either of the two plugins I use, it should not cause a second
        # request to the discovery url.
        s = session.Session()
        a = self.create_auth_plugin()
        b = self.create_auth_plugin()
        for auth in (a, b):
            resp = s.get('/path',
                         auth=auth,
                         endpoint_filter={'service_type': 'compute',
                                          'interface': 'admin',
                                          'version': self.version})
            self.assertEqual(200, resp.status_code)
            self.assertEqual(body, resp.text)
    def test_discovery_uses_plugin_cache(self):
        # register responses such that if the discovery URL is hit more than
        # once then the response will be invalid and not point to COMPUTE_ADMIN
        resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
        self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], text=body)
        # now either of the two sessions I use, it should not cause a second
        # request to the discovery url.
        sa = session.Session()
        sb = session.Session()
        auth = self.create_auth_plugin()
        for sess in (sa, sb):
            resp = sess.get('/path',
                            auth=auth,
                            endpoint_filter={'service_type': 'compute',
                                             'interface': 'admin',
                                             'version': self.version})
            self.assertEqual(200, resp.status_code)
            self.assertEqual(body, resp.text)
    def test_discovering_with_no_data(self):
        # which returns discovery information pointing to TEST_URL but there is
        # no data there.
        self.stub_url('GET', [],
                      base_url=self.TEST_COMPUTE_ADMIN,
                      status_code=400)
        # so the url that will be used is the same TEST_COMPUTE_ADMIN
        body = 'SUCCESS'
        self.stub_url('GET', ['path'], base_url=self.TEST_COMPUTE_ADMIN,
                      text=body, status_code=200)
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        resp = s.get('/path', endpoint_filter={'service_type': 'compute',
                                               'interface': 'admin',
                                               'version': self.version})
        self.assertEqual(200, resp.status_code)
        self.assertEqual(body, resp.text)
    def test_asking_for_auth_endpoint_ignores_checks(self):
        # The AUTH_INTERFACE sentinel must always return the auth URL itself.
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        auth_url = s.get_endpoint(service_type='compute',
                                  interface=base.AUTH_INTERFACE)
        self.assertEqual(self.TEST_URL, auth_url)
    def _create_expired_auth_plugin(self, **kwargs):
        # Helper: plugin pre-loaded with a token that expired 20 minutes ago.
        expires = timeutils.utcnow() - datetime.timedelta(minutes=20)
        expired_token = self.get_auth_data(expires=expires)
        expired_auth_ref = access.AccessInfo.factory(body=expired_token)
        body = 'SUCCESS'
        self.stub_url('GET', ['path'],
                      base_url=self.TEST_COMPUTE_ADMIN, text=body)
        a = self.create_auth_plugin(**kwargs)
        a.auth_ref = expired_auth_ref
        return a
    def test_reauthenticate(self):
        # Default behaviour: an expired token is replaced transparently.
        a = self._create_expired_auth_plugin()
        expired_auth_ref = a.auth_ref
        s = session.Session(auth=a)
        self.assertIsNot(expired_auth_ref, a.get_access(s))
    def test_no_reauthenticate(self):
        # With reauthenticate=False the expired token is kept as-is.
        a = self._create_expired_auth_plugin(reauthenticate=False)
        expired_auth_ref = a.auth_ref
        s = session.Session(auth=a)
        self.assertIs(expired_auth_ref, a.get_access(s))
    def test_invalidate(self):
        a = self.create_auth_plugin()
        s = session.Session(auth=a)
        # trigger token fetching
        s.get_token()
        self.assertTrue(a.auth_ref)
        # First invalidate drops the cached token; a second is a no-op.
        self.assertTrue(a.invalidate())
        self.assertIsNone(a.auth_ref)
        self.assertFalse(a.invalidate())
class V3(CommonIdentityTests, utils.TestCase):
    """Concrete CommonIdentityTests run against the v3 Identity API."""
    @property
    def version(self):
        return 'v3'
    def get_auth_data(self, **kwargs):
        # Fake v3 token with identity + compute endpoints in RegionOne.
        token = fixture.V3Token(**kwargs)
        region = 'RegionOne'
        svc = token.add_service('identity')
        svc.add_standard_endpoints(admin=self.TEST_ADMIN_URL, region=region)
        svc = token.add_service('compute')
        svc.add_standard_endpoints(admin=self.TEST_COMPUTE_ADMIN,
                                   public=self.TEST_COMPUTE_PUBLIC,
                                   internal=self.TEST_COMPUTE_INTERNAL,
                                   region=region)
        return token
    def stub_auth(self, subject_token=None, **kwargs):
        # v3 returns the token id in the X-Subject-Token response header.
        if not subject_token:
            subject_token = self.TEST_TOKEN
        kwargs.setdefault('headers', {})['X-Subject-Token'] = subject_token
        self.stub_url('POST', ['auth', 'tokens'], **kwargs)
    def create_auth_plugin(self, **kwargs):
        kwargs.setdefault('auth_url', self.TEST_URL)
        kwargs.setdefault('username', self.TEST_USER)
        kwargs.setdefault('password', self.TEST_PASS)
        return v3.Password(**kwargs)
class V2(CommonIdentityTests, utils.TestCase):
    """Concrete CommonIdentityTests run against the v2.0 Identity API."""
    @property
    def version(self):
        return 'v2.0'
    def create_auth_plugin(self, **kwargs):
        kwargs.setdefault('auth_url', self.TEST_URL)
        kwargs.setdefault('username', self.TEST_USER)
        kwargs.setdefault('password', self.TEST_PASS)
        return v2.Password(**kwargs)
    def get_auth_data(self, **kwargs):
        # Fake v2 token with identity + compute endpoints in RegionOne.
        token = fixture.V2Token(**kwargs)
        region = 'RegionOne'
        svc = token.add_service('identity')
        svc.add_endpoint(self.TEST_ADMIN_URL, region=region)
        svc = token.add_service('compute')
        svc.add_endpoint(public=self.TEST_COMPUTE_PUBLIC,
                         internal=self.TEST_COMPUTE_INTERNAL,
                         admin=self.TEST_COMPUTE_ADMIN,
                         region=region)
        return token
    def stub_auth(self, **kwargs):
        # v2 authenticates via POST /tokens; token id lives in the body.
        self.stub_url('POST', ['tokens'], **kwargs)
class CatalogHackTests(utils.TestCase):
    """Tests for upgrading a v2 catalog endpoint to v3 via discovery."""
    TEST_URL = 'http://keystone.server:5000/v2.0'
    OTHER_URL = 'http://other.server:5000/path'
    IDENTITY = 'identity'
    BASE_URL = 'http://keystone.server:5000/'
    V2_URL = BASE_URL + 'v2.0'
    V3_URL = BASE_URL + 'v3'
    def test_getting_endpoints(self):
        """Asking for v3 against a v2 catalog follows discovery to the v3 URL."""
        disc = fixture.DiscoveryList(href=self.BASE_URL)
        self.stub_url('GET',
                      ['/'],
                      base_url=self.BASE_URL,
                      json=disc)
        token = fixture.V2Token()
        service = token.add_service(self.IDENTITY)
        service.add_endpoint(public=self.V2_URL,
                             admin=self.V2_URL,
                             internal=self.V2_URL)
        self.stub_url('POST',
                      ['tokens'],
                      base_url=self.V2_URL,
                      json=token)
        v2_auth = v2.Password(self.V2_URL,
                              username=uuid.uuid4().hex,
                              password=uuid.uuid4().hex)
        sess = session.Session(auth=v2_auth)
        endpoint = sess.get_endpoint(service_type=self.IDENTITY,
                                     interface='public',
                                     version=(3, 0))
        self.assertEqual(self.V3_URL, endpoint)
    def test_returns_original_when_discover_fails(self):
        """If discovery 404s, fall back to the catalog's original v2 URL."""
        token = fixture.V2Token()
        service = token.add_service(self.IDENTITY)
        service.add_endpoint(public=self.V2_URL,
                             admin=self.V2_URL,
                             internal=self.V2_URL)
        self.stub_url('POST',
                      ['tokens'],
                      base_url=self.V2_URL,
                      json=token)
        self.stub_url('GET', [], base_url=self.BASE_URL, status_code=404)
        v2_auth = v2.Password(self.V2_URL,
                              username=uuid.uuid4().hex,
                              password=uuid.uuid4().hex)
        sess = session.Session(auth=v2_auth)
        endpoint = sess.get_endpoint(service_type=self.IDENTITY,
                                     interface='public',
                                     version=(3, 0))
        self.assertEqual(self.V2_URL, endpoint)
| [
"onsoku@onsoku.sakura.ne.j"
] | onsoku@onsoku.sakura.ne.j |
20753a5eec6cbef1d2f88ca4dca223a00463326f | ef9dfb78938ecf500f2378a84eca8051255a836c | /star-travel/venv/bin/pycodestyle | 93fb5f501bc9e2c6985163ea18cf05d0f8008eee | [] | no_license | lesage20/djangoinitiation | cd11ed6a1cb16356075f4af3be8a93db31ba8c9f | 818e3593d16c1fac5b2741605a4675d7833c18b2 | refs/heads/master | 2021-02-16T18:24:02.846455 | 2020-03-11T04:44:24 | 2020-03-11T04:44:24 | 245,033,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/nan/Desktop/star-travel/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
    # Strip the '-script.pyw' / '.exe' suffix that setuptools console
    # scripts carry on Windows, then delegate to pycodestyle's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
| [
"angezanou00@gmail.com"
] | angezanou00@gmail.com | |
20b1801175bc4ba61a0f456454a4b37b260c5e9c | f1ff65235142c14c3c027cf5ff300a926c50a071 | /center/admin.py | e2a57be8e034fa3c2ad93391203bd16cf7d50984 | [] | no_license | Shehab-Magdy/Django-MyOnlineStore | 15a51843266e8afecb32437aa0b7d0b556d12d1c | 062dd9379be96a30959bab62fc70c2adeca3ca41 | refs/heads/master | 2020-11-23T21:09:39.018599 | 2020-01-25T11:11:10 | 2020-01-25T11:11:10 | 227,821,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from django.contrib import admin
from .models import Category, Product, Like, Account
from django.contrib.auth.admin import UserAdmin
# Register your models here.
# Plain registrations: default ModelAdmin is enough for these two models.
admin.site.register(Category)
admin.site.register(Product)
class AccountAdmin(UserAdmin):
    """Admin configuration for the custom Account user model."""
    # Columns shown on the change-list page.
    list_display=('email','username','date_joined','last_login','is_admin','is_staff')
    search_fields=('email','username')
    # Timestamps are system-managed; never editable in the admin form.
    readonly_fields=('date_joined','last_login')
    # Clear UserAdmin's default fieldsets/filters, which reference fields
    # the custom Account model does not have.
    fieldsets=()
    filter_horizontal=()
    list_filter=()
admin.site.register(Account, AccountAdmin)
"cegres1@hotmail.com"
] | cegres1@hotmail.com |
b26103ab46dc18e9fbbd785126f2ce5eecec5845 | 4d1e8f320ef3480300734fdbd5a8db74e7fbaf15 | /material/frontend/views/__init__.py | f05e68547701f87c80c31bfe835757d26d6ae1a2 | [
"Apache-2.0"
] | permissive | kaajavi/itsventory | c956e727f32530d1ef10455eec1a3ed0c32ed487 | 0543f402e124828c2af662f73361c68fc45002a2 | refs/heads/master | 2020-03-16T12:34:52.955622 | 2018-05-08T22:15:33 | 2018-05-08T22:15:33 | 132,670,159 | 0 | 0 | Apache-2.0 | 2018-05-08T22:16:54 | 2018-05-08T22:04:58 | CSS | UTF-8 | Python | false | false | 343 | py | from .create import CreateModelView
from .delete import DeleteModelView
from .detail import DetailModelView
from .list import ListModelView
from .update import UpdateModelView
from .viewset import ModelViewSet
__all__ = [
'CreateModelView', 'ListModelView', 'UpdateModelView',
'DeleteModelView', 'DetailModelView', 'ModelViewSet',
]
| [
"javierguignard@gmail.com"
] | javierguignard@gmail.com |
df052ef6da6da5333f04bee7a2281b062e7a0b9e | 47660118ba7ac2e8c908d783497bed029eeda3e3 | /2019/day_14.py | 2929cb0cf46869e3f4aa1009071a07dad716a4b4 | [] | no_license | conorpbrady/advent_of_code | c9a11e0c71d7ff2b38e20094c4f463e9acf6bb57 | 87552edbf0cde0445eeb23563d50503ae2f8918a | refs/heads/master | 2020-12-13T21:43:17.697335 | 2020-02-15T00:30:48 | 2020-02-15T00:30:48 | 234,539,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,787 | py | import math
class Reaction:
    """One parsed reaction line, e.g. ``'7 A, 1 B => 1 C'``.

    Attributes:
        reactants: list of ``(amount, chemical)`` tuples from the left side.
        product:   name of the chemical produced.
        pc:        amount of product one reaction yields.
    """
    def split_equation(self, s):
        # Split "lhs => rhs" on the arrow (4 characters wide).
        pos = s.find(" => ")
        return s[:pos], s[pos + 4:]
    def split_coeff(self, s):
        # "7 A" -> (7, "A")
        tokens = s.split()
        return (int(tokens[0]), tokens[1])
    def split_reactants(self, s):
        # Comma-separated reactant terms, each parsed to (amount, name).
        return [self.split_coeff(term) for term in s.split(', ')]
    def __init__(self, s):
        lhs, rhs = self.split_equation(s)
        self.reactants = self.split_reactants(lhs)
        amount, name = self.split_coeff(rhs)
        self.product = name
        self.pc = int(amount)
def find_reaction(prd):
    """Return the reaction producing chemical *prd*, or 1 if none exists."""
    matches = (r for r in reactions if r.product == prd)
    return next(matches, 1)
def tabs(level):
    """Return an indentation string of *level* spaces (for debug prints)."""
    # Idiom fix: string repetition replaces the manual accumulation loop.
    return ' ' * level
def find_reactants(coeff, rct, level=0):
    """Recursively expand *coeff* units of chemical *rct* down to ORE.

    Side effects: adds consumed ORE to the global ``ore_sum`` and banks
    surplus intermediate chemicals in the global ``resource_store``.
    ``level`` only controls the indentation of the (commented-out) debug
    prints.
    """
    global ore_sum
    indent = tabs(level)
    reaction = find_reaction(rct)
    pc = reaction.pc
    # Spend any banked surplus of this chemical before producing more.
    if reaction.product in resource_store:# and reaction.reactants[0][1] != "ORE":
        #print("{}Using {} {} from resource store".format(indent, resource_store[reaction.product], reaction.product))
        coeff -= resource_store[reaction.product]
        resource_store[reaction.product] = 0
    # Reactions run in whole multiples; over-production goes to the store.
    reactions_needed = math.ceil(coeff / pc)
    remainder = (reactions_needed * pc) - coeff
    if(remainder > 0):
        if reaction.product in resource_store:
            resource_store[reaction.product] += remainder
        else:
            resource_store[reaction.product] = remainder
    #print("{}To produce {} {}, we need:".format(indent, coeff, reaction.product))
    #print("{}Leaving {} {} as a remainder".format(indent, remainder, reaction.product))
    for reactant in reaction.reactants:
        # Total amount of this reactant across all reaction runs.
        ratio = reactions_needed * reactant[0]
        remainder -= ratio
        #print("{}{} {}".format(indent, ratio, reactant[1]))
        #print("{}{} {} left over".format(indent, remainder, reactant[1]))
        if reactant[1] == "ORE":
            ore_sum += ratio
    # if rct in raw_materials:
    #     raw_materials[rct] += coeff
    # else:
    #     raw_materials[rct] = coeff
        else:
            find_reactants(ratio, reactant[1], level+1)
    return
# Driver: parse the reaction list, then (part 2) count how many FUEL can be
# produced from one trillion ORE by producing one FUEL per loop iteration.
filename = 'day_14.txt'
#filename = 'd14test.txt'
ri = []
with open(filename) as f:
    for line in f:
        ri.append(line)
reactions = []
resource_store = {}
for line in ri:
    reactions.append(Reaction(line))
ore_sum = 0
# Starts at -1 so the count reflects completed FUEL before ORE ran out.
fuel_count = -1
one_trillion = 1000000000000
while ore_sum < one_trillion:
    find_reactants(1, "FUEL")
    fuel_count += 1
    # Progress: percentage of the trillion-ORE budget consumed so far.
    print(round(100 * ore_sum / one_trillion, 2))
#print(resource_store)
print(fuel_count)
| [
"conorpbrady@gmail.com"
] | conorpbrady@gmail.com |
5cd0af956746fdf41b72a09364359383e2e3f0ef | bbdd52993d4b9a07216ac60e1f5fe7853959f93b | /demo/innerPark/main.py | 0a5b5441c914111c4b427823a254a09e1dbabbba | [] | no_license | htfei/py_base | 1b07e8aac6bf8ff7f11483d4d8253f1ca1818f63 | 739a384aae7fa1fdf295f5527a96725d77fba528 | refs/heads/master | 2022-01-27T05:48:43.379959 | 2019-08-18T14:26:44 | 2019-08-18T14:26:44 | 119,955,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # coding=utf-8
'''
# 0、Python语言HelloWorld
print("hello python!")
# 1、定义变量
a = 2
b = 3
c = a+b
print(c)
# 2、判断语句
if c > 6:
print("你好!")
elif c < 6:
print("hello python!" )
# 3、循环
for i in range(0, 3):
print("nihao {0},{1}".format(i,"gepi"))
# 4、定义函数
# 5、面向对象
# 6、引入Python文件1
import mylib
h = mylib.Hello('李四')
h.sayhello()
# h.sayhi() #error
# 6、引入Python文件2
from mylib import Hello, Hi
h = Hello(10)
h.sayhello()
h1 = Hi("张三")
h1.sayhi()
h1.sayhello()
'''
import time
# Epoch seconds as a float, then truncated to int, then formatted local time.
a = time.time()
print(str(a))
print(str(int(a)))
# Label is Chinese for "current system time:"; runtime string left unchanged.
print(u'当前系统时间:' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(a)))
| [
"flyte2012@163.com"
] | flyte2012@163.com |
eb03def41e508f407282c5c9cd1659be83d4a274 | 5e30515112038a29daa13f25c23cfb1df7ff7fe0 | /15_TheCapStone/inference_midas.py | c60229ebcac1cfc171e674ccf37e84fc7f93d783 | [
"MIT"
] | permissive | nikshrimali/TSAI_EVA | ab365ac7a17c88b61f1d6d811132b73e6fb60778 | 1724ea28764416c294f68fe8ef1e664935202ae0 | refs/heads/master | 2023-02-02T10:44:37.175506 | 2020-12-27T03:30:33 | 2020-12-27T03:30:33 | 287,570,874 | 0 | 3 | null | 2020-09-09T04:56:25 | 2020-08-14T15:55:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,390 | py | import os
import glob
import torch
from models_all.midas.utils import *
import cv2
from torchvision.transforms import Compose
from midas.midas_net import MidasNet
from midas.transforms import Resize, NormalizeImage, PrepareForNet
os.chdir(r"D:\Python Projects\EVA\15_TheCapStone\models_all\midas")
def run(input_path, output_path, model):
    """Run MonoDepthNN to compute depth maps.

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model (torch.nn.Module): loaded MiDaS depth-estimation network
    """
    print("initialize")
    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)
    # load network
    # Preprocessing: resize to a multiple of 32 (max side 384), normalize
    # with ImageNet statistics, then pack for the network.
    transform = Compose(
        [
            Resize(
                384,
                384,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method="upper_bound",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            PrepareForNet(),
        ]
    )
    model.to(device)
    model.eval()
    # get input
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)
    # create output folder
    os.makedirs(output_path, exist_ok=True)
    print("start processing")
    for ind, img_name in enumerate(img_names):
        print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))
        # input
        img = read_image(img_name)
        img_input = transform({"image": img})["image"]
        # compute
        with torch.no_grad():
            sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
            # NOTE(review): the model returns a tuple; the depth prediction
            # is assumed to be the second element -- confirm against MidasNet.
            _, prediction = model.forward(sample)
            # Upsample the prediction back to the source image resolution.
            prediction = (
                torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=img.shape[:2],
                    mode="bicubic",
                    align_corners=False,
                )
                .squeeze()
                .cpu()
                .numpy()
            )
        # output
        # Write a 16-bit depth PNG named after the source image.
        filename = os.path.join(
            output_path, os.path.splitext(os.path.basename(img_name))[0]
        )
        write_depth(filename, prediction, bits=2)
    print("finished")
"nshrimali21@gmail.com"
] | nshrimali21@gmail.com |
808d17aebe999b1dba0d19734a917f65313edf28 | cdec1b5c5d6830e9636378400441686db8ccd95c | /Codes/networkx.py | f2875652fa5e695f8215c2d6b51c5a440c5a84ae | [] | no_license | cansyl/Isoform-spesific-PI3K-inhibitor-analysis | 491d404b2c74238b9964fa86fc6716beca42b18e | 5c0b9a7ce1d965ddc6e6f3890fade0f810e64899 | refs/heads/master | 2021-09-08T21:42:23.763422 | 2021-09-06T08:36:51 | 2021-09-06T08:36:51 | 206,241,593 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | import networkx as nx
import sys
import argparse
parser = argparse.ArgumentParser(
    description='Calculate centrality metrics for PCST .sif network')
# BUG FIX: the original called parser.parse_args() *before* add_argument(),
# so -sif/-o were never registered when parsing ran and args.siffile /
# args.out did not exist.  All arguments must be declared before parsing.
parser.add_argument('-sif', '--siffile',
                    help='SIF, Network file. Can be output from OmicsIntegrator',
                    required=True)
parser.add_argument('-o', '--out', help='Output txt file', required=True)
args = parser.parse_args()
# Edges in the .sif file are separated by a literal "\tpp\t" token.
g = nx.read_edgelist(args.siffile, create_using=None, delimiter='\tpp\t',
                     nodetype=str, data=True, edgetype=None, encoding='utf-8')
# Redirect stdout so every print below lands in the requested output file.
sys.stdout = open(args.out, "w")
degree_c = nx.degree_centrality(g)
pageR = nx.pagerank(g)
eigen_c = nx.eigenvector_centrality_numpy(g)
closeness_c = nx.closeness_centrality(g)
betweenness_c = nx.betweenness_centrality(g)
# Tab-separated header, then one row per node (parenthesized single-argument
# prints work identically under Python 2 and 3).
print("Node Name" + '\t' + "Degree Centrality" + '\t' + "Closeness Centrality"
      + '\t' + "Betweenness Centrality" + '\t' + "EigenVector Centrality"
      + '\t' + "Pagerank")
for node in degree_c:
    print(str(node) + '\t' + str(degree_c[node]) + '\t' + str(closeness_c[node])
          + '\t' + str(betweenness_c[node]) + '\t' + str(eigen_c[node])
          + '\t' + str(pageR[node]))
| [
"noreply@github.com"
] | cansyl.noreply@github.com |
9d67bb08e90b77fca7e88d0b87b6d516b67c9573 | 33c88441ac68e8c0d35c22ff6e2b7ffbf7179444 | /src/raw_img.py | 44933fcdece9a36e4ae8dc415d4df855fb4fae00 | [] | no_license | faujpli/zae_test | cf2ba198edc5c37f166595b2c7bf67b498d07e7e | 4baca40615ce987f9c156b99a6593631cd9f3e21 | refs/heads/master | 2020-03-12T00:05:16.648839 | 2018-06-08T13:40:06 | 2018-06-08T13:40:06 | 130,340,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,613 | py | '''
Created on Mar 9, 2018
@author: jingpeng
'''
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
from skimage import exposure
import scipy.misc
from setup import *
from matching import segment_modules
# compute the image quality based on its sharpness
def compute_quality(img):
    """Frequency-domain sharpness measure for a grayscale image.

    FM = (number of frequency components whose magnitude exceeds
    max_magnitude / 1000) / (m * n).  Sharper images have more significant
    high-frequency content, hence a larger FM.

    Bug fix: the original thresholded the raw cv2.dft output ``F`` (a
    real/imaginary interleaved array) instead of the magnitude spectrum
    ``AF``, so the count was meaningless.  np.fft.fft2 produces the same
    (unnormalized) transform magnitudes as cv2.dft with DFT_COMPLEX_OUTPUT.
    """
    m, n = img.shape
    F = np.fft.fft2(np.float32(img))
    Fc = np.fft.fftshift(F)  # shift the zero-frequency origin to the center
    AF = np.abs(Fc)
    M = AF.max()
    thres = M / 1000
    # Count the frequency components above the threshold (on magnitudes).
    Th = (AF > thres).sum()
    # float() instead of np.float: the latter was removed in NumPy >= 1.24.
    FM = float(Th) / (m * n)
    return FM
# compute the amoutn of blur in the image
def compute_blur(img):
    """Variance-of-Laplacian blur metric: larger values mean a sharper image."""
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    return laplacian.var()
# read the raw image data and manipulate the memory
def test_raw_img():
    """Load one raw uint32 RGB frame from disk, contrast-stretch and display it.

    Relies on module-level globals from ``setup`` (``work_dir``, ``rows``,
    ``cols``) -- presumably the sensor frame geometry; confirm in setup.py.
    """
    raw = open(work_dir+'test.raw', 'rb')
    f = np.fromfile(raw, dtype=np.uint32, count=rows*cols*3)
    im = f.reshape(rows,cols,3) #notice row, column format
    # Contrast stretching to the 2nd..98th percentile range.
    p2, p98 = np.percentile(im, (2,98))
    im = exposure.rescale_intensity(im, in_range=(p2, p98))
    plt.axis('off')
    plt.tight_layout()
    # Display the green channel only, rendered as grayscale.
    plt.imshow(im[:,:,1],cmap='gray')
    plt.show()
# read in raw video data
def test_raw_video(raw_name):
    """Score every frame of a raw uint16 video with two sharpness metrics.

    Writes one line per frame -- "<index> <fft-metric> <laplacian-metric>" --
    to <work_dir>/<basename>_quality.txt.  Uses module-level globals from
    ``setup`` (``rows``, ``cols``, ``offset``, ``work_dir``) -- presumably
    the frame geometry and a number of leading frames to skip; confirm in
    setup.py.
    """
    # number of images
    raw_size = os.path.getsize(raw_name)
    img_num = raw_size / (rows*cols*2) # 4 (32 bits) or 2 (16 bits)
    num = int(img_num)
    raw = open(raw_name, 'rb')
    # NOTE(review): requests offset+num frames although raw_size accounts
    # for only num frames -- for offset > 0 the tail read may be short.
    f = np.fromfile(raw, dtype=np.uint16, count=rows*cols*(offset+num))
    # normalize the intensities to be in [0,255]
    f = 255.*(f - f.min())/(f.max()-f.min())
    fm = []
    for i in range(offset,offset+num):
        start = rows*cols*i
        end = rows*cols*(i+1)
        img = f[start:end].reshape(rows,cols)
        #qf = format(compute_quality(img), '.6f')
        #qf =format(compute_blur(img), '.4f')
        # contrast stretching (2nd..98th percentile)
        p2, p98 = np.percentile(img, (2, 98))
        img = exposure.rescale_intensity(img, in_range=(p2, p98))
        # Two sharpness scores: FFT-based (scaled x1000) and Laplacian variance.
        fm1 =format(1000*compute_quality(img), '.5f')
        fm2 =format(compute_blur(img), '.5f')
        text = fm1+' '+fm2
        fm.append(str(i)+' '+text)
        #scipy.misc.imsave(raw_dir+str(i)+'.jpg', img) # save to jpg file
        #font = cv2.FONT_HERSHEY_SIMPLEX
        #cv2.putText(img, text, (0, 25),
        #    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1)
        #cv2.imshow('Image', img)
        #cv2.waitKey(100)
    # NOTE(review): 'f' is rebound here, shadowing the pixel array above.
    with open(work_dir+os.path.basename(raw_name).split('.')[0]+'_quality.txt', 'w') as f:
        for s in fm:
            print(s, file=f)
# save raw video to jpg images
def save_raw_to_jpg(raw_name):
    """Dump every frame of a raw video file to <raw_img_dir>/<index>.jpg.

    Each frame is contrast-stretched to its 2nd..98th percentile before
    saving.  ``rows``, ``cols`` and ``raw_img_dir`` come from ``setup``.
    """
    raw_size = os.path.getsize(raw_name)
    img_num = raw_size / (rows*cols*2) # 4 (32 bits) or 2 (16 bits)
    offset = 0
    num = int(img_num)
    raw = open(raw_name, 'rb')
    # NOTE(review): int16 here vs uint16 in test_raw_video -- one of the two
    # is presumably wrong for this sensor; confirm the camera pixel format.
    f = np.fromfile(raw, dtype=np.int16, count=rows*cols*(offset+num))
    # normalize the intensities to be in [0,255]
    #f = 255.*(f - f.min())/(f.max()-f.min())
    for i in range(offset,offset+num):
        start = rows*cols*i
        end = rows*cols*(i+1)
        img = f[start:end].reshape(rows,cols)
        # contrast stretching
        p2, p98 = np.percentile(img, (2, 98))
        img = exposure.rescale_intensity(img, in_range=(p2, p98))
        scipy.misc.imsave(raw_img_dir+str(i)+'.jpg', img) # save to jpg file
# read raw image data and convert it to jpg image
# Given: file path, dimensions
# Return: the covnerted image
def raw_to_raw(raw_name, rows, cols):
    """Convert one raw uint16 frame to JPEG and run module segmentation on it.

    Normalizes the frame to [0, 255], contrast-stretches it, writes
    <name>.jpg next to the input and calls segment_modules() on that
    directory.

    NOTE(review): despite the header comment above ("Return: the converted
    image"), the function returns the path img_dir + 'test.raw' (a
    module-level global from setup), not the image itself.
    """
    raw = open(raw_name, 'rb')
    f = np.fromfile(raw, dtype=np.uint16)
    # Normalize to [0, 255] while the data is still 1-D.
    img = 255.*(f - f.min())/(f.max()-f.min())
    img = np.reshape(img, (rows,cols)).astype(np.uint16)
    # Contrast stretching (2nd..98th percentile).
    p2, p98 = np.percentile(img, (2, 98))
    img = exposure.rescale_intensity(img, in_range=(p2, p98))
    dir = os.path.dirname(raw_name)+'/'
    name = os.path.basename(raw_name).split('.')[0]
    scipy.misc.imsave(dir+name+'.jpg', img)
    # Segment the PV modules in the freshly written jpg.
    indices = []
    indices.append(name)
    segment_modules(dir, indices)
    #cv2.imshow('img',img)
    cv2.waitKey(0)
    # save as a jpg image
    jpg_name = raw_name.split('.')[0]
    jpg_name += '.jpg'
    #scipy.misc.imsave(jpg_name, img)
    print("good")
    return img_dir+'test.raw'
# short program to test how image size influence the image quality
def test_qf(filename):
    """Show how the blur metric varies with the size of the evaluated region.

    Loads *filename* as grayscale and evaluates compute_blur() on ever
    taller top strips of the image, printing the score and displaying each
    strip for 200 ms.
    """
    img = cv2.imread(filename, 0)
    shortest_side = min(img.shape)
    for height in range(1, shortest_side, 15):
        strip = img[:height, :]
        print(format(compute_blur(strip), '0.4f'))
        cv2.imshow('img', strip)
        cv2.waitKey(200)
# short program to test the effectiveness of the function - compute_blur
def test_compute_blur():
    """Compare the two sharpness metrics on every cropped module image.

    Reads images from ``match_modules`` (from ``setup``), overlays both
    scores on each image, and writes all scores to
    <work_dir>/compare_image_quality.txt.  Press 'q' to stop early.
    """
    # Filenames are assumed to look like "<a>_<b>.<ext>"; sort numerically
    # by both components.
    filenames = sorted(os.listdir(match_modules), key=lambda x: (int(x.split('_')[0]), int(x.split('.')[0].split('_')[1])))
    fm = []
    for name in filenames:
        img = cv2.imread(match_modules+name,0)
        # FFT-based score (scaled x10000) and Laplacian-variance score.
        fm1 =format(10000*compute_quality(img), '.7f')
        fm2 =format(compute_blur(img), '.3f')
        text = fm1+' '+fm2
        fm.append(name+' '+text)
        print(name,text)
        # show the image
        cv2.putText(img, text, (0, 25),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1)
        cv2.imshow("Image", img)
        if cv2.waitKey(150) & 0xFF == ord('q'):
            break
    with open(work_dir+'compare_image_quality.txt', 'w') as f:
        for s in fm:
            print(s, file=f)
# TODO:
# transform the raw image
# input: raw image
def raw_to_persp(img1):
    """Apply a perspective transform to a raw image -- unimplemented stub.

    NOTE(review): *img1* is currently ignored and the loaded reference
    module image is unused; kept only as a placeholder for future work.
    """
    img = cv2.imread(match_res+'module 0.jpg', 0)
    pass
# Ad-hoc driver code; earlier experiments are kept commented out below.
#test_dir = '/media/jingpeng/Maxtor/raw_videos/Arzberg/'
test_dir = '/media/jingpeng/Maxtor/20180507_BDTP_Pressig/test/'
#res = find_best_FM(im)
#test_raw_img()
#test_raw_video(work_dir+'LockIn_Velox_10m_2ms_100FPS.raw')
#raw_to_jpg(work_dir+'32bit.raw', 512, 640)
#test_qf(work_dir+'lena.png')
#test_compute_blur()
# NOTE(review): this conversion runs at import time, not behind a
# __name__ == "__main__" guard.
save_raw_to_jpg(test_dir+'20180507_WR9,1B_8,54A_5ms_3209-11698.raw')
#raw_to_raw(work_dir+'raw_img.raw', 512, 640)
#test_vidoes_path = '/media/jingpeng/Maxtor/raw_videos/Speed/test/'
#for video_file in os.listdir(test_vidoes_path):
#    print(video_file)
#    test_raw_video(test_vidoes_path+video_file)
| [
"jingpeng.li@fau.de"
] | jingpeng.li@fau.de |
6b7e9fcc5d4e182b99bfb7c105327e72b40b1170 | ae582ba0af41e0e809bf01fe1dbbb369cded223b | /.flask_testes/lib/python3.7/struct.py | 4336eaf338b0afa22a155abfcf54eb2377f81b8b | [] | no_license | FelipeEmerim/flask_testes | 49304223437d1b29cc25059f649329979e45a013 | 5950b8913199420d2b0044828e62b7f4c7285aae | refs/heads/master | 2020-03-22T07:51:01.073991 | 2018-07-19T13:46:30 | 2018-07-19T13:46:30 | 139,728,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | /home/emerim/.pyenv/versions/3.7.0/lib/python3.7/struct.py | [
"lip.emerim@gmail.com"
] | lip.emerim@gmail.com |
9eb258cdc6b65a9919a795440b6beb5e84713442 | bdd40ea113fdf2f04ef7d61a096a575322928d1d | /Rupesh/opencv/Opencv_Python/Probabilistic_hough_line_L4_6.py | dd657873e7607cacae08499456287fe764d0fb17 | [] | no_license | rupesh7399/rupesh | 3eebf924d33790c29636ad59433e10444b74bc2f | 9b746acf37ab357c147cdada1de5458c5fc64f53 | refs/heads/master | 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | import cv2
import numpy as np

# Grayscale then Canny edge map as input to the Hough transform.
image = cv2.imread('images/soduku.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 170, apertureSize = 3)

# rho = 1 pixel, theta = 1 degree, accumulator threshold = 200 votes.
# Bug fix: the original passed 5 and 10 positionally, which binds them to
# the (unused) 'lines' output parameter and to minLineLength respectively --
# not to minLineLength/maxLineGap as the comment claimed. Keyword arguments
# make the intent explicit and correct.
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=5, maxLineGap=10)
print(lines.shape)

# Each entry is one detected segment given by its two endpoints.
for line in lines:
    for x1,y1,x2,y2 in line:
        cv2.line(image, (x1, y1), (x2, y2),(0, 255, 0), 3)

cv2.imshow('Probabilistic Hough Lines', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"rupesh7399@gmail.com"
] | rupesh7399@gmail.com |
700d87cb74a2cd087f84aba3b9e72437ec8f0300 | 8dbb7d4a57a29550568ea9b005ade6542bb546fd | /baidu-spider/setting.py | 6b7cf57e85f3e0f1b010ad7af1e0b45f29d2f094 | [] | no_license | kalicc/feapder_project | 96e4b0d575d61967fff445d62267abe0d1b0d7af | 19820eb7da8f9cec32a11bdcfc47232917743896 | refs/heads/master | 2023-08-05T00:44:36.822548 | 2021-09-24T10:02:53 | 2021-09-24T10:02:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | # -*- coding: utf-8 -*-
"""爬虫配置文件"""
# import os
# import sys
#
# MYSQL
MYSQL_IP = "localhost"
MYSQL_PORT = 3306
MYSQL_DB = "feapder"
MYSQL_USER_NAME = "feapder"
MYSQL_USER_PASS = "feapder123"
#
# # MONGODB
# MONGO_IP = os.getenv("MONGO_IP", "localhost")
# MONGO_PORT = int(os.getenv("MONGO_PORT", 27017))
# MONGO_DB = os.getenv("MONGO_DB")
# MONGO_USER_NAME = os.getenv("MONGO_USER_NAME")
# MONGO_USER_PASS = os.getenv("MONGO_USER_PASS")
#
# REDIS
# ip:port 多个可写为列表或者逗号隔开 如 ip1:port1,ip2:port2 或 ["ip1:port1", "ip2:port2"]
REDISDB_IP_PORTS = "localhost:6379"
# REDISDB_USER_PASS = os.getenv("REDISDB_USER_PASS")
# # 默认 0 到 15 共16个数据库
# REDISDB_DB = int(os.getenv("REDISDB_DB", 0))
# # 适用于redis哨兵模式
# REDISDB_SERVICE_NAME = os.getenv("REDISDB_SERVICE_NAME")
#
# # 数据入库的pipeline,可自定义,默认MysqlPipeline
# ITEM_PIPELINES = [
# "feapder.pipelines.mysql_pipeline.MysqlPipeline",
# # "feapder.pipelines.mongo_pipeline.MongoPipeline",
# ]
#
# # 爬虫相关
# # COLLECTOR
# COLLECTOR_SLEEP_TIME = 1 # 从任务队列中获取任务到内存队列的间隔
# COLLECTOR_TASK_COUNT = 10 # 每次获取任务数量
#
# # SPIDER
# SPIDER_THREAD_COUNT = 1 # 爬虫并发数
# SPIDER_SLEEP_TIME = 0 # 下载时间间隔 单位秒。 支持随机 如 SPIDER_SLEEP_TIME = [2, 5] 则间隔为 2~5秒之间的随机数,包含2和5
# SPIDER_TASK_COUNT = 1 # 每个parser从内存队列中获取任务的数量
# SPIDER_MAX_RETRY_TIMES = 100 # 每个请求最大重试次数
# AUTO_STOP_WHEN_SPIDER_DONE = True # 爬虫是否自动结束
#
# # 浏览器渲染
# WEBDRIVER = dict(
# pool_size=1, # 浏览器的数量
# load_images=True, # 是否加载图片
# user_agent=None, # 字符串 或 无参函数,返回值为user_agent
# proxy=None, # xxx.xxx.xxx.xxx:xxxx 或 无参函数,返回值为代理地址
# headless=False, # 是否为无头浏览器
# driver_type="CHROME", # CHROME、PHANTOMJS、FIREFOX
# timeout=30, # 请求超时时间
# window_size=(1024, 800), # 窗口大小
# executable_path=None, # 浏览器路径,默认为默认路径
# render_time=0, # 渲染时长,即打开网页等待指定时间后再获取源码
# custom_argument=["--ignore-certificate-errors"], # 自定义浏览器渲染参数
# )
#
# # 爬虫启动时,重新抓取失败的requests
# RETRY_FAILED_REQUESTS = False
# # 保存失败的request
# SAVE_FAILED_REQUEST = True
# # request防丢机制。(指定的REQUEST_LOST_TIMEOUT时间内request还没做完,会重新下发 重做)
# REQUEST_LOST_TIMEOUT = 600 # 10分钟
# # request网络请求超时时间
# REQUEST_TIMEOUT = 22 # 等待服务器响应的超时时间,浮点数,或(connect timeout, read timeout)元组
#
# # 下载缓存 利用redis缓存,但由于内存大小限制,所以建议仅供开发调试代码时使用,防止每次debug都需要网络请求
# RESPONSE_CACHED_ENABLE = False # 是否启用下载缓存 成本高的数据或容易变需求的数据,建议设置为True
# RESPONSE_CACHED_EXPIRE_TIME = 3600 # 缓存时间 秒
# RESPONSE_CACHED_USED = False # 是否使用缓存 补采数据时可设置为True
#
# # 设置代理
# PROXY_EXTRACT_API = None # 代理提取API ,返回的代理分割符为\r\n
# PROXY_ENABLE = True
#
# # 随机headers
# RANDOM_HEADERS = True
# # UserAgent类型 支持 'chrome', 'opera', 'firefox', 'internetexplorer', 'safari',若不指定则随机类型
# USER_AGENT_TYPE = "chrome"
# # 默认使用的浏览器头 RANDOM_HEADERS=True时不生效
# DEFAULT_USERAGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
# # requests 使用session
# USE_SESSION = False
#
# # 去重
# ITEM_FILTER_ENABLE = False # item 去重
# REQUEST_FILTER_ENABLE = False # request 去重
#
# # 报警 支持钉钉、企业微信、邮件
# # 钉钉报警
# DINGDING_WARNING_URL = "" # 钉钉机器人api
# DINGDING_WARNING_PHONE = "" # 报警人 支持列表,可指定多个
# # 邮件报警
# EMAIL_SENDER = "" # 发件人
# EMAIL_PASSWORD = "" # 授权码
# EMAIL_RECEIVER = "" # 收件人 支持列表,可指定多个
# EMAIL_SMTPSERVER = "smtp.163.com" # 邮件服务器 默认为163邮箱
# # 企业微信报警
# WECHAT_WARNING_URL = "" # 企业微信机器人api
# WECHAT_WARNING_PHONE = "" # 报警人 将会在群内@此人, 支持列表,可指定多人
# WECHAT_WARNING_ALL = False # 是否提示所有人, 默认为False
# # 时间间隔
# WARNING_INTERVAL = 3600 # 相同报警的报警时间间隔,防止刷屏; 0表示不去重
# WARNING_LEVEL = "DEBUG" # 报警级别, DEBUG / ERROR
# WARNING_FAILED_COUNT = 1000 # 任务失败数 超过WARNING_FAILED_COUNT则报警
#
# LOG_NAME = os.path.basename(os.getcwd())
# LOG_PATH = "log/%s.log" % LOG_NAME # log存储路径
# LOG_LEVEL = "DEBUG"
# LOG_COLOR = True # 是否带有颜色
# LOG_IS_WRITE_TO_CONSOLE = True # 是否打印到控制台
# LOG_IS_WRITE_TO_FILE = False # 是否写文件
# LOG_MODE = "w" # 写文件的模式
# LOG_MAX_BYTES = 10 * 1024 * 1024 # 每个日志文件的最大字节数
# LOG_BACKUP_COUNT = 20 # 日志文件保留数量
# LOG_ENCODING = "utf8" # 日志文件编码
# OTHERS_LOG_LEVAL = "ERROR" # 第三方库的log等级
#
# # 切换工作路径为当前项目路径
# project_path = os.path.abspath(os.path.dirname(__file__))
# os.chdir(project_path) # 切换工作路经
# sys.path.insert(0, project_path)
# print('当前工作路径为 ' + os.getcwd())
| [
"boris@bzkj.tech"
] | boris@bzkj.tech |
94a71e9a1cd844f036ee97547f4bbee604826f1e | 0bbf41e3bcaecaed1f91ccbf8c6afc47abe2b9cb | /age_calculator.py | d7bd2c2f8a8a73b131e27277b2a64df82a91d596 | [
"MIT"
] | permissive | armedev/Basics_python | 7706f63361f9cc9e5625070e285e0feb5bc68078 | 3eb175dfcc3489b14f6f98c1f8e63010f670d86c | refs/heads/master | 2022-03-24T12:29:35.520267 | 2019-10-23T12:59:35 | 2019-10-23T12:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | #age_calculator_and_voter_predictor
# Age calculator and voter-eligibility predictor.
# NOTE(review): the current year is hard-coded to 2019, and non-numeric
# input makes int() raise ValueError before the final else-branch can run.
ww = int(input("your date of birth:> "))
if ww in range(0, 5000):
    print(ww, "-birth year")
    # age == 0.1 is a sentinel meaning "birth year lies in the future".
    if ww <= 2019:
        age = 2019 - int(ww)
    else:
        age = 0.1
    # Report the completed age with singular/plural wording.
    if age == 1:
        yr = "year"
        print("you have completed your", age, yr)
    elif age == 0:
        print("you are just months older or you have not born yet")
    elif age == 0.1:
        print("you have not born yet")
    else:
        yr = "year's"
        print("you have completed your", age, yr)
    # Voting eligibility: 18+ in the hard-coded reference year.
    if age > 18:
        print('''you have the right to vote''')
    elif age == 18:
        print("you are the voter for the next year")
    elif age == 0:
        print("you failed to provide your correct birth-date")
    elif age == 0.1:
        print("you failed to provide your correct birth-date")
    else:
        print("But you are under 18 year's")
else:
    print(f"you entered string(letters).")
| [
"arbazahmed7672"
] | arbazahmed7672 |
cf7cc762a91e412a454f0eecae8e043c0037e9e6 | 01d7b673a01d097f4c6643ba6e25e1d291d0ee01 | /Homework 3/fileprob.py | d2da14fbb86aba417df51d18237a749fdc9ef9c7 | [] | no_license | KomyoSanzo/Processing-Language-Natural | c7baf03ad8aca3c9ba36336df4f575988c97fb8f | f89a0c9073a768337807311ee1bb633147294c02 | refs/heads/master | 2021-01-10T16:46:43.754589 | 2016-04-30T03:35:29 | 2016-04-30T03:35:29 | 53,104,449 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | #!/usr/bin/python
# Sample program for hw-lm
# CS465 at Johns Hopkins University.
# Converted to python by Eric Perlman <eric@cs.jhu.edu>
# Updated by Jason Baldridge <jbaldrid@mail.utexas.edu> for use in NLP
# course at UT Austin. (9/9/2008)
# Modified by Mozhi Zhang <mzhang29@jhu.edu> to add the new log linear model
# with word embeddings. (2/17/2016)
import math
import sys
import Probs
# Computes the log probability of the sequence of tokens in file,
# according to a trigram model. The training source is specified by
# the currently open corpus, and the smoothing method used by
# prob() is specified by the global variable "smoother".
def main():
    """Command-line driver (Python 2): print the log2-probability of each
    test file under a smoothed n-gram language model trained on the given
    corpus, one "<bits>\\t<filename>" line per file."""
    course_dir = '/usr/local/data/cs465/'
    argv = sys.argv[1:]
    if len(argv) < 2:
        print """
Prints the log-probability of each file under a smoothed n-gram model.
Usage: %s smoother lexicon trainpath files...
Example: %s add0.01 %shw-lm/lexicons/words-10.txt switchboard-small %shw-lm/speech/sample*
Possible values for smoother: uniform, add1, backoff_add1, backoff_wb, loglinear1
(the \"1\" in add1/backoff_add1 can be replaced with any real lambda >= 0
the \"1\" in loglinear1 can be replaced with any C >= 0 )
lexicon is the location of the word vector file, which is only used in the loglinear model
trainpath is the location of the training corpus
(the search path for this includes "%s")
""" % (sys.argv[0], sys.argv[0], course_dir, course_dir, Probs.DEFAULT_TRAINING_DIR)
        sys.exit(1)
    # Positional arguments: smoothing method, lexicon path, training corpus;
    # everything remaining in argv is a test file.
    smoother = argv.pop(0)
    lexicon = argv.pop(0)
    train_file = argv.pop(0)
    if not argv:
        print "warning: no input files specified"
    lm = Probs.LanguageModel()
    lm.set_smoother(smoother)
    lm.read_vectors(lexicon)
    lm.train(train_file)
    # We use natural log for our internal computations and that's
    # the kind of log-probability that fileLogProb returns.
    # But we'd like to print a value in bits: so we convert
    # log base e to log base 2 at print time, by dividing by log(2).
    for testfile in argv:
        print "%g\t%s" % (lm.filelogprob(testfile) / math.log(2), testfile)
if __name__ == "__main__":
main()
| [
"wwangsta@gmail.com"
] | wwangsta@gmail.com |
ad17af3de70a96211a6b7c29e6538fc80f86a7c2 | 231bb8ee7a41c6522b186e9b875cce9d6f21575d | /Problem 2( GC content).py | 251d16bd0036cadf8a390303641b85209638b779 | [] | no_license | Fathu1998/DNA-string-Complement_and_GC_content | b6d26e27a8d3208ffade89162f64d8ceef4872ca | 528b3a04f0575297104b5b6c4ce351ce17df701c | refs/heads/master | 2021-03-03T20:26:22.577244 | 2020-03-15T15:38:28 | 2020-03-15T15:38:28 | 245,985,551 | 0 | 0 | null | 2020-03-09T08:50:36 | 2020-03-09T08:50:36 | null | UTF-8 | Python | false | false | 260 | py | DNA=input("Enter the DNA sequence/sequences")
# Tally guanine and cytosine bases, then report their share of the sequence.
# (DNA is read from stdin on the preceding line.)
G = DNA.count("G")
C = DNA.count("C")
GC_percent = (G + C) * 100 / len(DNA)
print("The GC% of the given DNA sequence is calculated to be" ,GC_percent)
| [
"noreply@github.com"
] | Fathu1998.noreply@github.com |
636d1ce57b8bab3e8a414c97c5c669ec29525873 | 72852e07bb30adbee608275d6048b2121a5b9d82 | /algorithms/problem_0923/leetcode1.py | 72ed6b267b3710694b4c0b138370af274cbdb672 | [] | no_license | drlongle/leetcode | e172ae29ea63911ccc3afb815f6dbff041609939 | 8e61ddf06fb3a4fb4a4e3d8466f3367ee1f27e13 | refs/heads/master | 2023-01-08T16:26:12.370098 | 2023-01-03T09:08:24 | 2023-01-03T09:08:24 | 81,335,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | class Solution(object):
def threeSumMulti(self, A, target):
MOD = 10**9 + 7
ans = 0
A.sort()
for i, x in enumerate(A):
# We'll try to find the number of i < j < k
# with A[j] + A[k] == T, where T = target - A[i].
# The below is a "two sum with multiplicity".
T = target - A[i]
j, k = i+1, len(A) - 1
while j < k:
# These steps proceed as in a typical two-sum.
if A[j] + A[k] < T:
j += 1
elif A[j] + A[k] > T:
k -= 1
# These steps differ:
elif A[j] != A[k]: # We have A[j] + A[k] == T.
# Let's count "left": the number of A[j] == A[j+1] == A[j+2] == ...
# And similarly for "right".
left = right = 1
while j + 1 < k and A[j] == A[j+1]:
left += 1
j += 1
while k - 1 > j and A[k] == A[k-1]:
right += 1
k -= 1
# We contributed left * right many pairs.
ans += left * right
ans %= MOD
j += 1
k -= 1
else:
# M = k - j + 1
# We contributed M * (M-1) / 2 pairs.
ans += (k-j+1) * (k-j) / 2
ans %= MOD
break
return ans
| [
"drlongle@gmail.com"
] | drlongle@gmail.com |
c624db327e351b58c163be9c5581a3b6da482442 | cc75f278846363dfa23d35e0a580cf2bfde700d1 | /pytorch3d/io/mtl_io.py | f194b976b437de447635a34d0df6897064ca736f | [
"BSD-3-Clause"
] | permissive | 2429581027/pytorch3d | 78a0da94484f09f778a867d897e38905f355b60f | 5d65a0cf8c9a1fb755fd09ce098bcedb0c670d80 | refs/heads/master | 2022-12-25T12:42:44.861890 | 2020-10-06T22:53:45 | 2020-10-06T22:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,197 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""This module implements utility functions for loading .mtl files and textures."""
import os
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from pytorch3d.io.utils import _open_file, _read_image
def make_mesh_texture_atlas(
    material_properties: Dict,
    texture_images: Dict,
    face_material_names,
    faces_verts_uvs: torch.Tensor,
    texture_size: int,
    texture_wrap: Optional[str],
) -> torch.Tensor:
    """
    Given properties for materials defined in the .mtl file, and the face texture uv
    coordinates, construct an (F, R, R, 3) texture atlas where R is the texture_size
    and F is the number of faces in the mesh.

    Args:
        material_properties: dict of properties for each material. If a material
                does not have any properties it will have an empty dict.
        texture_images: dict of material names and texture images
        face_material_names: numpy array of the material name corresponding to each
            face. Faces which don't have an associated material will be an empty string.
            For these faces, a uniform white texture is assigned.
        faces_verts_uvs: LongTensor of shape (F, 3, 2) giving the uv coordinates for each
            vertex in the face.
        texture_size: the resolution of the per face texture map returned by this function.
            Each face will have a texture map of shape (texture_size, texture_size, 3).
        texture_wrap: string, one of ["repeat", "clamp", None]
            If `texture_wrap="repeat"` for uv values outside the range [0, 1] the integer part
            is ignored and a repeating pattern is formed.
            If `texture_wrap="clamp"` the values are clamped to the range [0, 1].
            If None, do nothing.

    Returns:
        atlas: FloatTensor of shape (F, texture_size, texture_size, 3) giving the per
        face texture map.
    """
    # Create an R x R texture map per face in the mesh
    R = texture_size
    F = faces_verts_uvs.shape[0]

    # Initialize the per face texture map to a white color.
    # TODO: allow customization of this base color?
    # pyre-fixme[16]: `Tensor` has no attribute `new_ones`.
    atlas = faces_verts_uvs.new_ones(size=(F, R, R, 3))

    # Check for empty materials.
    if not material_properties and not texture_images:
        return atlas

    if texture_wrap == "repeat":
        # If texture uv coordinates are outside the range [0, 1] follow
        # the convention GL_REPEAT in OpenGL i.e the integer part of the coordinate
        # will be ignored and a repeating pattern is formed.
        # Shapenet data uses this format see:
        # https://shapenet.org/qaforum/index.php?qa=15&qa_1=why-is-the-texture-coordinate-in-the-obj-file-not-in-the-range # noqa: B950
        # pyre-fixme[16]: `ByteTensor` has no attribute `any`.
        if (faces_verts_uvs > 1).any() or (faces_verts_uvs < 0).any():
            msg = "Texture UV coordinates outside the range [0, 1]. \
                The integer part will be ignored to form a repeating pattern."
            warnings.warn(msg)
            # pyre-fixme[9]: faces_verts_uvs has type `Tensor`; used as `int`.
            # pyre-fixme[58]: `%` is not supported for operand types `Tensor` and `int`.
            faces_verts_uvs = faces_verts_uvs % 1
    elif texture_wrap == "clamp":
        # Clamp uv coordinates to the [0, 1] range.
        faces_verts_uvs = faces_verts_uvs.clamp(0.0, 1.0)

    # Iterate through the material properties - not
    # all materials have texture images so this has to be
    # done separately to the texture interpolation.
    for material_name, props in material_properties.items():
        # Bool to indicate which faces use this texture map.
        faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
            faces_verts_uvs.device
        )
        if faces_material_ind.sum() > 0:
            # For these faces, update the base color to the
            # diffuse material color.
            if "diffuse_color" not in props:
                continue
            atlas[faces_material_ind, ...] = props["diffuse_color"][None, :]

    # Iterate through the materials used in this mesh. Update the
    # texture atlas for the faces which use this material.
    # Faces without texture are white.
    for material_name, image in list(texture_images.items()):
        # Only use the RGB colors
        if image.shape[2] == 4:
            image = image[:, :, :3]

        # Reverse the image y direction
        image = torch.flip(image, [0]).type_as(faces_verts_uvs)

        # Bool to indicate which faces use this texture map.
        faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
            faces_verts_uvs.device
        )

        # Find the subset of faces which use this texture with this texture image
        uvs_subset = faces_verts_uvs[faces_material_ind, :, :]

        # Update the texture atlas for the faces which use this texture.
        # TODO: should the texture map values be multiplied
        # by the diffuse material color (i.e. use *= as the atlas has
        # been initialized to the diffuse color)?. This is
        # not being done in SoftRas.
        atlas[faces_material_ind, :, :] = make_material_atlas(image, uvs_subset, R)

    return atlas
def make_material_atlas(
    image: torch.Tensor, faces_verts_uvs: torch.Tensor, texture_size: int
) -> torch.Tensor:
    r"""
    Given a single texture image and the uv coordinates for all the
    face vertices, create a square texture map per face using
    the formulation from [1].

    For a triangle with vertices (v0, v1, v2) we can create a barycentric coordinate system
    with the x axis being the vector (v0 - v2) and the y axis being the vector (v1 - v2).
    The barycentric coordinates range from [0, 1] in the +x and +y direction so this creates
    a triangular texture space with vertices at (0, 1), (0, 0) and (1, 0).

    The per face texture map is of shape (texture_size, texture_size, 3)
    which is a square. To map a triangular texture to a square grid, each
    triangle is parametrized as follows (e.g. R = texture_size = 3):

    The triangle texture is first divided into RxR = 9 subtriangles which each
    map to one grid cell. The numbers in the grid cells and triangles show the mapping.

    ..code-block::python

        Triangular Texture Space:

              1
                |\
                |6 \
                |____\
                |\ 7 |\
                |3 \ |4 \
                |____\|____\
                |\ 8 |\ 5 |\
                |0 \ |1 \ |2 \
                |____\|____\|____\
               0                  1

        Square per face texture map:

               R ____________________
                |      |      |      |
                |  6   |  7   |  8   |
                |______|______|______|
                |      |      |      |
                |  3   |  4   |  5   |
                |______|______|______|
                |      |      |      |
                |  0   |  1   |  2   |
                |______|______|______|
               0                      R

    The barycentric coordinates of each grid cell are calculated using the
    xy coordinates:

    ..code-block::python

        The cartesian coordinates are:

        Grid 1:

               R ____________________
                |      |      |      |
                |  20  |  21  |  22  |
                |______|______|______|
                |      |      |      |
                |  10  |  11  |  12  |
                |______|______|______|
                |      |      |      |
                |  00  |  01  |  02  |
                |______|______|______|
               0                      R

        where 02 means y = 0, x = 2

    Now consider this subset of the triangle which corresponds to
    grid cells 0 and 8:

    ..code-block::python

        1/R  ________
            |\    8  |
            |  \     |
            | 0   \  |
            |_______\|
           0          1/R

    The centroids of the triangles are:
        0: (1/3, 1/3) * 1/R
        8: (2/3, 2/3) * 1/R

    For each grid cell we can now calculate the centroid `(c_y, c_x)`
    of the corresponding texture triangle:
        - if `(x + y) < R`, then offset the centroid of
            triangle 0 by `(y, x) * (1/R)`
        - if `(x + y) > R`, then offset the centroid of
            triangle 8 by `((R-1-y), (R-1-x)) * (1/R)`.

    This is equivalent to updating the portion of Grid 1
    above the diagonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:

    ..code-block::python

               R _____________________
                |      |      |      |
                |  20  |  01  |  00  |
                |______|______|______|
                |      |      |      |
                |  10  |  11  |  10  |
                |______|______|______|
                |      |      |      |
                |  00  |  01  |  02  |
                |______|______|______|
               0                      R

    The barycentric coordinates (w0, w1, w2) are then given by:

    ..code-block::python

        w0 = c_x
        w1 = c_y
        w2 = 1- w0 - w1

    Args:
        image: FloatTensor of shape (H, W, 3)
        faces_verts_uvs: uv coordinates for each vertex in each face  (F, 3, 2)
        texture_size: int

    Returns:
        atlas: a FloatTensor of shape (F, texture_size, texture_size, 3) giving a
            per face texture map.

    [1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
        3D Reasoning', ICCV 2019
    """
    R = texture_size
    device = faces_verts_uvs.device
    rng = torch.arange(R, device=device)

    # Meshgrid returns (row, column) i.e (Y, X)
    # Change order to (X, Y) to make the grid.
    Y, X = torch.meshgrid(rng, rng)
    # pyre-fixme[28]: Unexpected keyword argument `axis`.
    grid = torch.stack([X, Y], axis=-1)  # (R, R, 2)

    # Grid cells below the diagonal: x + y < R.
    below_diag = grid.sum(-1) < R

    # map a [0, R] grid -> to a [0, 1] barycentric coordinates of
    # the texture triangle centroids.
    bary = torch.zeros((R, R, 3), device=device)  # (R, R, 3)
    slc = torch.arange(2, device=device)[:, None]
    # w0, w1 for below-diagonal cells: centroid of sub-triangle 0 shifted by (y, x)/R.
    bary[below_diag, slc] = ((grid[below_diag] + 1.0 / 3.0) / R).T
    # w0, w1 for above diagonal grid cells: centroid of sub-triangle 8
    # shifted by ((R-1-y), (R-1-x))/R.
    # pyre-fixme[16]: `float` has no attribute `T`.
    bary[~below_diag, slc] = (((R - 1.0 - grid[~below_diag]) + 2.0 / 3.0) / R).T
    # w2 = 1. - w0 - w1
    bary[..., -1] = 1 - bary[..., :2].sum(dim=-1)

    # Calculate the uv position in the image for each pixel
    # in the per face texture map
    # (F, 1, 1, 3, 2) * (R, R, 3, 1) -> (F, R, R, 3, 2) -> (F, R, R, 2)
    uv_pos = (faces_verts_uvs[:, None, None] * bary[..., None]).sum(-2)

    # bi-linearly interpolate the textures from the images
    # using the uv coordinates given by uv_pos.
    textures = _bilinear_interpolation_vectorized(image, uv_pos)
    return textures
def _bilinear_interpolation_vectorized(
    image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
    """
    Bi linearly interpolate the image using the uv positions in the flow-field
    grid (following the naming conventions for torch.nn.functional.grid_sample).

    This implementation uses the same steps as in the SoftRas cuda kernel
    to make it easy to compare. This vectorized version requires less memory than
    _bilinear_interpolation_grid_sample but is slightly slower.
    If speed is an issue and the number of faces in the mesh and texture image sizes
    are small, consider using _bilinear_interpolation_grid_sample instead.

    Args:
        image: FloatTensor of shape (H, W, D) a single image/input tensor with D
            channels.
        grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
            points at which to sample a value in the image. The grid values must
            be in the range [0, 1]. u is the x direction and v is the y direction.

    Returns:
        out: FloatTensor of shape (N, H, W, D) giving the interpolated
        D dimensional value from image at each of the pixel locations in grid.
    """
    H, W, _ = image.shape
    # Convert [0, 1] to the range [0, W-1] and [0, H-1]
    grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
    weight_1 = grid - grid.int()
    weight_0 = 1.0 - weight_1

    grid_x, grid_y = grid.unbind(-1)
    y0 = grid_y.to(torch.int64)
    x0 = grid_x.to(torch.int64)
    # Bug fix: clamp the +1 neighbours so that uv == 1.0 (i.e. grid == W-1 or
    # H-1, which is within the documented valid range) does not index out of
    # bounds. The interpolation weight of the clamped sample is exactly 0 at
    # those coordinates, so results for all in-range inputs are unchanged.
    y1 = (y0 + 1).clamp(max=H - 1)
    x1 = (x0 + 1).clamp(max=W - 1)

    weight_x0, weight_y0 = weight_0.unbind(-1)
    weight_x1, weight_y1 = weight_1.unbind(-1)

    # Bi-linear interpolation
    # griditions = [[y, x], [(y+1), x]
    #               [y, (x+1)], [(y+1), (x+1)]]
    # weights = [[wx0*wy0, wx0*wy1],
    #            [wx1*wy0, wx1*wy1]]
    out = (
        image[y0, x0] * (weight_x0 * weight_y0)[..., None]
        + image[y1, x0] * (weight_x0 * weight_y1)[..., None]
        + image[y0, x1] * (weight_x1 * weight_y0)[..., None]
        + image[y1, x1] * (weight_x1 * weight_y1)[..., None]
    )
    return out
def _bilinear_interpolation_grid_sample(
    image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
    """
    Bilinearly sample *image* at the uv positions in *grid* via
    torch.nn.functional.grid_sample.

    Faster than _bilinear_interpolation_vectorized but uses more memory
    (it expands the image across the batch), so it can OOM on large inputs.

    Args:
        image: FloatTensor of shape (H, W, D) with D channels.
        grid: FloatTensor of shape (N, R, R, 2) of sample locations, with
            values in [0, 1]; u is the x direction, v is the y direction.

    Returns:
        FloatTensor of shape (N, R, R, D) of interpolated values.
    """
    batch = grid.shape[0]
    # grid_sample expects flow-field coordinates in [-1, 1].
    flow = 2.0 * grid - 1.0
    # (H, W, D) -> (batch, D, H, W); expand shares storage across the batch.
    batched_image = image.permute(2, 0, 1).unsqueeze(0).expand(batch, -1, -1, -1)
    # align_corners=True matches the SoftRas cuda kernel's bilinear sampling.
    sampled = F.grid_sample(batched_image, flow, mode="bilinear", align_corners=True)
    # Back to channels-last layout.
    return sampled.permute(0, 2, 3, 1)
MaterialProperties = Dict[str, Dict[str, torch.Tensor]]
TextureFiles = Dict[str, str]
TextureImages = Dict[str, torch.Tensor]
def _parse_mtl(f, device="cpu") -> Tuple[MaterialProperties, TextureFiles]:
    """
    Parse a .mtl file-like object.

    Returns a dict of per-material reflectivity properties (tensors created
    on *device*) and a dict mapping material names to the texture image
    filenames referenced by ``map_Kd`` statements.
    """
    material_properties = {}
    texture_files = {}
    material_name = ""

    with _open_file(f, "r") as f:
        for line in f:
            tokens = line.strip().split()
            if not tokens:
                continue
            if tokens[0] == "newmtl":
                # Start a new material section; later statements attach to it.
                material_name = tokens[1]
                material_properties[material_name] = {}
            elif tokens[0] == "map_Kd":
                # Diffuse texture map
                # Account for the case where filenames might have spaces
                filename = line.strip()[7:]
                texture_files[material_name] = filename
            elif tokens[0] == "Kd":
                # RGB diffuse reflectivity
                kd = np.array(tokens[1:4]).astype(np.float32)
                kd = torch.from_numpy(kd).to(device)
                material_properties[material_name]["diffuse_color"] = kd
            elif tokens[0] == "Ka":
                # RGB ambient reflectivity
                ka = np.array(tokens[1:4]).astype(np.float32)
                ka = torch.from_numpy(ka).to(device)
                material_properties[material_name]["ambient_color"] = ka
            elif tokens[0] == "Ks":
                # RGB specular reflectivity
                ks = np.array(tokens[1:4]).astype(np.float32)
                ks = torch.from_numpy(ks).to(device)
                material_properties[material_name]["specular_color"] = ks
            elif tokens[0] == "Ns":
                # Specular exponent
                # NOTE(review): Ns carries a single scalar, yet tokens[1:4] is
                # sliced like the color lines; confirm downstream code expects
                # a length-1 tensor here.
                ns = np.array(tokens[1:4]).astype(np.float32)
                ns = torch.from_numpy(ns).to(device)
                material_properties[material_name]["shininess"] = ns

    return material_properties, texture_files
def _load_texture_images(
    material_names: List[str],
    data_dir: str,
    material_properties: MaterialProperties,
    texture_files: TextureFiles,
) -> Tuple[MaterialProperties, TextureImages]:
    """Load texture images and keep only materials referenced in the obj.

    Returns a (properties, images) pair restricted to *material_names*;
    a warning is issued for any texture file that cannot be found.
    """
    kept_properties = {}
    loaded_images = {}
    for name in material_names:
        # Resolve and load this material's texture image, if it declares one.
        if name in texture_files:
            image_path = os.path.join(data_dir, texture_files[name])
            if not os.path.isfile(image_path):
                warnings.warn(f"Texture file does not exist: {image_path}")
            else:
                pixels = _read_image(image_path, format="RGB") / 255.0
                loaded_images[name] = torch.from_numpy(pixels)
        # Carry over the reflectivity properties of referenced materials.
        if name in material_properties:
            kept_properties[name] = material_properties[name]
    return kept_properties, loaded_images
def load_mtl(
    f, material_names: List[str], data_dir: str, device="cpu"
) -> Tuple[MaterialProperties, TextureImages]:
    """
    Load texture images and material reflectivity values for ambient, diffuse
    and specular light (Ka, Kd, Ks, Ns).
    Args:
        f: a file-like object of the material information.
        material_names: a list of the material names found in the .obj file.
        data_dir: the directory where the material texture files are located.
        device: device for the returned property tensors (default "cpu").
    Returns:
        material_properties: dict of properties for each material. If a material
                does not have any properties it will have an empty dict.
                {
                    material_name_1:  {
                        "ambient_color": tensor of shape (1, 3),
                        "diffuse_color": tensor of shape (1, 3),
                        "specular_color": tensor of shape (1, 3),
                        "shininess": tensor of shape (1)
                    },
                    material_name_2: {},
                    ...
                }
        texture_images: dict of material names and texture images
                {
                    material_name_1: (H, W, 3) image,
                    ...
                }
    """
    # Parse first, then resolve/load only the materials the obj references.
    material_properties, texture_files = _parse_mtl(f, device)
    return _load_texture_images(
        material_names, data_dir, material_properties, texture_files
    )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e4274d2937a984ca11f5a7c0f1d24179b955a7dd | b74e34c58a363c7514c0eb243dbd1eb33abef939 | /消费群组代码/数据处理代码/mePy/behavior/LibsvmTest.py | ef7f95783dac7cacb0bc6d69b84508ef66ff1363 | [] | no_license | cathyxl/Project-Consumer-Groups | c49330bb52447ee2f2fcd1596d4976b850487f11 | ee23d4ca003d3e4d083541efdeb7c70deb2fd791 | refs/heads/master | 2021-09-10T20:06:00.321862 | 2018-04-01T08:25:34 | 2018-04-01T08:25:34 | 90,232,933 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | "测试Libsvm包"
from svmutil import *
import csv
# Input locations: a labelled training CSV and an unlabelled feature CSV.
# NOTE(review): Windows-specific absolute path — adjust per machine.
PATH = 'D:\\Consume Group\\experiment\\csv_12_18\\'
trainfile = PATH + 'train_processed\\train_feature_train_o.csv'
testfile = PATH + 'processed_feature\\3_features.csv'
def read_svm_train_data(train_file, label_col=1, feature_cols=(2, 34)):
    """Read labelled training vectors from a CSV file.

    Args:
        train_file: path to the CSV file, one sample per row.
        label_col: index of the column holding the integer class label.
        feature_cols: half-open (start, stop) range of feature columns.
            Defaults preserve the original hard-coded layout (cols 2..33).

    Returns:
        (train, labels): list of float feature vectors, list of int labels.
    """
    start, stop = feature_cols
    labels = []
    train = []
    with open(train_file) as ftr:
        for row in csv.reader(ftr):
            labels.append(int(row[label_col]))
            train.append([float(value) for value in row[start:stop]])
    return train, labels
# for t in train:
# print(t)
# # print(train)
# print(labels)
def read_svm_test_data(test_file):
    """Read unlabelled feature vectors; every sample gets the dummy label 1.

    Features are taken from columns 1..33 of each CSV row (column 0 is an id).
    """
    labels = []
    test = []
    with open(test_file) as ftr:
        for record in csv.reader(ftr):
            labels.append(1)
            features = [float(record[col]) for col in range(1, 34)]
            test.append(features)
    return test, labels
def svm_classifier(train,labels):
    """Train a libsvm model on the given feature vectors and labels.

    Thin wrapper around svmutil.svm_train; returns the trained model.
    """
    # y, x = svm_read_problem(svm_file)
    model = svm_train(labels, train)
    # Alternative (kept for reference): scikit-learn linear SVC.
    # from sklearn.svm import SVC
    # model = SVC(kernel='linear')
    # model.fit(train, labels)
    return model
# Script entry: train on the labelled CSV, then predict labels for the
# test CSV (its labels are dummies, so p_acc is not meaningful here).
train, labels = read_svm_train_data(trainfile)
train_model = svm_classifier(train, labels)
test, labels = read_svm_test_data(testfile)
testlablel, p_acc, p_val= svm_predict(labels, test, train_model)
print(testlablel) | [
"237768202@qq.com"
] | 237768202@qq.com |
dbbdd80325b36ade3a92fc51452029f8d9e7b58e | d4ca03693383a5bc20bcdf7b7d552bba1d5467ff | /prepare_3comp.py | c3f7453dccf62501b6eac3430daf838f8eb00c62 | [] | no_license | samhaug/beamform_code | abaa1c723fec6143523b4bf8c05e7daa2655584d | 3bb201356056c1ee91a141b057ff7c666171de2a | refs/heads/master | 2023-01-19T22:15:02.272484 | 2020-11-24T23:05:47 | 2020-11-24T23:05:47 | 289,100,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,717 | py | import obspy
from os.path import isdir
from subprocess import call
from obspy.taup import TauPyModel
import numpy as np
from matplotlib import pyplot as plt
model = TauPyModel(model='prem')
# --- Output directories for the per-component SAC files -------------------
if not isdir("./z_comp"):
    call("mkdir ./z_comp", shell=True)
if not isdir("./e_comp"):
    call("mkdir ./e_comp", shell=True)
if not isdir("./n_comp"):
    call("mkdir ./n_comp", shell=True)

# --- Read the three components (vertical, north, east) --------------------
print('reading')
z = obspy.read('data/*BHZ*')
n = obspy.read('data/*BHN*')
e = obspy.read('data/*BHE*')
print('read')

# Per-component lists of "network_station_location" ids, filled later.
z_l = []
n_l = []
e_l = []
all_l = []

# Resample everything to a common 6 Hz rate, then detrend.
z.interpolate(6)
n.interpolate(6)
e.interpolate(6)
#z.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#n.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#e.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
z.detrend()
n.detrend()
e.detrend()
# Trim data to numsamp samples. Remove shorter traces
numsamp = 3882

# For each component: truncate every trace to numsamp samples and record
# the (station, network, location) of any trace that is still too short,
# then remove those traces from the stream.
z_netw = []
z_stat = []
z_loc = []
for idx, tr in enumerate(z):
    z[idx].data = z[idx].data[0:numsamp]
    if len(z[idx].data) != numsamp:
        z_stat.append(z[idx].stats.station)
        z_netw.append(z[idx].stats.network)
        z_loc.append(z[idx].stats.location)
for ii in range(0, len(z_netw)):
    z.remove(z.select(station=z_stat[ii], network=z_netw[ii], location=z_loc[ii])[0])

n_netw = []
n_stat = []
n_loc = []
for idx, tr in enumerate(n):
    n[idx].data = n[idx].data[0:numsamp]
    if len(n[idx].data) != numsamp:
        n_stat.append(n[idx].stats.station)
        n_netw.append(n[idx].stats.network)
        n_loc.append(n[idx].stats.location)
for ii in range(0, len(n_netw)):
    n.remove(n.select(station=n_stat[ii], network=n_netw[ii], location=n_loc[ii])[0])

e_netw = []
e_stat = []
e_loc = []
for idx, tr in enumerate(e):
    e[idx].data = e[idx].data[0:numsamp]
    # NOTE: tr is e[idx], so tr.data reflects the truncation above.
    if len(tr.data) != numsamp:
        e_stat.append(e[idx].stats.station)
        e_netw.append(e[idx].stats.network)
        e_loc.append(e[idx].stats.location)
for ii in range(0, len(e_netw)):
    e.remove(e.select(station=e_stat[ii], network=e_netw[ii], location=e_loc[ii])[0])

# Remove duplicates (same network/station/location appearing twice).
# NOTE(review): removing from a stream while iterating it can skip the
# element after each removal — confirm against obspy Stream semantics.
for tr in z:
    name = "{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location)
    if name not in z_l:
        z_l.append(name)
    else:
        z.remove(tr)
for tr in n:
    name = "{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location)
    if name not in n_l:
        n_l.append(name)
    else:
        n.remove(tr)
for tr in e:
    name = "{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location)
    if name not in e_l:
        e_l.append(name)
    else:
        e.remove(tr)
# Rebuild the id lists from scratch after trimming/deduplication.
z_l = []
n_l = []
e_l = []
all_l = []
# Make list of each trace
for tr in z:
    z_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))
    all_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))
for tr in n:
    n_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))
    all_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))
for tr in e:
    e_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))
    all_l.append("{}_{}_{}".format(tr.stats.network, tr.stats.station, tr.stats.location))

# Remove traces not common to all three components: any id missing from one
# component's list is purged from the other two streams.
for i in (set(all_l) - set(z_l)):
    try:
        for tr in n.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            n.remove(tr)
    except:
        pass
    try:
        for tr in e.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            e.remove(tr)
    except:
        continue
for i in (set(all_l) - set(n_l)):
    try:
        for tr in z.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            z.remove(tr)
    except:
        pass
    try:
        for tr in e.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            e.remove(tr)
    except:
        pass
for i in (set(all_l) - set(e_l)):
    try:
        for tr in n.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            n.remove(tr)
    except:
        pass
    try:
        for tr in z.select(network=i.split('_')[0], station=i.split('_')[1], location=i.split('_')[2]):
            z.remove(tr)
    except:
        pass
# Sort all three streams identically so index i refers to the same
# station on every component.
z.sort(['network', 'station', 'location'])
n.sort(['network', 'station', 'location'])
e.sort(['network', 'station', 'location'])
#print("Aligning on P")
#for idx,tr in enumerate(z):
#    gcarc = tr.stats.sac['gcarc']
#    if tr.stats.sac['evdp'] > 1000:
#        tr.stats.sac['evdp'] *= 1/1000.
#    h = tr.stats.sac['evdp']
#    t = model.get_travel_times(source_depth_in_km=h,
#                               distance_in_degree=gcarc,
#                               phase_list=['ttp'])[0].time
#    s = tr.stats.sampling_rate
#    w = tr.data[int((t-20)*s):int((t+20)*s)]
#    l = int(len(w)/2.)
#    p1 = np.argmax(np.abs(w))
#    z[idx].data = np.roll(z[idx].data,l-p1)
#    e[idx].data = np.roll(e[idx].data,l-p1)
#    n[idx].data = np.roll(n[idx].data,l-p1)
#z.differentiate()
#n.differentiate()
#e.differentiate()
# Write one SAC file per trace; event depth in the N/E headers is converted
# from metres to kilometres when it looks like metres (> 1000).
for tr in z:
    tr.write('z_comp/{}_{}_{}.sac'.format(tr.stats.network, tr.stats.station, tr.stats.location), format='SAC')
for tr in n:
    if tr.stats.sac['evdp'] > 1000:
        tr.stats.sac['evdp'] *= 1/1000.
    tr.write('n_comp/{}_{}_{}.sac'.format(tr.stats.network, tr.stats.station, tr.stats.location), format='SAC')
for tr in e:
    if tr.stats.sac['evdp'] > 1000:
        tr.stats.sac['evdp'] *= 1/1000.
    tr.write('e_comp/{}_{}_{}.sac'.format(tr.stats.network, tr.stats.station, tr.stats.location), format='SAC')
| [
"samhaug@umich.edu"
] | samhaug@umich.edu |
378f25c491719cd94e303ae0346ba2c59e22b785 | db4e4bc5c3626da0063508350862ccfebca7022a | /scripts/1.py | 622d07d00cfbb585da84b6e566ec1545b8ee80a7 | [] | no_license | MuzaffarSoliyev/music | 69fd25bc012e57f8b4b407e2f73d421491da6616 | 2e696639a0212f137eaa0b90c7e4451f96ccb9e1 | refs/heads/master | 2020-03-17T19:34:11.595586 | 2018-05-17T21:10:06 | 2018-05-17T21:10:06 | 133,866,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | import gc
import os
import ast
import sys
import configparser
import librosa
import graphviz
import numpy as np
import pygame
from tkinter import *
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import keras
from keras import backend as K
from keras.models import load_model
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
from audiomanip.audiostruct import AudioStruct
from audiomanip.audiomodels import ModelZoo
from audiomanip.audioutils import AudioUtils
from audiomanip.audioutils import MusicDataGenerator
import multiprocessing.dummy as multiprocessing
def analize():
    """Classify the audio file named by the global *file_name*.

    Loads the pretrained CNN, computes a mel-spectrogram of the first
    song_samples samples and returns the argmax class index
    (0 metal, 1 disco, 2 classical, 3 hiphop, 4 jazz — see sgenres).
    """
    model = load_model("../models/gtzan_hguimaraes.h5")
    model.load_weights("music_tagger_cnn.h5")
    song_samples = 660000   # number of raw samples fed to the model
    sn_fft = 2048           # FFT window size
    shop_length = 512       # hop length between frames
    # Kept as documentation of the class index order; not used directly.
    sgenres = {'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4}
    signal, sr = librosa.load(file_name)
    # Calculate the melspectrogram of the audio and use log scale
    # NOTE(review): despite the comment above, no log is applied here —
    # confirm whether the model expects raw power mel-spectrograms.
    melspec = librosa.feature.melspectrogram(signal[:song_samples], sr = sr, n_fft = sn_fft, hop_length = shop_length).T[:128,]
    melspec = melspec[None,:]   # add batch dimension
    y = model.predict(melspec)
    pr = np.array(y)
    index = np.argmax(pr)
    return index
#plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# Module-level state shared between the GUI callback and the workers.
# NOTE: `id` shadows the builtin of the same name.
clicks = 0
file_name = ""    # path of the audio file to classify (read by analize)
file_name1 = ""   # path of the audio file to play
id = 0            # predicted genre index (0..4)
def main():
    """Print the predicted genre name and display its cover image.

    Reads the module-level ``id`` set by the classifier. The five
    copy-pasted if-blocks are collapsed into a single lookup table;
    behavior for ids 0..4 is unchanged, other ids do nothing (as before).
    """
    global id
    # Index order matches the model's output classes
    # ({'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4}).
    genres = {
        0: ("metal", "metal.jpg"),
        1: ("disco", "disco.jpg"),
        2: ("classical", "classical.jpg"),
        3: ("hiphop", "hiphop.jpg"),
        4: ("jazz", "jazz.jpg"),
    }
    entry = genres.get(id)
    if entry is not None:
        name, image_file = entry
        print(name)
        im = Image.open(image_file)
        im.show()
def click_buttom1():
    """Button callback: classify a fixed sample file, then show the genre
    image and play the audio concurrently via a thread pool."""
    global file_name1
    global file_name
    global id
    file_name = '../dataset/123/1.au'
    file_name1 = '1.mp3'
    id = analize()
    # Run main() (image + print) and play1() (audio) in parallel.
    # NOTE(review): play1 is not defined anywhere in this file — this will
    # raise NameError unless it is provided elsewhere; confirm.
    p = multiprocessing.Pool()
    p.map(lambda f: f(), [main, play1])
    p.close()
    p.join()
# Build the Tk window with a single button that triggers classification.
root = Tk()
root.title("Музыкальный классификатор")  # "Music classifier"
root.geometry("300x400")
btn1 = Button(text="1", background="#555", foreground="#ccc", padx="60", pady="8", font="20", activebackground="#666", activeforeground = "#ddd", command=click_buttom1)
btn1.pack()
root.mainloop()
| [
"muzaffar_soliyev@mail.ru"
] | muzaffar_soliyev@mail.ru |
4657907d92c2d3cfce0d9eba8af849e653c57d19 | fd9d43f9a2826c1c985472d1067317c8d399d3e2 | /qa/rpc-tests/test_framework/util.py | ed2771d4601b389047fc4c84faef3b5ffeda8fc0 | [
"MIT"
] | permissive | Bankitt-Official/bankitt | 006cd394c202ad1f58d253fc4fc2f8c4d1db03a0 | ba6ec5c2d7099eeafab972634e9dddd676b0503f | refs/heads/master | 2020-03-07T11:08:23.628577 | 2018-06-09T09:40:25 | 2018-06-09T09:40:25 | 127,448,326 | 0 | 1 | MIT | 2018-05-12T06:14:49 | 2018-03-30T16:16:35 | C++ | UTF-8 | Python | false | false | 21,631 | py | # Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Bankitt Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    """Pin MOCKTIME to the timestamp matching the cached 200-block chain."""
    #For backward compatibility of the python scripts
    #with previous versions of the cache, set MOCKTIME
    #to regtest genesis time + (201 * 156)
    global MOCKTIME
    MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
    """Reset MOCKTIME to 0 (mocktime off)."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the current MOCKTIME value (0 means disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing."""
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    # When coverage is enabled, wrap the proxy so each RPC call is logged.
    coverage_logfile = coverage.get_filename(
        COVERAGE_DIR, node_number) if COVERAGE_DIR else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
    """Return True once the node reports its masternode sync finished."""
    result = node.mnsync("status")
    return result['IsSynced']
def wait_to_sync(node):
    """Block (polling every 0.5s) until the node's masternode sync is done."""
    synced = False
    while not synced:
        synced = get_mnsync_status(node)
        time.sleep(0.5)
def p2p_port(n):
    """Deterministic per-process P2P port for node *n* (PID offset avoids
    clashes between concurrently running test suites)."""
    return 11000 + n + os.getpid() % 999

def rpc_port(n):
    """Deterministic per-process RPC port for node *n*; always
    p2p_port(n) + 1000."""
    return 12000 + n + os.getpid() % 999
def check_json_precision():
    """Make sure the json library in use does not lose precision when
    round-tripping 8-decimal BTC amounts; raises RuntimeError if it does."""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return how many bytes *hex_string* encodes (ValueError if invalid)."""
    raw = bytearray.fromhex(hex_string)
    return len(raw)

def bytes_to_hex_str(byte_str):
    """Encode bytes as lowercase ASCII hex text."""
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')

def hex_str_to_bytes(hex_str):
    """Decode ASCII hex text back into bytes."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)

def str_to_b64str(string):
    """Encode a text string as base64 text (UTF-8 on the wire)."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
    """Poll every *wait* seconds until all nodes report the same block count."""
    while True:
        heights = [node.getblockcount() for node in rpc_connections]
        if len(set(heights)) <= 1:
            break
        time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        # Compare every node's mempool (as a set) against node 0's.
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)
def sync_masternodes(rpc_connections):
    """Block until every node's masternode sync has completed."""
    for node in rpc_connections:
        wait_to_sync(node)
# Maps node index -> subprocess.Popen handle of the running daemon.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* and write its regtest
    bankitt.conf; returns the datadir path."""
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "bankitt.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_url(i, rpchost=None):
    """Return the authenticated RPC URL for node *i* (default host 127.0.0.1)."""
    return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
    '''
    Wait for bankittd to start. This means that RPC is accessible and fully initialized.
    Raise an exception if bankittd exits during initialization.
    '''
    while True:
        if process.poll() is not None:
            raise Exception('bankittd exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break  # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                raise  # unknown IO error
        except JSONRPCException as e:  # Initialization phase
            if e.error['code'] != -28:  # RPC in warmup?
                raise  # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bankittds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BANKITTD", "bankittd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: bankittd started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bankitt.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for i in range(num_nodes):
        # Write the per-node datadir/config; the returned path is unused here
        # (the previous unused `datadir` local has been dropped).
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bankittd and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BANKITTD", "bankittd")
    # RPC tests still depend on free transactions
    args = [binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime())]
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: bankittd started, waiting for RPC to come up"
    url = rpc_url(i, rpchost)
    wait_for_bitcoind_start(bitcoind_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: RPC succesfully started"
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bankittds, return RPC connections to them
    """
    if extra_args is None: extra_args = [None for i in range(num_nodes)]
    if binary is None: binary = [None for i in range(num_nodes)]
    rpcs = []
    try:
        for i in range(num_nodes):
            rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
    except:  # If one node failed to start, stop the others
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Ask node *i* to stop, wait for the process, drop its handle."""
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Send stop to every node and empty the list (closing connections)."""
    for node in nodes:
        node.stop()
    del nodes[:]  # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    """Set mocktime *t* on every node."""
    for node in nodes:
        node.setmocktime(t)
def wait_bitcoinds():
    """Wait for every tracked daemon process to exit, then forget them."""
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-way connect: tell *from_connection* to dial node *node_num*."""
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* in both directions."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """Return the index of the output of *txid* whose value equals *amount*.

    Raises RuntimeError if no output matches.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    # Shuffle so repeated calls spend different coins.
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [{"txid": self_txid, "vout": vout}]
    outputs = {to_node.getnewaddress(): float(amount)}
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Fee is min_fee plus a random number of increments.
    fee = min_fee + fee_increment*random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless *thing1* is strictly greater than *thing2*."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type *exc*.

    Raises AssertionError if nothing is raised, or if a different
    Exception subclass is raised instead.
    """
    try:
        fun(*args, **kwds)
    except exc:
        return
    except Exception as err:
        raise AssertionError("Unexpected exception raised: " + type(err).__name__)
    raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as hexadecimal."""
    try:
        int(string, 16)
    except Exception as err:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err))
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* looks like a lowercase hex hash
    of the given length (pass length=None to skip the length check)."""
    # NOTE: `basestring` is Python 2 only; this module targets Python 2.
    if not isinstance(string, basestring):
        raise AssertionError("Expected a string, got type %r" % type(string))
    elif length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    elif not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Check whether this item matches every key/value in to_match.
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find == True:
            num_matched = num_matched+1
        # A matching item must also carry every expected key/value.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Quantize *amount* down to 8 decimal places (1 satoshi resolution)."""
    quantum = Decimal('0.00000001')
    return Decimal(amount).quantize(quantum, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Mine and split coins until the node holds at least *count* confirmed
    utxos; returns the resulting listunspent array."""
    # Mine enough blocks for the split transactions plus coinbase maturity.
    node.generate(int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Each iteration spends one utxo into two halves, netting +1 utxo.
    for i in xrange(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Confirm everything still in the mempool.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 large OP_RETURN txouts to splice into transactions.

    Used to bloat transactions so only a few fit per block. Output is
    byte-identical to the original implementation; the Python-2-only
    `xrange` loops and quadratic string concatenation are replaced with
    string multiplication (works on Python 2 and 3).
    """
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 <512 little-endian> + 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # Each txout: 8-byte zero value, compact-size script length fd0402
    # (0x0204 = 516 = 4-byte script header + 512 data bytes), then the script.
    per_txout = "0000000000000000" + "fd0402" + script_pubkey
    # Leading 0x81 = 129 outputs (these 128 plus the change output the
    # caller appends).
    return "81" + per_txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Build and sign a transaction spending output 0 of *coinbase* to
    *to_address*; returns the signed raw hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Spend every utxo in *utxos* (which is consumed) into one big
    transaction each; returns the broadcast txids."""
    addr = node.getnewaddress()
    txids = []
    for i in xrange(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the OP_RETURN outputs in place of the raw tx's output
        # count bytes (positions 92:94 of the hex).
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry whose id equals *key*.

    Raises IndexError if the node reports no softfork with that id.
    """
    info = node.getblockchaininfo()
    matches = [row for row in info['bip9_softforks'] if row['id'] == key]
    if matches:
        return matches[0]
    raise IndexError('key:"%s" not found' % key)
| [
"you@example.com"
] | you@example.com |
fd63524205e878b38ec89f3dc9234285d4d205c8 | db24aad1b8e813a23688cfef4603978e3356b155 | /fastagram/posts/models/__init__.py | 678b999d2e2f24295eb5a61c01ff7b53a8c99bf2 | [] | no_license | yevgnenll/fastagram | b251a9f5ce1b6b30c09c3d5ab3d23a7388a35509 | 0923588e2d6b1e8a2e15bde0e130e64765bcc0ed | refs/heads/develop | 2021-01-10T01:14:16.196153 | 2016-04-06T09:22:18 | 2016-04-06T09:22:18 | 54,695,513 | 2 | 1 | null | 2016-04-06T09:22:18 | 2016-03-25T04:57:31 | Python | UTF-8 | Python | false | false | 75 | py | from .post import Post
from .comment import Comment
from .like import Like
| [
"yevgnenll@gmail.com"
] | yevgnenll@gmail.com |
7de8fb97bfda1fbea1f83d3bc24dd88497e0a7b5 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_Seasonal_Minute_LSTM.py | aac22081d0e2dc667f76605a2ce93909efa6bbb6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 163 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['ConstantTrend'] , ['Seasonal_Minute'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
b644afc68ff6f1165731f049805fffcf796fa801 | 4fbb26ab080a1c2da2511f461b205778fbbd5371 | /SConstruct | 9e0eb5d4ea2d6ddf517c2e1b9685cd61f7637ab5 | [
"MIT"
] | permissive | jorgeacortes/cpp-tdd-boilerplate | b1dcd9ae36c5019f7b891f6a3e7f42ae967ebf55 | e0755485cd518cae2db1c9f8fa0eb5400db4f7bb | refs/heads/master | 2021-02-28T15:56:12.425366 | 2020-08-15T09:09:31 | 2020-08-15T09:09:31 | 245,710,734 | 0 | 0 | MIT | 2020-08-15T09:09:32 | 2020-03-07T21:51:20 | Python | UTF-8 | Python | false | false | 287 | #!python
# Scons build system
# Launching a script to be able to build in a variant dir.
# Top-level SConstruct: delegates the actual build description to
# ``project.scons`` so that all build artifacts land under ``build/``
# (``duplicate=0`` keeps sources in place instead of copying them there).
import sys
import os
cwd = os.getcwd() # Exporting SCons launch directory (made visible to project.scons via exports)
# Default options
#SetOption('silent','yes')
SConscript('project.scons', variant_dir='build', duplicate=0, exports='cwd')
| [
"jorgeacortes@users.noreply.github.com"
] | jorgeacortes@users.noreply.github.com | |
7579afdc211671b68087eccbc1c6af6919ff700c | b65331c9102294897bc883858052cdfc7f16eb82 | /science/4_maxtrices.py | 9f6803ce30efaf77fd2a21e4bf43f34856f026a1 | [] | no_license | Seanistar/Carmen | ecd4bcc9d238ba6bdf5efa3c872c4e2416e26da0 | 39d23bb1883b4821346319952824b394fc775a23 | refs/heads/master | 2020-05-25T14:57:10.654789 | 2016-12-06T04:19:15 | 2016-12-06T04:19:15 | 70,120,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # functions for working with matrices
#
def shape(A):
    """Return (num_rows, num_cols) of the matrix A (a list of row lists)."""
    if not A:
        return len(A), 0
    return len(A), len(A[0])
def make_matrix(num_rows, num_cols, entry_fn):
    """Build a num_rows x num_cols matrix whose (i, j) entry is entry_fn(i, j)."""
    matrix = []
    for i in range(num_rows):
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
def matrix_add(A, B):
    """Return the entrywise sum of A and B.

    Raises ArithmeticError when the two matrices have different shapes.
    """
    if shape(A) != shape(B):
        raise ArithmeticError("cannot add matrices with different shapes")
    rows, cols = shape(A)
    return make_matrix(rows, cols, lambda i, j: A[i][j] + B[i][j])
| [
"seanistar@gmail.com"
] | seanistar@gmail.com |
7f7a5339e93a8e6c7dc50c9a9674e968f50a253d | af6bba388a4a14878d49198f244db15b1fbd4107 | /django_proyect/django_proyect/django_proyect/ven/bin/wheel | 2e360caa1334f2f6840f3d5448db590d284414de | [] | no_license | migueljimenezjc/django-boards | 6ea80d07f7d7d1b6f7ce7761a79f8f0aedeefbe1 | 4af8becd856117e8885cca0cd4f3692a56cf3512 | refs/heads/master | 2020-04-07T22:51:01.482182 | 2018-11-23T05:59:25 | 2018-11-23T05:59:25 | 158,787,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/Users/migueljc/django_proyect/django_proyect/django_proyect/ven/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the ``wheel`` tool:
# it normalizes argv[0] and hands control to wheel's CLI entry point.
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Strip any '-script.py(w)' / '.exe' wrapper suffix so argv[0] shows the
    # plain command name before delegating to the real CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"miguel.jimenez.contreras@hotmail.com"
] | miguel.jimenez.contreras@hotmail.com | |
3494dd013fb5bf92aae2de7a8709a810974e7e13 | 923db33b9fc1a19c9128b3ed459a43b132a62de9 | /rl/core.py | b8aed0727206b5d2415ae29fae1574183966704d | [] | no_license | Fan-Lu/Memory-Q-Network | 5fc38bbe56f065c63b91ff7f05c099047db60ec8 | ff631842c42452460eb676ee4a6597178cdc0fa6 | refs/heads/master | 2021-05-11T19:36:49.198727 | 2018-01-15T01:18:01 | 2018-01-15T01:18:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,717 | py | # -*- coding: utf-8 -*-
import warnings
from copy import deepcopy
import numpy as np
from keras.callbacks import History
from rl.callbacks import TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer, CallbackList
import time
class Agent(object):
    """Abstract base class for all implemented agents.
    Each agent interacts with the environment (as defined by the `Env` class) by first observing the
    state of the environment. Based on this observation the agent changes the environment by performing
    an action.
    Do not use this abstract base class directly but instead use one of the concrete agents implemented.
    Each agent realizes a reinforcement learning algorithm. Since all agents conform to the same
    interface, you can use them interchangeably.
    To implement your own agent, you have to implement the following methods:
    - `forward`
    - `backward`
    - `compile`
    - `load_weights`
    - `save_weights`
    - `layers`
    # Arguments
        processor (`Processor` instance): See [Processor](#processor) for details.
    """
    def __init__(self, processor=None):
        self.processor = processor
        self.training = False  # toggled by fit()/test()
        self.step = 0  # global step counter across all episodes
    def get_config(self):
        """Configuration of the agent for serialization.
        """
        return {}
    def fit(self, env, nb_steps, envs=None, switch_rate=None, action_repetition=1, callbacks=None, verbose=1,
            visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
            nb_max_episode_steps=None):
        """Trains the agent on the given environment.
        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_steps (integer): Number of training steps to be performed.
            envs (list of `Env` instances): Optional pool of environments; at episode start the
                active environment may be re-sampled from this pool (context switching).
                Defaults to `[env]`.
            switch_rate (integer): Number of steps after which a new environment is sampled
                from `envs`. `None` disables context switching.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.
        # Returns
            A `keras.callbacks.History` instance that recorded the entire training process.
        """
        if not self.compiled:
            raise RuntimeError('Your tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
        self.training = True
        callbacks = [] if not callbacks else callbacks[:]
        #Environment sampling
        env_number = 0
        if(envs == None):
            envs = [env]
        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        # Support both old (underscored) and newer Keras callback-list APIs.
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()
        episode = 0
        #Need this to keep track of when to perform a context (env) switch.
        context_steps = 0
        self.step = 0
        observation = None
        episode_reward = None
        episode_step = None
        did_abort = False
        try:
            while self.step < nb_steps:
                #time.sleep(0.05)
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    #Sample from list of environments
                    #Sample every switch_rate steps
                    if(switch_rate != None and context_steps // switch_rate >= 1):
                        context_steps = context_steps % switch_rate
                        env_number = np.random.randint(len(envs))
                    elif(switch_rate == None or envs == [env]):
                        env_number = 0
                    env = envs[env_number]
                    callbacks._set_env(env)
                    episode_step = 0
                    episode_reward = 0.
                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    assert observation is not None
                    # Perform random starts at beginning of episode and do not record them into the experience.
                    # This slightly changes the start position between games.
                    nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
                    for _ in range(nb_random_start_steps):
                        if start_step_policy is None:
                            action = env.action_space.sample()
                        else:
                            action = start_step_policy(observation)
                        if self.processor is not None:
                            action = self.processor.process_action(action)
                        callbacks.on_action_begin(action)
                        observation, reward, done, info = env.step(action)
                        observation = deepcopy(observation)
                        if self.processor is not None:
                            observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
                        callbacks.on_action_end(action)
                        if done:
                            warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                            observation = deepcopy(env.reset())
                            if self.processor is not None:
                                observation = self.processor.process_observation(observation)
                            break
                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None
                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation, env_number)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = 0.
                accumulated_info = {}
                done = False
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, done, info = env.step(action)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, done, info = self.processor.process_step(observation, r, done, info)
                    # Accumulate numeric debug info across repeated actions.
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    callbacks.on_action_end(action)
                    reward += r
                    if done:
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done, env_number=env_number)
                episode_reward += reward
                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode,
                    'info': accumulated_info,
                }
                callbacks.on_step_end(episode_step, step_logs)
                context_steps += 1
                episode_step += 1
                self.step += 1
                if done:
                    # We are in a terminal state but the agent hasn't yet seen it. We therefore
                    # perform one more forward-backward call and simply ignore the action before
                    # resetting the environment. We need to pass in `terminal=False` here since
                    # the *next* state, that is the state of the newly reset environment, is
                    # always non-terminal by convention.
                    self.forward(observation, env_number)
                    self.backward(0., terminal=False, env_number=env_number)
                    # This episode is finished, report and reset.
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                    }
                    callbacks.on_episode_end(episode, episode_logs)
                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()
        return history
    def test(self, env, envs=None, switch_rate=None, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True,
             nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1):
        """Runs the agent in evaluation mode (no learning) on the given environment.
        # Arguments
            env (`Env` instance): Environment that the agent interacts with.
            envs (list of `Env` instances): Optional pool of environments sampled between
                episodes (context switching); defaults to `[env]`.
            switch_rate (integer): Number of steps between environment re-sampling when
                `envs` is given. `None` disables switching.
            nb_episodes (integer): Number of episodes to perform.
            action_repetition (integer): Number of times the agent repeats the same action
                without observing the environment again.
            callbacks (list of callbacks): Callbacks to apply during testing.
            visualize (boolean): If `True`, the environment is visualized during testing.
            nb_max_episode_steps (integer): Maximum number of steps per episode before a
                terminal state is forced.
            nb_max_start_steps (integer): Upper bound for random start steps per episode.
            start_step_policy (`lambda observation: action`): Policy used during the random
                start steps; a random action if `None`.
            verbose (integer): 0 for no logging, >= 1 for episode logging.
        # Returns
            A `keras.callbacks.History` instance that recorded the testing process.
        """
        if not self.compiled:
            raise RuntimeError('Your tried to test your agent but it hasn\'t been compiled yet. Please call `compile()` before `test()`.')
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
        self.training = False
        self.step = 0
        context_steps = 0
        callbacks = [] if not callbacks else callbacks[:]
        #Environment sampling
        if(envs == None):
            envs = [env]
        env_number = 0
        if verbose >= 1:
            callbacks += [TestLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        # Support both old (underscored) and newer Keras callback-list APIs.
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_episodes': nb_episodes,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_test_begin()
        callbacks.on_train_begin()
        for episode in range(nb_episodes):
            #Sample from list of environments
            #Sample every switch_rate steps
            if(switch_rate != None and context_steps // switch_rate >= 1):
                context_steps = context_steps % switch_rate
                env_number = np.random.randint(len(envs))
            elif(switch_rate == None or envs == [env]):
                env_number = 0
            env = envs[env_number]
            callbacks._set_env(env)
            callbacks.on_episode_begin(episode)
            episode_reward = 0.
            episode_step = 0
            # Obtain the initial observation by resetting the environment.
            self.reset_states()
            observation = deepcopy(env.reset())
            if self.processor is not None:
                observation = self.processor.process_observation(observation)
            assert observation is not None
            # Perform random starts at beginning of episode and do not record them into the experience.
            # This slightly changes the start position between games.
            nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
            for _ in range(nb_random_start_steps):
                if start_step_policy is None:
                    action = env.action_space.sample()
                else:
                    action = start_step_policy(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                callbacks.on_action_begin(action)
                observation, r, done, info = env.step(action)
                observation = deepcopy(observation)
                if self.processor is not None:
                    observation, r, done, info = self.processor.process_step(observation, r, done, info)
                callbacks.on_action_end(action)
                if done:
                    warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(observation)
                    break
            # Run the episode until we're done.
            done = False
            while not done:
                callbacks.on_step_begin(episode_step)
                action = self.forward(observation, env_number)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = 0.
                accumulated_info = {}
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, d, info = env.step(action)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, d, info = self.processor.process_step(observation, r, d, info)
                    callbacks.on_action_end(action)
                    reward += r
                    # Accumulate numeric debug info across repeated actions.
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    if d:
                        done = True
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    done = True
                self.backward(reward, terminal=done, env_number=env_number)
                episode_reward += reward
                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'episode': episode,
                    'info': accumulated_info,
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1
                context_steps += 1
                #time.sleep(0.1)
            # We are in a terminal state but the agent hasn't yet seen it. We therefore
            # perform one more forward-backward call and simply ignore the action before
            # resetting the environment. We need to pass in `terminal=False` here since
            # the *next* state, that is the state of the newly reset environment, is
            # always non-terminal by convention.
            self.forward(observation, env_number)
            self.backward(0., terminal=False, env_number=env_number)
            # Report end of episode.
            episode_logs = {
                'episode_reward': episode_reward,
                'nb_steps': episode_step,
            }
            callbacks.on_episode_end(episode, episode_logs)
        callbacks.on_train_end()
        self._on_test_end()
        return history
    def reset_states(self):
        """Resets all internally kept states after an episode is completed.
        """
        pass
    def forward(self, observation, env_number):
        """Takes an observation from the environment and returns the action to be taken next.
        If the policy is implemented by a neural network, this corresponds to a forward (inference) pass.
        # Argument
            observation (object): The current observation from the environment.
        # Returns
            The next action to be executed in the environment.
        """
        raise NotImplementedError()
    def backward(self, reward, terminal, env_number):
        """Updates the agent after having executed the action returned by `forward`.
        If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.
        # Argument
            reward (float): The observed reward after executing the action returned by `forward`.
            terminal (boolean): `True` if the new state of the environment is terminal.
        """
        raise NotImplementedError()
    def compile(self, optimizer, metrics=[]):
        """Compiles an agent and the underlaying models to be used for training and testing.
        # Arguments
            optimizer (`keras.optimizers.Optimizer` instance): The optimizer to be used during training.
            metrics (list of functions `lambda y_true, y_pred: metric`): The metrics to run during training.
        """
        raise NotImplementedError()
    def load_weights(self, filepath):
        """Loads the weights of an agent from an HDF5 file.
        # Arguments
            filepath (str): The path to the HDF5 file.
        """
        raise NotImplementedError()
    def save_weights(self, filepath, overwrite=False):
        """Saves the weights of an agent as an HDF5 file.
        # Arguments
            filepath (str): The path to where the weights should be saved.
            overwrite (boolean): If `False` and `filepath` already exists, raises an error.
        """
        raise NotImplementedError()
    @property
    def layers(self):
        """Returns all layers of the underlying model(s).
        If the concrete implementation uses multiple internal models,
        this method returns them in a concatenated list.
        """
        raise NotImplementedError()
    @property
    def metrics_names(self):
        """The human-readable names of the agent's metrics. Must return as many names as there
        are metrics (see also `compile`).
        """
        return []
    def _on_train_begin(self):
        """Callback that is called before training begins.
        """
        pass
    def _on_train_end(self):
        """Callback that is called after training ends.
        """
        pass
    def _on_test_begin(self):
        """Callback that is called before testing begins.
        """
        pass
    def _on_test_end(self):
        """Callback that is called after testing ends.
        """
        pass
class Processor(object):
    """Abstract base class for implementing processors.
    A processor acts as a coupling mechanism between an `Agent` and its `Env`. This can
    be necessary if your agent has different requirements with respect to the form of the
    observations, actions, and rewards of the environment. By implementing a custom processor,
    you can effectively translate between the two without having to change the underlaying
    implementation of the agent or environment.
    Do not use this abstract base class directly but instead use one of the concrete implementations
    or write your own.
    """
    def process_step(self, observation, reward, done, info):
        """Processes an entire step by applying the processor to the observation, reward, and info arguments.
        # Arguments
            observation (object): An observation as obtained by the environment.
            reward (float): A reward as obtained by the environment.
            done (boolean): `True` if the environment is in a terminal state, `False` otherwise.
            info (dict): The debug info dictionary as obtained by the environment.
        # Returns
            The tuple (observation, reward, done, info) with all elements after being processed.
        """
        observation = self.process_observation(observation)
        reward = self.process_reward(reward)
        info = self.process_info(info)
        return observation, reward, done, info
    def process_observation(self, observation):
        """Processes the observation as obtained from the environment for use in an agent and
        returns it.
        """
        return observation
    def process_reward(self, reward):
        """Processes the reward as obtained from the environment for use in an agent and
        returns it.
        """
        return reward
    def process_info(self, info):
        """Processes the info as obtained from the environment for use in an agent and
        returns it.
        """
        return info
    def process_action(self, action):
        """Processes an action predicted by an agent but before execution in an environment.
        """
        return action
    def process_state_batch(self, batch):
        """Processes an entire batch of states and returns it.
        """
        return batch
    @property
    def metrics(self):
        """The metrics of the processor, which will be reported during training.
        # Returns
            List of `lambda y_true, y_pred: metric` functions.
        """
        return []
    @property
    def metrics_names(self):
        """The human-readable names of the agent's metrics. Must return as many names as there
        are metrics (see also `compile`).
        """
        return []
class MultiInputProcessor(Processor):
    """Regroups multi-modality observations into per-modality batches.

    Some environments emit one tuple of observations per timestep (e.g. a
    camera image plus joint angles), i.e.
    ``[(modality1_t, ..., modalityn_t) for t in T]``, while a multi-input
    policy network expects per-modality batches,
    ``[[modality1_1, ..., modality1_T], ..., [modalityn_1, ..., modalityn_T]]``.
    This processor performs that transposition.

    # Arguments
        nb_inputs (integer): The number of modalities; must match the number
            of inputs of the policy network.
    """
    def __init__(self, nb_inputs):
        self.nb_inputs = nb_inputs

    def process_state_batch(self, state_batch):
        # For each state (a window of observation tuples), transpose the
        # per-timestep tuples into per-modality sequences, then collect those
        # sequences per modality across the whole batch.
        input_batches = [[] for _ in range(self.nb_inputs)]
        for state in state_batch:
            per_modality = [[] for _ in range(self.nb_inputs)]
            for observation in state:
                assert len(observation) == self.nb_inputs
                for modality_seq, obs_part in zip(per_modality, observation):
                    modality_seq.append(obs_part)
            for batch, modality_seq in zip(input_batches, per_modality):
                batch.append(modality_seq)
        # One array per modality, shaped (batch, window, ...).
        return [np.array(batch) for batch in input_batches]
# Note: the API of the `Env` and `Space` classes are taken from the OpenAI Gym implementation.
# https://github.com/openai/gym/blob/master/gym/core.py
class Env(object):
    """The abstract environment class that is used by all agents. This class has the exact
    same API that OpenAI Gym uses so that integrating with it is trivial. In contrast to the
    OpenAI Gym implementation, this class only defines the abstract methods without any actual
    implementation.
    """
    # Default (reward, action, observation) metadata; concrete subclasses
    # are expected to override these.
    reward_range = (-np.inf, np.inf)
    action_space = None
    observation_space = None
    def step(self, action):
        """Run one timestep of the environment's dynamics.
        Accepts an action and returns a tuple (observation, reward, done, info).
        # Arguments
            action (object): An action provided by the environment.
        # Returns
            observation (object): Agent's observation of the current environment.
            reward (float) : Amount of reward returned after previous action.
            done (boolean): Whether the episode has ended, in which case further step() calls will return undefined results.
            info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
        """
        raise NotImplementedError()
    def reset(self):
        """
        Resets the state of the environment and returns an initial observation.
        # Returns
            observation (object): The initial observation of the space. Initial reward is assumed to be 0.
        """
        raise NotImplementedError()
    def render(self, mode='human', close=False):
        """Renders the environment.
        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.)
        # Arguments
            mode (str): The mode to render with.
            close (bool): Close all open renderings.
        """
        raise NotImplementedError()
    def close(self):
        """Override in your subclass to perform any necessary cleanup.
        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        raise NotImplementedError()
    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).
        # Returns
            Returns the list of seeds used in this env's random number generators
        """
        raise NotImplementedError()
    def configure(self, *args, **kwargs):
        """Provides runtime configuration to the environment.
        This configuration should consist of data that tells your
        environment how to run (such as an address of a remote server,
        or path to your ImageNet data). It should not affect the
        semantics of the environment.
        """
        raise NotImplementedError()
    def __del__(self):
        # NOTE(review): close() raises NotImplementedError on this base class,
        # so garbage-collecting a bare Env emits an ignored exception; concrete
        # subclasses must override close().
        self.close()
    def __str__(self):
        return '<{} instance>'.format(type(self).__name__)
class Space(object):
    """Abstract state/action space with the same API as OpenAI Gym's ``Space``.

    Concrete subclasses must implement random sampling and membership testing.
    """
    def sample(self, seed=None):
        """Draw a uniformly random element of this space."""
        raise NotImplementedError()
    def contains(self, x):
        """Return a boolean specifying whether ``x`` is a valid member of this space."""
        raise NotImplementedError()
| [
"phillip.martin012@gmail.com"
] | phillip.martin012@gmail.com |
a3c3350d331393ea83eede2ee19ca9d984c1bfd0 | 589385876626ffdf0e3a960a191eed9b90fa6eb2 | /trunk/aztk/web/user_homepage.py | 0caf42cd3ffc7a4a1681b32a784c6440e5ed2011 | [
"BSD-3-Clause"
] | permissive | BGCX261/zoto-server-svn-to-git | bb7545852bd52d6626f3b2b9c0b1d5834eb08201 | 73abf60264ae5a6b610d19e25be833f0754b160e | refs/heads/master | 2021-01-21T23:04:02.366636 | 2015-08-25T15:16:12 | 2015-08-25T15:16:12 | 41,602,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | """
dyn_pages/user_homepage.py
Author: Trey Stout
Date Added: ?
User homepage. Also the starting point for all user interior pages.
"""
## STD LIBS
## OUR LIBS
from zoto_base_page import zoto_base_page
from dyn_image_handler import dyn_image_handler
from user_albums import user_albums
from user_galleries import user_galleries
from user_publish import user_publish
from feeds import feeds
from other_sizes import other_sizes
## 3RD PARTY LIBS
from twisted.web.util import redirectTo
from twisted.internet.defer import Deferred
from nevow import loaders, inevow, tags as T
class user_homepage(zoto_base_page):
    """Nevow resource for a user's homepage; also the root of all the user's
    interior pages (img/feeds/albums/galleries/publish/avatar children)."""
    # Client-side libraries pulled into the page by the base template.
    local_js_includes = [
        "countries.js",
        "static_news.js",
        "select_box.lib.js",
        "pagination.lib.js",
        "e_paper.lib.js",
        "globber.lib.js",
        "tag_cloud.lib.js",
        "comments.lib.js",
        "albums.lib.js",
        "featured_media.lib.js",
        "widget.lib.js",
        "image_detail.lib.js",
        "table.lib.js",
        "lookahead.lib.js",
        "detect.lib.js",
        "third_party/swfobject.js",
        "messages.lib.js"
    ]
    # Page-specific JS controller.
    page_manager_js = "managers/user_homepage.js"
    def __init__(self, username):
        zoto_base_page.__init__(self)
        # Usernames are stored/compared lowercase throughout.
        self.username = username.lower()
    def _get_browse_username(self, ctx):
        # The user being browsed is the owner of this homepage.
        return self.username
    def render_my_photo_link(self, ctx, data):
        # URL of this user's photo listing page.
        return '/%s/photos/' % self.username
    # (Dead code kept for reference: unfinished avatar-permission helper.)
    # def get_avatar_permission():
    #     def handle_info(perm_info):
    #         if perm_info.get('view_flag', 3):
    #
    #     d = self.app.api.permissions.get_image_permissions(self.username, user_info['avatar_id'])
    #     d.addCallback(handle_info)
    def avatar_handler(self, ctx, size):
        """Return a resource serving this user's avatar at *size*, falling
        back to a generic avatar image when no avatar is set or the browsing
        user may not view it. Resolution happens through a Deferred chain:
        auth username -> user id -> user info -> permission check."""
        request = inevow.IRequest(ctx)
        color_option = self._get_color_option(ctx)
        bg, fg = color_option.split("_")
        def handle_avatar_display(result):
            # result comes from the zoto_user_can_view_media DB query below.
            if not result['can_view']:
                # generic avatar
                return redirectTo('/image/avatar-%d.jpg' % size, request)
            else:
                # browser has permission to view avatar, so show it
                new_segments = [str(size), self.avatar_id]
                handler = dyn_image_handler(self.username, self.app, self.log)
                handler.set_segments(new_segments)
                return handler
        def get_auth_username(self):
            # Extract the logged-in username from the auth cookie
            # (format presumably "username:hash" -- confirm with auth code).
            d2 = Deferred()
            auth_hash = request.getCookie('auth_hash')
            if auth_hash:
                self.auth_username = auth_hash.split(':')[0].lower()
            else:
                self.auth_username = ""
            d2.callback(0)
            return d2
        # look up id
        def handle_info(result):
            # result is an (error_code, payload) pair from the users API.
            if result[0] != 0:
                return redirectTo('/image/avatar-%d.jpg' % size, request)
            user_info = result[1]
            # Does the user have an avatar selected
            if user_info.get('avatar_id', None):
                self.avatar_id = user_info['avatar_id']
                # then check if username can view it
                d3 = self.app.db.query("""
                    SELECT zoto_user_can_view_media(
                        zoto_get_user_id(%s),
                        zoto_get_image_id(zoto_get_user_id(%s), %s),
                        zoto_get_user_id(%s)
                    ) AS can_view
                    """, (self.username, self.username, user_info['avatar_id'], self.auth_username), single_row=True)
                d3.addCallback(handle_avatar_display)
                return d3
            else:
                # generic avatar
                return redirectTo('/image/bg_%s/%s/avatar-%d.jpg' % (bg, fg, size), request)
        def get_user_info(result):
            # result is (error_code, user_id); fetch full info on success.
            if result[0] != 0:
                return redirectTo('/image/bg_%s/%s/avatar-%d.jpg' % (bg, fg, size), request)
            return self.app.api.users.get_info(result[1], result[1])
        d = get_auth_username(self)
        d.addCallback(lambda _: self.app.api.users.get_user_id(self.username))
        d.addCallback(get_user_info)
        d.addCallback(handle_info)
        return d
    # Child resources for the user's sub-pages (nevow URL traversal).
    def child_img(self, ctx):
        return dyn_image_handler(self.username, self.app, self.log)
    def child_feeds(self, ctx):
        return feeds(self.username, self.app, self.log)
    def child_albums(self, ctx):
        return user_albums(self.username)
    def child_galleries(self, ctx):
        return user_galleries(self.username)
    def child_publish(self, ctx):
        return user_publish(self.username)
    def child_avatar_small(self, ctx):
        # Size code 11 -- presumably the small avatar variant; confirm with
        # dyn_image_handler's size table.
        return self.avatar_handler(ctx, 11)
    def child_avatar_large(self, ctx):
        # Size code 18 -- presumably the large avatar variant.
        return self.avatar_handler(ctx, 18)
    def child_other_sizes(self, ctx):
        return other_sizes(self.username)
    def childFactory(self, ctx, name):
        # An empty path segment maps back to this homepage itself.
        if name == "":
            return self
# Expose the avatar handlers under URL names containing '.'/'-' (characters
# that are illegal in Python identifiers) by aliasing them onto the class.
setattr(user_homepage, "child_avatar.jpg", user_homepage.child_avatar_large)
setattr(user_homepage, "child_avatar-small.jpg", user_homepage.child_avatar_small)
| [
"you@example.com"
] | you@example.com |
7e19b1f65c28d7e8d33d9f9df1406f25cab5200c | fb652a77dd6dba0c971ac052271e1e03ff7c0d6e | /settings/migrations/0008_auto_20181113_0656.py | 9d9fe0d0ca6b8a4d8782e2b78f2ff35543fb2503 | [] | no_license | ryosuwito/mt-commerce | 5fa8419650d8c089bc8baf75322389141b4522af | 90359dada36ab903dbf30f3ab6616a3c4ed3f655 | refs/heads/master | 2022-12-11T05:22:31.037960 | 2019-03-07T10:59:58 | 2019-03-07T10:59:58 | 169,371,940 | 0 | 0 | null | 2022-12-08T01:21:07 | 2019-02-06T07:56:27 | HTML | UTF-8 | Python | false | false | 592 | py | # Generated by Django 2.0.8 on 2018-11-13 06:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen headerlink/footerlink 'addr' to a 400-char optional CharField."""

    dependencies = [
        ('settings', '0007_auto_20181113_0655'),
    ]

    operations = [
        migrations.AlterField(
            model_name='footerlink',
            name='addr',
            field=models.CharField(blank=True, max_length=400, null=True),
        ),
        migrations.AlterField(
            model_name='headerlink',
            name='addr',
            field=models.CharField(blank=True, max_length=400, null=True),
        ),
    ]
| [
"ryo@ryolaptop.ryosuwito.com"
] | ryo@ryolaptop.ryosuwito.com |
e665003a52abaad2e29156ec090c509b0c5759f6 | e2ca5913252a7ff0d59d15bfefe22c87536d68fb | /venv/Scripts/easy_install-3.7-script.py | 12eda0b77eb3be0c1ee59c3898afbf47c87c6add | [] | no_license | ACAA06/traveler | 74ae2792d1991233bc9d764b7538255f3cdc5a75 | 264cda3fd5fa64caef939c73221b7d7cb77c0a8f | refs/heads/master | 2022-07-15T10:34:53.354080 | 2020-05-11T18:20:36 | 2020-05-11T18:20:36 | 263,122,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #!D:\academics\endsemopenlab\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0] (strip the "-script.py"/".exe" suffixes added by the
    # Windows launcher) and hand off to setuptools' console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"clementjoe99@gmail.com"
] | clementjoe99@gmail.com |
1055e5be5726757fa80bb3ebe93883dab06b38ae | f8ca9769a359795dc649c46e299cd9b3bfca1864 | /forum/spiders/cancer_cancerforums_spider.py | f3a2c67fe128c84d117d7c3248a56d9a73bcdde2 | [] | no_license | florencefantine/ehealth_scraper | d4093a67543f653de200e6610eaaf65842aa322f | 886ed12d0a605584796dea11b532883c1e86d09a | refs/heads/master | 2021-01-10T11:39:58.270130 | 2015-11-24T22:09:34 | 2015-11-24T22:09:34 | 45,852,359 | 1 | 6 | null | 2015-11-24T20:52:28 | 2015-11-09T16:51:54 | Python | UTF-8 | Python | false | false | 3,237 | py | # -*- coding: utf-8 -*-
import scrapy
import hashlib
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.selector import Selector
from forum.items import PostItemsList
import re
from bs4 import BeautifulSoup
import logging
import string
import dateparser
import time
# Spider for crawling Adidas website for shoes
class ForumsSpider(CrawlSpider):
    """Crawl cancerforums.net lung/prostate forums and extract one item
    per post from every thread page."""

    name = "cancer_cancerforums_spider"
    allowed_domains = ["www.cancerforums.net"]
    start_urls = [
        "http://www.cancerforums.net/forums/13-Lung-Cancer-Forum",
        "http://www.cancerforums.net/forums/14-Prostate-Cancer-Forum"
    ]

    rules = (
        # Follow thread-title links to the individual thread pages and
        # hand them to parsePostsList.
        Rule(LinkExtractor(
            restrict_xpaths='//h3/a[@class="title"]',
        ), callback='parsePostsList'),
        # Follow the "next" arrow to paginate through the forum listing.
        Rule(LinkExtractor(
            restrict_xpaths='//span[@class="prev_next"]/a[@rel="next"]'
        ), follow=True),
    )

    def cleanText(self, str):
        """Strip HTML tags and collapse whitespace/control characters."""
        soup = BeautifulSoup(str, 'html.parser')
        return re.sub(" +|\n|\r|\t|\0|\x0b|\xa0", ' ', soup.get_text()).strip()

    def getDate(self, date_str):
        """Convert a forum date string (e.g. "Fri Feb 12, 2010 1:54 pm") to
        a formatted UTC timestamp; return the raw string if parsing fails."""
        try:
            date = dateparser.parse(date_str)
            # NOTE(review): '%s' is a platform-specific (glibc) strftime
            # extension yielding a local-time epoch -- confirm portability.
            epoch = int(date.strftime('%s'))
            # Bugfix: the format string previously read "%H:%M%S" -- the
            # colon between minutes and seconds was missing.
            create_date = time.strftime("%Y-%m-%d'T'%H:%M:%S%z", time.gmtime(epoch))
            return create_date
        except Exception:
            return date_str

    def parsePostsList(self, response):
        """Build a PostItemsList item for each post on a thread page."""
        sel = Selector(response)
        posts = sel.xpath('//ol[@class="posts"]/li[@class="postbitlegacy postbitim postcontainer old"]')
        condition = "cancer"
        items = []
        # Thread title and URL are shared by every post on the page.
        topic = response.xpath('//h1/span[@class="threadtitle"]/a/text()').extract_first()
        url = response.url
        for post in posts:
            item = PostItemsList()
            item['author'] = post.xpath('.//div[@class="popupmenu memberaction"]/a/strong/text()').extract_first()
            item['author_link'] = post.xpath('.//div[@class="popupmenu memberaction"]/a/@href').extract_first()
            item['condition'] = condition
            item['create_date'] = self.getDate(post.xpath('.//span[@class="date"]/text()').extract_first().replace(',','').strip())
            item['domain'] = "".join(self.allowed_domains)
            # Post body: join blockquote text nodes, strip HTML, squeeze whitespace.
            item['post'] = re.sub(r'\s+',' ',self.cleanText(" ".join(post.xpath('.//div[@class="content"]//blockquote/text()').extract())))
            item['topic'] = topic
            item['url']=url
            items.append(item)
        return items
| [
"joyce@selfcarecatalysts.com"
] | joyce@selfcarecatalysts.com |
631e47868021d1bce89292c230803398fd53ffc7 | ff8a16483ccd71fcb05b6d9b8174e4b037dc7c3a | /KnightTour.py | 12ca45e5e411f1fb893b1e513eea38688d2f19b4 | [] | no_license | Mihyar-30614/Backtracking_Algorithm | 9543051d195b4adca6e5518e0ff73ecbfdadc223 | f159a3392bf59e88b1c8317a8df6ee539f1f39e1 | refs/heads/master | 2020-11-29T02:52:18.694158 | 2019-12-28T04:57:19 | 2019-12-28T04:57:19 | 230,000,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Cheesboard size
size = 8
# Helper Function to print Solution
# Helper to display a finished tour: move numbers on a size x size grid,
# each zero-padded to two digits.
def printSolution(board):
    for row_idx in range(size):
        rendered = ' '.join(str(board[row_idx][col_idx]).zfill(2)
                            for col_idx in range(size))
        # Trailing space matches the original cell-by-cell printing.
        print(rendered + ' ')
# Helper function to check if i,j are in n*n board
# A move is legal when it lands on the size x size board and the target
# square is still unvisited (marked -1).
def isSafe(board, new_x, new_y):
    inside = 0 <= new_x < size and 0 <= new_y < size
    return inside and board[new_x][new_y] == -1
# Solver function to solve the issue
def solver(board, current_x, current_y, move_x, move_y, counter):
# If all visited, we're done
if counter == size**2:
return True
# Try all the possible solutions for current position
for i in range(8):
new_x = current_x + move_x[i]
new_y = current_y + move_y[i]
if isSafe(board, new_x, new_y):
board[new_x][new_y] = counter
if solver(board, new_x, new_y, move_x, move_y, counter+1):
return True
else:
# Backtracking solution
board[new_x][new_y] = -1
return False
# Driver Function
if __name__ == "__main__":
# Initialize Board with -1, Knight start at first position
board = [[-1 for i in range(size)] for i in range(size)]
board[0][0] = 0
# Possible moves for a Knight
move_x = [2, 1, -1, -2, -2, -1, 1, 2]
move_y = [1, 2, 2, 1, -1, -2, -2, -1]
# Counter for the Knight's move
counter = 1
if not solver(board, 0, 0, move_x, move_y, counter):
print("Solution could not be found.")
else:
printSolution(board) | [
"eng.mihyear@gmail.com"
] | eng.mihyear@gmail.com |
62d19f421d2dd886221143b7b6571930a7ba1a9d | 940ad5e16ede0c344066c8f41f8b9788061bb0ec | /ABC064/a.py | 070c897bf5f4382877ef61cc0306a02253741854 | [] | no_license | niwanowa/AtCoder | 03fe69c97d5503aabb5a3a42ee40c9824b128289 | f4218cf7e13d026a2a439a82340e208c10f5cef5 | refs/heads/master | 2021-07-24T05:23:40.624506 | 2020-05-12T12:36:01 | 2020-05-12T12:36:01 | 167,095,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | r, g, b = map(int, input().split())
if (4*100+g*10+b) % 4 == 0:
print("YES")
else:
print("NO") | [
"be_girl@outlook.jp"
] | be_girl@outlook.jp |
af067fb2a0ba3b52e560e01b6981692f5a0250ec | 80b09d3113d130ef1015bf07a42bf48f5f07ff86 | /strings.py | 5825711c6803cb53ecf365ae3d945f1ca17a2ccc | [] | no_license | neelkanthk/pythonpractice | df33eef806ef47f53394619559e47d5e588e4b35 | b7160bb96f518a22d83e681e68fd68ba69bbb39e | refs/heads/master | 2020-04-03T07:26:32.530829 | 2018-10-29T18:51:44 | 2018-10-29T18:51:44 | 155,102,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # string operations
firstname = "Neelkanth"
lastname = "Kaushik"
fullname = firstname + " " + lastname # concatenate
print len(fullname) # string length
print firstname.lower() # convert to lower case
print lastname.upper() # convert to upper case
print fullname.strip("N") # remove char from last or start
print fullname.split(" ") # split a string by a character
print fullname.count("e") # count number of character
print fullname.replace("n", "N") #replace n by N
| [
"neelkanthkaushik2014@gmail.com"
] | neelkanthkaushik2014@gmail.com |
e383b84ceea267035e9ef73eafa68ff3e951079b | a6170d9d555ea3864a2e6fed054ae1af07393751 | /lab_03/fft/fft.py | 928247596b4274b52eb3cd129f39b86e41fc39e2 | [] | no_license | svinkapeppa/comp_math | c179f86be07d3b28e7061a65107b8f3b323ecbed | 8e87822389c3040e4fb6dc46c6e9e38a38fad6d5 | refs/heads/master | 2020-04-02T10:52:58.496614 | 2018-12-13T14:31:02 | 2018-12-13T14:31:02 | 154,359,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import argparse
from utils import czt
def transform(input_path, output_path):
    """Read whitespace-separated numbers from input_path, run czt on them,
    and write the transformed values space-separated to output_path."""
    tokens = []
    with open(input_path) as src:
        for line in src:
            tokens.extend(line.strip().split())
    values = [float(tok) for tok in tokens]

    with open(output_path, 'w') as dst:
        for value in czt(values, 3):
            dst.write('{} '.format(value))
if __name__ == '__main__':
    # Command-line entry point: optional input/output paths with defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', action='store', nargs='?',
                        default='examples/example.txt',
                        help='File containing the input sequence')
    parser.add_argument('-o', '--output', action='store', nargs='?',
                        default='results/result.txt',
                        help='File containing the result of the transformation')
    args = parser.parse_args()

    transform(args.input, args.output)
| [
"erubanenko@gmail.com"
] | erubanenko@gmail.com |
792fba203b2bf6fd8ce56981e0f9ed7dc868c823 | 4e879e994720100a9354895af2bb9be33b38a42b | /xUdemy_tkinter/Sect3_1stTkinterApp/45_HelloWorld_01.py | 6391cf50cb181338076904a71e1464d8797f5147 | [] | no_license | pepitogrilho/learning_python | 80314ec97091238ed5cc3ed47422d2e6073a3280 | bbdc78a9a0513c13d991701859bcfe7a8e614a49 | refs/heads/master | 2023-04-09T15:07:08.866721 | 2023-04-02T18:45:47 | 2023-04-02T18:45:47 | 230,527,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # -*- coding: utf-8 -*-
"""
"""
import tkinter as tk
from tkinter import ttk
# Create the top-level window and start the Tk event loop (blocks until
# the window is closed).
root = tk.Tk()
root.mainloop()
| [
"pepitogrilho@gmail.com"
] | pepitogrilho@gmail.com |
770dd8f7e0374c04f25a59af4dcd0b8a8d34cff5 | 3491031eba6fdbd4dcd1c03b5ff2a95223c2b2ba | /allfiles/Kap5/1dheatv4.py | e97693f29df0879bb72b840e97ad7ab421ec27ba | [] | no_license | mdruger/tkt4140 | 4cb758f0f28634899ac704ad00a556730e3e5122 | dcbef70125141ec77e3e418ddc4f14e19b8a5ff0 | refs/heads/master | 2020-06-12T02:15:35.409603 | 2014-12-29T12:34:30 | 2014-12-29T12:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,810 | py | # The equation solved is the parabolic equaiton
#
# du d du
# -- = k -- --
# dt dx dx
#
# along with boundary conditions
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import scipy as sc
import scipy.sparse
import scipy.sparse.linalg
import time
from numpy import newaxis, min, max
import matplotlib.animation as animation
# change some default values to make plots more readable on the screen
LNWDT = 3; FNT = 15  # line-width and font-size presets
matplotlib.rcParams['lines.linewidth'] = LNWDT; matplotlib.rcParams['font.size'] = FNT
def explicit_python_solver(u_left=1.0, u_right=0.0, nx=20, r=0.5, xmin=0.0, xmax=1.0, tmin=0.0, tmax=1.0, k=1.0):
    """Explicit (FTCS) solver for u_t = k*u_xx using plain Python loops.

    Dirichlet BCs u(xmin)=u_left, u(xmax)=u_right, zero initial condition
    in the interior.  The timestep is derived from the Fourier number r.
    Returns (x, u): node coordinates and the final field (shape (nx+1, 1)).
    """
    dx = float(xmax-xmin)/nx
    u = np.zeros((nx+1, 1), 'd')
    u_old = np.zeros((nx+1, 1), 'd')
    x = np.linspace(xmin, xmax, nx+1)

    # Set boundary conditions on the solution itself.
    # Bugfix: the right boundary was previously written only to u_old[-1]
    # and then overwritten by "u_old[:] = u[:]" every step, so any
    # u_right != 0 was silently lost.
    u[0] = u_left
    u[-1] = u_right

    dt = r*dx**2/k              # timestep from Fourier number, dx and diffusivity
    m = round((tmax-tmin)/dt)   # number of temporal intervals
    time = np.linspace(tmin, tmax, m)

    # advance in time
    for t in time:
        u_old[:] = u[:]
        for i in range(1, nx):
            u[i] = r*(u_old[i-1] + u_old[i+1]) + (1.0 - 2.0*r)*u_old[i]
    return x, u
def explicit_numpy_solver(u_left=1.0, u_right=0.0, nx=20, r=0.5, xmin=0.0, xmax=1.0, tmin=0.0, tmax=1.0, k=1.0):
    """Explicit (FTCS) solver for u_t = k*u_xx, vectorized with numpy
    slicing.  Dirichlet BCs and zero interior initial condition; returns
    (x, u) with u of shape (nx+1, 1)."""
    dx = (xmax - xmin) / float(nx)
    x = np.linspace(xmin, xmax, nx + 1)
    u = np.zeros((nx + 1, 1), 'd')
    u[0], u[-1] = u_left, u_right

    dt = r * dx ** 2 / k                  # timestep from the Fourier number
    n_steps = round((tmax - tmin) / dt)   # number of temporal intervals

    # March forward in time, updating all interior nodes at once.
    for _ in np.linspace(tmin, tmax, n_steps):
        u[1:-1] = r * (u[0:-2] + u[2:]) + (1.0 - 2.0 * r) * u[1:-1]
    return x, u
def implicit_numpy_solver(u_left=1.0, u_right=0.0, nx=20, r=0.5, xmin=0.0, xmax=1.0, tmin=0.0, tmax=1.0, k=1.0, theta=1.0):
    """theta-scheme solver for u_t = k*u_xx (theta=1: implicit Euler,
    theta=0.5: Crank-Nicolson) with a sparse tridiagonal solve per step.

    Dirichlet BCs u(xmin)=u_left, u(xmax)=u_right, zero interior initial
    condition.  Returns (x, u) with u of shape (nx+1, 1).
    """
    dx = float(xmax-xmin)/nx
    u = np.zeros((nx+1, 1), 'd')
    x = np.linspace(xmin, xmax, nx+1)
    u[0] = u_left
    u[-1] = u_right

    dt = r*dx**2/k              # timestep from Fourier number, dx and diffusivity
    m = round((tmax-tmin)/dt)   # number of temporal intervals
    time = np.linspace(tmin, tmax, m)

    # Tridiagonal system for the nx-1 interior unknowns.
    diagonals = np.zeros((3, nx-1))
    diagonals[0, :] = -r*theta
    diagonals[1, :] = 1 + 2.0*r*theta
    diagonals[2, :] = -r*theta
    As = sc.sparse.spdiags(diagonals, [-1, 0, 1], nx-1, nx-1, format='csc')  # sparse matrix instance

    # right-hand side for the interior unknowns
    d = np.zeros((nx-1, 1), 'd')

    # advance in time and solve the tridiagonal system for each t
    for t in time:
        d[:] = u[1:-1] + r*(1 - theta)*(u[0:-2] - 2.0*u[1:-1] + u[2:])
        d[0] += r*theta*u[0]
        # Bugfix: the right-boundary contribution was missing, so any
        # u_right != 0 was treated as 0 in the implicit part of the scheme.
        d[-1] += r*theta*u[-1]
        w = sc.sparse.linalg.spsolve(As, d)
        u[1:-1] = w[:, None]
    return x, u
def implicit_numpy_solver_v2(u_left=1.0, u_right=0.0, nx=20, r=0.5, xmin=0.0, xmax=1.0, tmin=0.0, tmax=1.0, k=1.0, theta=1.0):
    """Like implicit_numpy_solver, but also records the field after every
    timestep.  Returns (x, time, uv) where uv[:, i] is the field after
    step i (shape (nx+1, m)).
    """
    dx = float(xmax-xmin)/nx
    u = np.zeros((nx+1, 1), 'd')
    x = np.linspace(xmin, xmax, nx+1)
    u[0] = u_left
    u[-1] = u_right

    dt = r*dx**2/k              # timestep from Fourier number, dx and diffusivity
    m = round((tmax-tmin)/dt)   # number of temporal intervals
    time = np.linspace(tmin, tmax, m)

    # History array: one column per timestep, boundary rows pre-filled.
    uv = np.zeros((nx+1, m), 'd')
    uv[0, :] = u_left
    uv[-1, :] = u_right

    # Tridiagonal system for the nx-1 interior unknowns.
    diagonals = np.zeros((3, nx-1))
    diagonals[0, :] = -r*theta
    diagonals[1, :] = 1 + 2.0*r*theta
    diagonals[2, :] = -r*theta
    As = sc.sparse.spdiags(diagonals, [-1, 0, 1], nx-1, nx-1, format='csc')  # sparse matrix instance

    # right-hand side for the interior unknowns
    d = np.zeros((nx-1, 1), 'd')

    # advance in time, solving the tridiagonal system and recording each step
    for i, t in enumerate(time):
        d[:] = u[1:-1] + r*(1 - theta)*(u[0:-2] - 2.0*u[1:-1] + u[2:])
        d[0] += r*theta*u[0]
        # Bugfix: the right-boundary contribution was previously omitted.
        d[-1] += r*theta*u[-1]
        w = sc.sparse.linalg.spsolve(As, d)
        u[1:-1] = w[:, None]
        uv[1:-1, i] = w[:]
    return x, time, uv
## Main program starts here
nx = 20 # number of nodes
L = 1.0 # length of beam
tmax = 0.25 # time length
theta = 1.0 # parameter for implicitness: theta=0.5 Crank-Nicholson, theta=1.0 fully implicit
solvernames = [explicit_python_solver,explicit_numpy_solver,implicit_numpy_solver]
lstyle = ['r-', ':', '.', '-.', '--']
i = 0
legends=[]
fig0 = plt.figure(0)
for solve in solvernames:
tic = time.time()
x, u = solve(u_left=100.0, u_right=0.0, nx=nx, r=0.5, xmin=0.0, xmax=L, tmin=0.0, tmax=tmax, k=1.0)
toc = time.time()
cputime = toc - tic
legends.append(solve.__name__)
print legends[i], '\t cpu time = ', cputime
plt.plot(x,u,lstyle[i])
i += 1
plt.legend(legends)
plt.title('Temperature field')
plt.xlabel('Position on beam')
plt.ylabel('Temperature')
## Solve with implicit solver and get solution for each timestep
x, time, uv = implicit_numpy_solver_v2(u_left=100.0, u_right=0.0, nx=nx, r=0.5, xmin=0.0, xmax=L, tmin=0.0, tmax=tmax, k=1.0)
def update_line(num, data, line):
    """FuncAnimation callback: display frame ``num``.

    ``data`` is [x-coordinates, field-history]; column ``num`` of the
    history is the field at that timestep.
    """
    x_vals, field = data
    line.set_data(x_vals, field[:, num])
    return line,
# Animate the implicit solver's recorded history: one frame per timestep.
fig1 = plt.figure()
data = [x,uv]
l, = plt.plot([], [], 'b-')
# Axis limits from the spatial range and the initial (largest) field.
plt.xlim(min(data[0]),max(data[0]))
plt.ylim(min(data[1][:,0]),max(data[1][:,0]))
plt.xlabel('x')
plt.title('Animation of temperature field')

line_ani = animation.FuncAnimation(fig1, update_line, uv.shape[1], fargs=(data, l),
                                   interval=5, blit=False)
#line_ani.save('lines.mp4')
plt.show()
| [
"leif.r.hellevik@ntnu.no"
] | leif.r.hellevik@ntnu.no |
9618d0ea8bcd0e39563734aa75c10cfbf72a6dde | bbd69601912a3361d788efd03a47f9d4e3bac09e | /unittests/test_propgriddefs.py | ba6c67748213bd2fab4de8c1bcd982fff4d23034 | [] | no_license | wxWidgets/Phoenix | 56929484460a0399a8f1d9582bc77c20aa14748d | a1184286703cf24c4b88e5bc14cf2979c1b1ea00 | refs/heads/master | 2023-09-01T07:10:17.437093 | 2023-08-31T05:38:01 | 2023-08-31T05:38:01 | 5,078,061 | 2,268 | 677 | null | 2023-09-09T17:06:59 | 2012-07-17T06:22:25 | Python | UTF-8 | Python | false | false | 1,278 | py | import unittest
from unittests import wtc
import wx
import wx.propgrid as pg
#---------------------------------------------------------------------------
class propgriddefs_Tests(wtc.WidgetTestCase):
    """Smoke test: every expected wx.propgrid module-level constant exists.

    Each attribute access below raises AttributeError if the binding no
    longer exposes the constant.
    """

    def test_propgriddefs1(self):
        pg.PG_INVALID_VALUE
        pg.PG_DONT_RECURSE
        pg.PG_BASE_OCT
        pg.PG_BASE_DEC
        pg.PG_BASE_HEX
        pg.PG_BASE_HEXL
        pg.PG_PREFIX_NONE
        pg.PG_PREFIX_0x
        pg.PG_PREFIX_DOLLAR_SIGN
        pg.PG_KEEP_STRUCTURE
        pg.PG_RECURSE
        pg.PG_INC_ATTRIBUTES
        pg.PG_RECURSE_STARTS
        pg.PG_FORCE
        pg.PG_SORT_TOP_LEVEL_ONLY
        pg.PG_FULL_VALUE
        pg.PG_REPORT_ERROR
        pg.PG_PROPERTY_SPECIFIC
        pg.PG_EDITABLE_VALUE
        pg.PG_COMPOSITE_FRAGMENT
        pg.PG_UNEDITABLE_COMPOSITE_FRAGMENT
        pg.PG_VALUE_IS_CURRENT
        pg.PG_PROGRAMMATIC_VALUE
        pg.PG_SETVAL_REFRESH_EDITOR
        pg.PG_SETVAL_AGGREGATED
        pg.PG_SETVAL_FROM_PARENT
        pg.PG_SETVAL_BY_USER
        pg.PG_LABEL
        pg.PG_LABEL_STRING
        pg.PG_NULL_BITMAP
        pg.PG_COLOUR_BLACK
        pg.PG_DEFAULT_IMAGE_SIZE
#---------------------------------------------------------------------------
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"robin@alldunn.com"
] | robin@alldunn.com |
53a4aac191b28eea03daa7302e8977d24061472a | e8cac4db53b22a28f7421ede9089bd3d4df81c82 | /TaobaoSdk/Domain/AuthorizeMessage.py | 3d3a557b07f3221566eec45fb68865e80848f9d1 | [] | no_license | wangyu0248/TaobaoOpenPythonSDK | af14e84e2bada920b1e9b75cb12d9c9a15a5a1bd | 814efaf6e681c6112976c58ec457c46d58bcc95f | refs/heads/master | 2021-01-19T05:29:07.234794 | 2012-06-21T09:31:27 | 2012-06-21T09:31:27 | 4,738,026 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,921 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 增量API
# @author wuliang@maimiaotech.com
# @date 2012-06-09 16:55:43
# @version: 0.0.16
from datetime import datetime
import os
import sys
import time
def __getCurrentPath():
    # Absolute directory containing this module, with symlinks resolved.
    return os.path.dirname(os.path.realpath(__file__))
# Make sibling Domain modules importable when this file is loaded directly.
if __getCurrentPath() not in sys.path:
    sys.path.insert(0, __getCurrentPath())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">增量API</SPAN>
class AuthorizeMessage(object):
    """Authorization record delivered by the Taobao increment API.

    Describes one user's authorization of an ISV application.  The
    constructor coerces the raw key/value payload into typed attributes.
    """

    def __init__(self, kargs=None):
        """Build the record from a raw payload dict (may be omitted).

        Bugfix: the original signature used a mutable default argument
        (``kargs=dict()``); ``None`` avoids the shared-default pitfall.
        """
        # Bugfix: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(AuthorizeMessage, self).__init__()

        # ISV AppKey (e.g. "12023790").
        self.app_key = None
        # Taobao nick of the authorizing user.
        self.nick = None
        # Start of the current authorization period; reset only when a new
        # period begins after the previous one has ended.
        self.start_date = None
        # Expiry time of the authorization.
        self.end_date = None
        # When the user first granted authorization to this ISV.
        self.created = None
        # Last modification time of the authorization record.
        self.modified = None
        # Authorization status: "normal" or "expired".
        self.status = None
        # Whether the authorization is in effect (change messages delivered).
        self.valid = None

        self.__init(kargs if kargs is not None else dict())

    def _newInstance(self, name, value):
        """Coerce a raw payload value to the declared type of field ``name``."""
        propertyType = self._getPropertyType(name)
        if propertyType == bool:
            return value
        elif propertyType == datetime:
            # Timestamps arrive as "YYYY-mm-dd HH:MM:SS".
            format = "%Y-%m-%d %H:%M:%S"
            return datetime.strptime(value, format)
        elif propertyType == str:
            # NOTE(review): Python 2 idiom (unicode -> utf-8 str); under
            # Python 3 this yields bytes -- confirm intended behavior.
            return value.encode("utf-8")
        else:
            return propertyType(value)

    def _getPropertyType(self, name):
        """Map a field name to a Python type via its declared schema type."""
        properties = {
            "app_key": "String",
            "nick": "String",
            "start_date": "Date",
            "end_date": "Date",
            "created": "Date",
            "modified": "Date",
            "status": "String",
            "valid": "Boolean",
        }
        nameType = properties[name]
        pythonType = None
        if nameType == "Number":
            pythonType = int
        elif nameType == "String":
            pythonType = str
        elif nameType == 'Boolean':
            pythonType = bool
        elif nameType == "Date":
            pythonType = datetime
        elif nameType == 'Field List':
            # Bugfix: this branch previously read "pythonType == str"
            # (a no-op comparison), leaving pythonType as None.
            pythonType = str
        elif nameType == 'Price':
            pythonType = float
        elif nameType == 'byte[]':
            pythonType = str
        else:
            # Complex types: resolve the class from its sibling Domain module.
            pythonType = getattr(
                sys.modules[os.path.basename(
                    os.path.dirname(os.path.realpath(__file__))) + "." + nameType],
                nameType)
        return pythonType

    def __init(self, kargs):
        # Copy each known field from the payload, coercing its type.
        # ("key in kargs" replaces the Python-2-only dict.has_key.)
        if "app_key" in kargs:
            self.app_key = self._newInstance("app_key", kargs["app_key"])

        if "nick" in kargs:
            self.nick = self._newInstance("nick", kargs["nick"])

        if "start_date" in kargs:
            self.start_date = self._newInstance("start_date", kargs["start_date"])

        if "end_date" in kargs:
            self.end_date = self._newInstance("end_date", kargs["end_date"])

        if "created" in kargs:
            self.created = self._newInstance("created", kargs["created"])

        if "modified" in kargs:
            self.modified = self._newInstance("modified", kargs["modified"])

        if "status" in kargs:
            self.status = self._newInstance("status", kargs["status"])

        if "valid" in kargs:
            self.valid = self._newInstance("valid", kargs["valid"])
| [
"liyangmin@maimiaotech.com"
] | liyangmin@maimiaotech.com |
36f68e24f00be8957d914c829267f443aa391fd7 | 6e373b40393fb56be4437c37b9bfd218841333a8 | /Level_8/Lecture_8/Lecture_8/urls.py | 7ab7ccb82c8b8800412050eedcb9ac79244bbb63 | [] | no_license | mahto4you/Django-Framework | 6e56ac21fc76b6d0352f004a5969f9d4331defe4 | ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b | refs/heads/master | 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """Lecture_8 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from enroll import views
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Site root, handled by the enroll app's home view.
    path('', views.home),
]
| [
"mahto4you@gmail.com"
] | mahto4you@gmail.com |
39e9192043571cd8f92f301ed58923d316e6ea5b | 15863dafe261decf655286de2fcc6e67cadef3d8 | /website/apps/advisor_portal/views/loan_profile_v1.py | 22f8087c08cbb5b4ecde950fc3832e48e7998731 | [] | no_license | protoprojects/worksample | 5aa833570a39d5c61e0c658a968f28140694c567 | f1a8cd8268d032ea8321e1588e226da09925b7aa | refs/heads/master | 2021-06-26T17:34:10.847038 | 2017-09-14T00:14:03 | 2017-09-14T00:14:03 | 103,463,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,040 | py | import logging
import datetime
from django.db.models import Prefetch, BooleanField, Case, Value, When, Q
from django.http import Http404
from rest_framework import viewsets, decorators, status, filters
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_extensions.mixins import NestedViewSetMixin
from advisor_portal.views.mixins import AdvisorTokenAuthMixin
from advisor_portal.filters import LoanProfileInProgressFilter
from advisor_portal.views import CRUD_ACTIONS, ENDPOINT_PROPERTY_METHODS
from advisor_portal.views.mixins import AdvisorSetMixin
from advisor_portal.views.loan_profile_v1_common import (
AdvisorLoanProfileV1BorrowerBaseView, BorrowerResourcesMixin,
CoborrowerResourcesMixin, CommonAddressView,
RestrictKindCreation, RestrictIncomesKindCreation,
HoldingAssetsOwnershipMixin, SelectForUpdateMixin, LiabilitiesRestrictionMixin,
)
from advisor_portal.paginators import (
SmallLimitOffsetPagination, LargePagePagination
)
from advisor_portal.permissions import (
AllowAdvisorPermission, LoanProfileModifyOperationsPermission,
)
from advisor_portal.serializers.loan_profile_v1 import (
AddressV1Serializer,
AdvisorLoanProfileV1ComplexSerializer,
BorrowerV1Serializer,
CoborrowerV1Serializer,
CreditRequestResponseSerializer,
EmploymentV1Serializer,
ExpenseV1Serializer,
HoldingAssetV1Serializer,
InsuranceAssetV1Serializer,
VehicleAssetV1Serializer,
IncomeV1Serializer,
LiabilityV1Serializer,
LoanProfileV1Serializer,
)
from loans.models import (
AddressV1, BorrowerV1, CoborrowerV1, EmploymentV1, ExpenseV1,
HoldingAssetV1, InsuranceAssetV1, VehicleAssetV1,
IncomeV1, LiabilityV1, LoanProfileV1,
)
from mismo_credit.models import CreditRequestResponse
from mismo_credit.tasks import start_credit_pull
from box.api_v1 import box_file_get
logger = logging.getLogger('sample.advisor_portal.views')
#
# Complex
#
class AdvisorLoanProfileV1ComplexView(AdvisorTokenAuthMixin,
                                      AdvisorSetMixin,
                                      viewsets.GenericViewSet,
                                      viewsets.mixins.RetrieveModelMixin,
                                      viewsets.mixins.CreateModelMixin,):
    """
    Complex (nested) loan profile endpoint: accepts a single JSON document
    describing a loan profile and creates the profile together with all of
    its related objects in one request.
    """
    permission_classes = (IsAuthenticated, AllowAdvisorPermission,)
    serializer_class = AdvisorLoanProfileV1ComplexSerializer
    def create(self, request, *args, **kwargs):
        """
        Overridden to avoid echoing the (potentially incomplete) nested
        payload back to the client; returning only the new profile's ID
        is enough for the portal.
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
    def retrieve(self, request, *args, **kwargs):
        """
        Overridden to attach transient `borrower`/`coborrower` attributes
        that the complex serializer expects (the model itself only exposes
        the `borrowers` reverse relation).
        """
        instance = self.get_object()
        # The portal treats the most recently added borrower as "the" borrower.
        instance.borrower = instance.borrowers.last()
        if instance.borrower:
            instance.coborrower = instance.borrower.coborrower
        serializer = self.get_serializer(instance)
        return Response(serializer.data)
    def get_queryset(self):
        # Advisors only ever see their own loan profiles.
        return self.request.user.loan_profilesV1.all()
# Route bindings for the complex endpoint: POST creates a full nested
# profile, GET retrieves one.
advisor_loan_profile_complex_create_view = AdvisorLoanProfileV1ComplexView.as_view({'post': 'create'})
advisor_loan_profile_complex_view = AdvisorLoanProfileV1ComplexView.as_view({'get': 'retrieve'})
#
# RESTful
#
# Main
class AdvisorLoanProfileV1View(AdvisorTokenAuthMixin,
                               SelectForUpdateMixin,
                               AdvisorSetMixin,
                               NestedViewSetMixin,
                               viewsets.GenericViewSet,
                               viewsets.mixins.CreateModelMixin,
                               viewsets.mixins.UpdateModelMixin,
                               viewsets.mixins.ListModelMixin,
                               viewsets.mixins.RetrieveModelMixin,
                               viewsets.mixins.DestroyModelMixin):
    """
    Base loan profile view.

    Exposes CRUD plus detail routes (Box storage creation, Encompass sync,
    credit pull, RESPA trigger, lock hand-off) for the advisor portal.
    Only active profiles in an editable Encompass sync state are listed,
    and deletion is soft (``is_active = False``).
    """
    permission_classes = [IsAuthenticated, AllowAdvisorPermission, LoanProfileModifyOperationsPermission, ]
    serializer_class = LoanProfileV1Serializer
    filter_class = LoanProfileInProgressFilter
    pagination_class = SmallLimitOffsetPagination
    filter_backends = [filters.OrderingFilter] + api_settings.DEFAULT_FILTER_BACKENDS
    # Profiles whose RESPA was triggered within the last week float to the top.
    ordering = ('-respa_triggered_within_last_week', '-updated')
    ordering_fields = ('updated', 'borrowers__first_name', 'borrowers__last_name',)
    # Only active profiles that are not mid-sync are editable here.
    qs_filter_kwargs = {
        'is_active': True,
        'encompass_sync_status__in': [
            LoanProfileV1.ENCOMPASS_SYNCED,
            LoanProfileV1.ENCOMPASS_NEVER_SYNCED,
            LoanProfileV1.ENCOMPASS_SYNC_FAILED,
        ]
    }
    # Every relation the serializer touches, prefetched so list/retrieve
    # run with a flat query count.
    prefetch_list = [
        Prefetch('new_property_address'),
        Prefetch('borrowers'),
        Prefetch('borrowers__mailing_address'),
        Prefetch('borrowers__demographics'),
        Prefetch('borrowers__realtor'),
        Prefetch('borrowers__realtor__address'),
        Prefetch('borrowers__previous_addresses'),
        Prefetch('borrowers__previous_employment'),
        Prefetch('borrowers__holding_assets'),
        Prefetch('borrowers__vehicle_assets'),
        Prefetch('borrowers__insurance_assets'),
        Prefetch('borrowers__income'),
        Prefetch('borrowers__expense'),
        Prefetch(
            'borrowers__coborrower',
            queryset=CoborrowerV1.objects.filter(is_active=True)
        ),
        Prefetch('borrowers__coborrower__mailing_address'),
        Prefetch('borrowers__coborrower__demographics'),
        Prefetch('borrowers__coborrower__realtor'),
        Prefetch('borrowers__coborrower__realtor__address'),
        Prefetch('borrowers__coborrower__previous_addresses'),
        Prefetch('borrowers__coborrower__previous_employment'),
        Prefetch('borrowers__coborrower__holding_assets'),
        Prefetch('borrowers__coborrower__vehicle_assets'),
        Prefetch('borrowers__coborrower__insurance_assets'),
        Prefetch('borrowers__coborrower__income'),
        Prefetch('borrowers__coborrower__expense'),
        Prefetch('credit_request_responses'),
    ]
    def _get_paginated_lp_ids(self):
        """
        To reduce time spent on ordering and slicing, take only the needed
        IDs first (avoiding decryption of full rows), then select those
        loan profiles.  The filtered/sorted ids are paginated exactly the
        way a plain queryset would be.
        """
        qs = self.request.user.loan_profilesV1.filter(
            **self.qs_filter_kwargs
        ).values_list(
            'id', flat=True
        )
        qs = self.annotate_queryset(qs)
        qs = self.filter_queryset(qs)
        return self.paginate_queryset(qs)
    def annotate_queryset(self, qs):
        """Annotate the ``respa_triggered_within_last_week`` flag used for ordering."""
        today = datetime.date.today()
        week_ago = today - datetime.timedelta(days=7)
        is_respa_triggered_within_last_week_expr = Case(
            When(Q(_respa_triggered=True) & Q(updated__gt=week_ago), then=Value(True)),
            default=Value(False),
            output_field=BooleanField()
        )
        return qs.annotate(respa_triggered_within_last_week=is_respa_triggered_within_last_week_expr)
    def get_paginated_qs(self):
        """Fetch the current page of profiles (page of ids first, then rows)."""
        assert hasattr(self, '_get_paginated_lp_ids'), "%s has not '_get_paginated_lp_ids' attribute" % self
        qs = self.request.user.loan_profilesV1.prefetch_related(
            *self.prefetch_list
        ).filter(
            id__in=self._get_paginated_lp_ids()
        )
        qs = self.annotate_queryset(qs)
        # Re-apply ordering: the `id__in` filter does not preserve it.
        qs = filters.OrderingFilter().filter_queryset(self.request, qs, self)
        return qs
    def get_queryset(self):
        # Advisors only see their own active, editable profiles.
        return self.request.user.loan_profilesV1.prefetch_related(
            *self.prefetch_list
        ).filter(
            **self.qs_filter_kwargs
        )
    def list(self, request, *args, **kwargs):
        """
        Overridden because the queryset must not be paginated again:
        `self._get_paginated_lp_ids()` already selected the page of ids.
        """
        assert hasattr(self, 'get_paginated_qs'), "%s has not 'get_paginated_qs' attribute" % self
        queryset = self.get_paginated_qs()
        serializer = self.get_serializer(queryset, many=True)
        return self.get_paginated_response(serializer.data)
    def perform_destroy(self, instance):
        # Soft delete: hide the profile rather than removing the row.
        instance.is_active = False
        instance.save()
    # properties
    @decorators.detail_route(methods=['post'], permission_classes=[IsAuthenticated, AllowAdvisorPermission])
    def storage(self, *args, **kwargs):
        """Create (if needed) the profile's Box folder and return its id."""
        instance = self.get_object()
        instance.create_storage()
        if not instance.storage:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        data = {'id': instance.storage.box_folder_id}
        return Response(data=data, status=status.HTTP_201_CREATED)
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def new_property_address(self, request, *args, **kwargs):
        """
        Endpoint-property, new property address of loan profile entry.
        """
        view = CommonAddressView
        view.filters = {'loanprofilev1': kwargs['pk']}
        view.related_set_attr = 'loanprofilev1_set'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
    # actions
    @decorators.detail_route(methods=['post'])
    def los_guid(self, *args, **kwargs):
        """
        POST for LOS GUID: submit an Encompass sync request.

        Responds 400 (with warnings where available) when the profile is
        in a non-syncable state or fails the preflight checks.
        """
        data = {}
        instance = self.get_object()
        if instance.encompass_sync_status not in [
            LoanProfileV1.ENCOMPASS_NEVER_SYNCED,
            LoanProfileV1.ENCOMPASS_SYNC_FAILED
        ]:
            logger.warning('LOS-GUID-REQUEST-SYNC-BAD-STATUS %s', instance.guid)
            data['request_submitted'] = False
            return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
        preflight_warnings = instance.encompass_sync_warnings()
        if preflight_warnings:
            data['request_submitted'] = False
            data['warnings'] = preflight_warnings
            logger.warning('LOS-GUID-PREFLIGHT-WARNINGS %s %s',
                           instance.guid, preflight_warnings)
            return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
        try:
            submitted = instance.sync_to_encompass()
        except Exception:  # pylint: disable=broad-except
            # Log the failure instead of swallowing it silently; the client
            # still receives a 400 via `submitted = False` below.
            logger.exception('LOS-GUID-SYNC-FAILED %s', instance.guid)
            submitted = False
        data['request_submitted'] = submitted
        http_status = status.HTTP_201_CREATED if submitted else status.HTTP_400_BAD_REQUEST
        return Response(data=data, status=http_status)
    @decorators.detail_route(methods=['post'])
    def confirm_demographics_questions(self, *args, **kwargs):
        """Mark the demographics-questions request as confirmed."""
        instance = self.get_object()
        instance.is_demographics_questions_request_confirmed = True
        instance.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
    @decorators.detail_route(methods=['post'])
    def credit_request(self, *args, **kwargs):
        """Kick off a MISMO credit pull for this profile."""
        instance = self.get_object()
        return start_credit_pull(instance.guid)
    @decorators.detail_route(methods=['patch'])
    def trigger_respa(self, *args, **kwargs):
        """Trigger RESPA; the criteria evaluation is returned either way."""
        instance = self.get_object()
        data = instance.respa_criteria_for_advisor_portal()
        http_status = status.HTTP_200_OK if instance.trigger_respa_for_advisor_portal() else status.HTTP_400_BAD_REQUEST
        return Response(data=data, status=http_status)
    @decorators.detail_route(methods=['get'])
    def advisor_credit_pdf_view_url(self, *args, **kwargs):
        """Return a Box shared link to the latest valid credit report PDF."""
        instance = self.get_object()
        summary = instance.find_valid_credit_report_summary()
        if summary is None:
            # Fixed: previously called `logging.exception(...)`, which targets
            # the root logger and implies an active exception (there is none
            # here) -- use the module logger at error level instead.
            logger.error("MISMO-CREDIT-SHARED-LINK-FAIL-NO-SUMMARY lp %s", instance.id)
            data = {'url': ""}
            resp_status = status.HTTP_404_NOT_FOUND
        else:
            #TODO: https://app.asana.com/0/26776562531082/310821218441711
            box_file = box_file_get(summary.report_pdf_document.document_id)
            shared_link = box_file.get_shared_link(access='company', allow_preview=True)
            data = {'url': shared_link}
            resp_status = status.HTTP_200_OK
        return Response(data=data, status=resp_status)
    @decorators.detail_route(methods=['post'])
    def unlock_loan(self, *args, **kwargs):
        """
        Hand the profile lock to the advisor, refreshing the profile from
        the mortgage profile first when the advisor did not already own it.
        """
        instance = self.get_object()
        if instance.lock_owner != instance.LOCK_OWNER_CHOICES.advisor:
            instance.update_from_mortgage_profile()
            instance.lock_owner = instance.LOCK_OWNER_CHOICES.advisor
            instance.save()
        data = {'lock_owner': instance.lock_owner}
        return Response(data, status=status.HTTP_200_OK)
class AdvisorLoanProfileV1SyncInProgressView(AdvisorLoanProfileV1View):
    """
    Loan profile listing restricted to profiles that are queued for,
    currently in, or failed Encompass synchronization.
    """
    permission_classes = [IsAuthenticated, AllowAdvisorPermission, ]
    serializer_class = LoanProfileV1Serializer
    pagination_class = LargePagePagination
    # Fixed: was `ordering = ('-updated')` -- a parenthesized string, not a
    # tuple.  DRF's OrderingFilter tolerates a bare string, but the parent
    # class and `ordering_fields` use tuples; use a real 1-tuple for
    # consistency and to avoid accidental string iteration elsewhere.
    ordering = ('-updated',)
    qs_filter_kwargs = {
        'is_active': True,
        'encompass_sync_status__in': [
            LoanProfileV1.ENCOMPASS_READY_TO_SYNC,
            LoanProfileV1.ENCOMPASS_SYNC_IN_PROGRESS,
            LoanProfileV1.ENCOMPASS_SYNC_FAILED,
        ]
    }
advisor_loan_profile_v1_sync_in_progress_view = AdvisorLoanProfileV1SyncInProgressView.as_view({'get': 'list'})
# Credit Request
class AdvisorLoanProfileV1CreditRequestResponseView(
        AdvisorTokenAuthMixin,
        NestedViewSetMixin,
        viewsets.ReadOnlyModelViewSet):
    """
    Read-only credit report responses, scoped to the parent loan profile
    via the nested-router parent lookup kwargs.
    """
    permission_classes = [IsAuthenticated, AllowAdvisorPermission]
    serializer_class = CreditRequestResponseSerializer
    model = CreditRequestResponse
    def get_queryset(self):
        # Prefetch the summary and its scores so serialization does not
        # issue per-row queries; scope to the parent profile from the URL.
        return self.filter_queryset_by_parents_lookups(
            self.model.objects.all().prefetch_related(
                Prefetch('credit_report_summary'),
                Prefetch('credit_report_summary__credit_report_scores'),
            )
        )
# Borrower
class AdvisorLoanProfileV1BorrowerV1View(AdvisorLoanProfileV1BorrowerBaseView):
    """
    Borrower CRUD, nested under a loan profile (`loan_profile` URL kwarg).
    """
    serializer_class = BorrowerV1Serializer
    model = BorrowerV1
    # Maps endpoint-property names to the reverse accessor used by the base
    # view to scope the related-object sub-endpoints.
    properties_mapping = {
        'address': 'borrowerv1_address',
        'mailing_address': 'borrowerv1_mailing_address',
        'demographics': 'borrowerv1',
        'employment': 'borrowerv1_employment',
        'realtor': 'borrowerv1_realtor',
    }
    def perform_create(self, serializer):
        # Attach the new borrower to the loan profile named in the URL;
        # a missing profile becomes a 404 instead of a 500.
        loan_profile_id = self.kwargs['loan_profile']
        try:
            loan_profile = LoanProfileV1.objects.get(id=loan_profile_id)
        except LoanProfileV1.DoesNotExist:
            raise Http404('Loan profile with id "{}" does not exist'.format(loan_profile_id))
        else:
            serializer.save(loan_profile=loan_profile)
class BorrowerPreviousAddressesView(BorrowerResourcesMixin):
    """
    CRUD over a borrower's previous addresses (capped at 10 entries).
    """
    serializer_class = AddressV1Serializer
    model = AddressV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'previous_addresses'
    instance_count_maximum = 10
class BorrowerPreviousEmploymentsView(BorrowerResourcesMixin):
    """
    CRUD over a borrower's employment history (capped at 10 entries),
    with nested address endpoint-properties.
    """
    serializer_class = EmploymentV1Serializer
    model = EmploymentV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'previous_employment'
    instance_count_maximum = 10
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def address(self, request, *args, **kwargs):
        """
        Endpoint-property, address of employment object.
        """
        view = CommonAddressView
        view.filters = {'employmentv1_address': kwargs['pk']}
        view.related_set_attr = 'employmentv1_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def company_address(self, request, *args, **kwargs):
        """
        Endpoint-property, company address of employment object.
        """
        view = CommonAddressView
        view.filters = {'employmentv1_company_address': kwargs['pk']}
        view.related_set_attr = 'employmentv1_company_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class BorrowerHoldingAssetsView(HoldingAssetsOwnershipMixin, BorrowerResourcesMixin):
    """
    CRUD over a borrower's holding assets (bank/brokerage accounts), with
    a nested institution-address endpoint-property.
    """
    serializer_class = HoldingAssetV1Serializer
    model = HoldingAssetV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'holding_assets'
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def institution_address(self, request, *args, **kwargs):
        """
        Endpoint-property, institution address of holding asset object.
        """
        view = CommonAddressView
        view.filters = {'holdingassetv1_institution_address': kwargs['pk']}
        view.related_set_attr = 'holdingassetv1_institution_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class BorrowerVehicleAssetsView(BorrowerResourcesMixin):
    """
    CRUD over a borrower's vehicle assets.
    """
    serializer_class = VehicleAssetV1Serializer
    model = VehicleAssetV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'vehicle_assets'
class BorrowerInsuranceAssetsView(BorrowerResourcesMixin):
    """
    CRUD over a borrower's insurance assets.
    """
    serializer_class = InsuranceAssetV1Serializer
    model = InsuranceAssetV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'insurance_assets'
class BorrowerIncomesView(RestrictIncomesKindCreation, BorrowerResourcesMixin):
    """
    CRUD over a borrower's incomes; the mixin restricts which income
    `kind` values may be created.
    """
    serializer_class = IncomeV1Serializer
    model = IncomeV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'income'
class BorrowerExpensesView(RestrictKindCreation, BorrowerResourcesMixin):
    """
    CRUD over a borrower's expenses; the mixin restricts which expense
    `kind` values may be created.
    """
    serializer_class = ExpenseV1Serializer
    model = ExpenseV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'expense'
class BorrowerLiabilitiesView(LiabilitiesRestrictionMixin, BorrowerResourcesMixin):
    """
    CRUD over a borrower's liabilities, subject to the restriction mixin.
    """
    serializer_class = LiabilityV1Serializer
    model = LiabilityV1
    # Reverse M2M accessor on the borrower used by the mixin for scoping.
    m2m_rel_attr = 'liabilities'
# Coborrower
class AdvisorLoanProfileV1CoborrowerV1View(AdvisorLoanProfileV1BorrowerBaseView):
    """
    Coborrower CRUD, nested under a borrower (`borrower` URL kwarg).

    "Creating" a coborrower for a borrower that already has one simply
    reactivates the existing (soft-deleted) record instead of inserting a
    duplicate -- there appears to be at most one coborrower row per
    borrower (NOTE(review): confirm against the model constraint).
    """
    serializer_class = CoborrowerV1Serializer
    model = CoborrowerV1
    # Maps endpoint-property names to the reverse accessor used by the base
    # view to scope the related-object sub-endpoints.
    properties_mapping = {
        'address': 'coborrowerv1_address',
        'mailing_address': 'coborrowerv1_mailing_address',
        'demographics': 'coborrowerv1',
        'employment': 'coborrowerv1_employment',
        'realtor': 'coborrowerv1_realtor',
    }
    @staticmethod
    def _create_coborrower(borrower_id, serializer_instance):
        # Brand-new coborrower: attach to the borrower or 404 if it is gone.
        try:
            borrower = BorrowerV1.objects.get(id=borrower_id)
        except BorrowerV1.DoesNotExist:
            raise Http404('Borrower with id "{}" does not exist'.format(borrower_id))
        else:
            return serializer_instance.save(borrower=borrower)
    @staticmethod
    def _restore_coborrower(coborrower_obj, serializer_instance):
        # Reactivate a previously soft-deleted coborrower and expose it via
        # the serializer as if it had just been created.
        coborrower_obj.is_active = True
        coborrower_obj.save()
        serializer_instance.instance = coborrower_obj
        return coborrower_obj
    def perform_create(self, serializer):
        borrower_id = self.kwargs['borrower']
        try:
            coborrower_obj = CoborrowerV1.objects.get(
                borrower_id=borrower_id
            )
        except CoborrowerV1.DoesNotExist:
            return self._create_coborrower(
                borrower_id=borrower_id,
                serializer_instance=serializer,
            )
        else:
            return self._restore_coborrower(
                coborrower_obj=coborrower_obj,
                serializer_instance=serializer,
            )
class CoborrowerPreviousAddressesView(CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's previous addresses (capped at 10 entries).
    """
    serializer_class = AddressV1Serializer
    model = AddressV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'previous_addresses'
    instance_count_maximum = 10
class CoborrowerPreviousEmploymentsView(CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's employment history (capped at 10 entries),
    with nested address endpoint-properties.
    """
    serializer_class = EmploymentV1Serializer
    model = EmploymentV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'previous_employment'
    instance_count_maximum = 10
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def address(self, request, *args, **kwargs):
        """
        Endpoint-property, address of employment object.
        """
        view = CommonAddressView
        view.filters = {'employmentv1_address': kwargs['pk']}
        view.related_set_attr = 'employmentv1_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def company_address(self, request, *args, **kwargs):
        """
        Endpoint-property, company address of employment object.
        """
        view = CommonAddressView
        view.filters = {'employmentv1_company_address': kwargs['pk']}
        view.related_set_attr = 'employmentv1_company_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class CoborrowerHoldingAssetsView(HoldingAssetsOwnershipMixin, CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's holding assets, with a nested
    institution-address endpoint-property.
    """
    serializer_class = HoldingAssetV1Serializer
    model = HoldingAssetV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'holding_assets'
    # pylint: disable=no-self-use
    @decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
    def institution_address(self, request, *args, **kwargs):
        """
        Endpoint-property, institution address of holding asset object.
        """
        view = CommonAddressView
        view.filters = {'holdingassetv1_institution_address': kwargs['pk']}
        view.related_set_attr = 'holdingassetv1_institution_address'
        return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class CoborrowerVehicleAssetsView(CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's vehicle assets.
    """
    serializer_class = VehicleAssetV1Serializer
    model = VehicleAssetV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'vehicle_assets'
class CoborrowerInsuranceAssetsView(CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's insurance assets.
    """
    serializer_class = InsuranceAssetV1Serializer
    model = InsuranceAssetV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'insurance_assets'
class CoborrowerIncomesView(RestrictIncomesKindCreation, CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's incomes; the mixin restricts which income
    `kind` values may be created.
    """
    serializer_class = IncomeV1Serializer
    model = IncomeV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'income'
class CoborrowerExpensesView(RestrictKindCreation, CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's expenses; the mixin restricts which expense
    `kind` values may be created.
    """
    serializer_class = ExpenseV1Serializer
    model = ExpenseV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'expense'
class CoborrowerLiabilitiesView(LiabilitiesRestrictionMixin, CoborrowerResourcesMixin):
    """
    CRUD over a coborrower's liabilities, subject to the restriction mixin.
    """
    serializer_class = LiabilityV1Serializer
    model = LiabilityV1
    # Reverse M2M accessor on the coborrower used by the mixin for scoping.
    m2m_rel_attr = 'liabilities'
| [
"dev@divethree.com"
] | dev@divethree.com |
c34bbcd9379d789cc7232e1d7875e51b44d1eb37 | 89616cc19e98adba8b895efc6be5f2f971d46095 | /Paying_the_Minimum.py | 671e37a5ea69c6d6d4b303bc31fee3ad213ca935 | [] | no_license | alangm7/Learn_Python_MIT_course_EdX | 419cebcf6badd31e7c078af2c00d1e374ed5301d | 96d9fe23c134290571ad69adfe9685854fabe10e | refs/heads/master | 2021-01-17T17:48:36.152311 | 2016-08-08T17:48:41 | 2016-08-08T17:48:41 | 61,170,376 | 1 | 0 | null | 2016-07-27T20:21:09 | 2016-06-15T02:14:31 | Python | UTF-8 | Python | false | false | 717 | py | """Write a program to calculate the credit card balance after one year if a person
only pays the minimum monthly payment required by the credit card company each month."""
# Credit card balance after one year of paying only the required monthly
# minimum.  Each month: the minimum payment (rate * balance) is deducted,
# then one month of interest accrues on the remainder.
#
# Fixes: the Python-2-only `print x` statements are replaced with the
# single-argument `print(x)` form, which behaves identically under both
# Python 2 and 3, and the misleading `minPayment = ''` string placeholder
# is removed (it was immediately overwritten with a float).
balance = 4842
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
totalPaid = 0
month = 1
while month <= 12:
    minPayment = monthlyPaymentRate * balance
    balance -= minPayment
    balance += (annualInterestRate / 12.0) * balance
    print('Month:' + str(month))
    print('Minimum monthly payment:' + str(round(minPayment, 2)))
    print('Remaining balance:' + str(round(balance, 2)))
    totalPaid += minPayment
    month += 1
print('Total paid:' + str(round(totalPaid, 2)))
print('Remaining balance:' + str(round(balance, 2)))
| [
"noreply@github.com"
] | alangm7.noreply@github.com |
c6be3b672181c172c8e313aaa5c7103906d40ec9 | 30554467bd73d9df8da66ae497b1f1fff17ecf00 | /all_tests_time.py | 9fd7fcd7cf7a524691e2c39e7864bd29606ffac2 | [] | no_license | wm0562/webAutomation | e38a8a5693906902acd3439626c9f1013c10303f | 1c814aec56c3533e49ab7fa7a468c6a3e8adea22 | refs/heads/master | 2020-08-27T06:35:45.645169 | 2020-05-25T07:14:20 | 2020-05-25T07:14:20 | 217,271,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # coding:utf-8
import HTMLTestRunner
import unittest
import os
import time
# Directory that holds the start_*.py test modules to collect.
listaa = 'E:\\20190611\\python\\Python27\\Lib\\unittest\\untitled\\test_case'
def creatsuite1():
    """Discover all start_*.py test cases under `listaa` and bundle them
    into a single TestSuite (echoed to stdout for debugging)."""
    testunit = unittest.TestSuite()
    discover = unittest.defaultTestLoader.discover(listaa, pattern='start_*.py', top_level_dir=None)
    # Flatten the discovered suite-of-suites into individual test cases.
    for test_suite in discover:
        for test_case in test_suite:
            testunit.addTest(test_case)
    # Fixed: `print(x)` with one argument behaves identically under Python 2
    # and 3, unlike the Python-2-only `print x` statement used originally.
    print(testunit)
    return testunit
alltestnames = creatsuite1()
# Timestamped report path so successive runs don't overwrite each other.
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
filename = 'E:\\20190611\\python\\Python27\\report\\'+now+'result.html'
# Fixed: `open` replaces the Python-2-only `file` builtin (identical
# behaviour on 2.x, and `file` no longer exists on 3.x).
fp = open(filename, 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
    stream=fp,
    title=u'百度搜索测试用例',
    description=u'用例执行情况: '
)
########## control when the script is executed #####
# Poll the clock every 10 seconds until the target time, then run the suite.
# (The original `k = 1` / `while k < 2:` never changed k -- an infinite loop
# that only exits via `break`; `while True` states that intent directly.)
while True:
    timing = time.strftime('%H_%M', time.localtime(time.time()))
    if timing == '10_21':
        print(u'开始运行脚本: ')
        runner.run(alltestnames)
        print(u'运行完成退出')
        break
    else:
        time.sleep(10)
        print(timing)
| [
"1455448378@qq.com"
] | 1455448378@qq.com |
18001c5a5d6ca46f07163777075ca1e3bc06b94a | 362ae35a6be9fa575030cafcaddcec2cfde6112f | /groupbuying/models.py | bb0753306dd5adace1f7e5f2dc9c637f3f3b6b1d | [] | no_license | JUKOOK/DB_project_moaU | 197585648dae1d0a5578973d06ded6a0685cc2d7 | 28a2ba6ee22c8b2955964235ee7a3c4c7c11e3df | refs/heads/master | 2021-04-03T08:56:05.893364 | 2018-06-21T04:24:44 | 2018-06-21T04:24:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | from __future__ import unicode_literals
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from django.contrib.auth.models import User
from django.db import models
import datetime
import os
#http://hjh5488.tistory.com/12
def set_filename_format(now, instance, filename):
    """Build an upload file name from a timestamp and the original extension.

    e.g. 2016-07-12-158859.png  (ISO date, microsecond, original extension)

    ``instance`` is accepted to match Django's upload_to callback signature
    but is not used.
    """
    extension = os.path.splitext(filename)[1]
    return "{}-{}{}".format(now.date(), now.microsecond, extension)
def user_directory_path(instance, filename):
    """Return the upload directory path for image files.

    Always returns the constant ``images/bookmarket/(unknown)``: the
    original format string contained no replacement fields, so the
    per-file name built via set_filename_format() (and the datetime.now()
    call feeding it) was dead code and has been removed.  The
    ``images/{year}/{month}/{day}/...`` scheme described in the old
    docstring was never implemented; the constant is kept unchanged so
    existing uploads keep resolving to the same location.
    """
    return "images/bookmarket/(unknown)"
class Party_Open(models.Model):
    """A group-buying party opened by a recruiter, tracking recruitment state."""
    # category = models.CharField(max_length = 40, choices = CTCHOICE,default = ETC)
    # Recruitment state codes used by the `condition` field below.
    RECRUITING = 'RE'
    SFAIL = 'SF'
    SSUCC = 'SS'
    SCANC = 'SC'
    CTCHOICE = (
        (RECRUITING, 'Recruiting'),
        (SFAIL, 'Sfail'),
        (SSUCC, 'Ssucc'),
        (SCANC, 'Scanc'),
    )
    title = models.CharField(max_length = 40)
    category = models.CharField(max_length = 40)
    content = models.CharField(max_length = 40)
    open_date = models.DateTimeField()
    # num_person: target head count; now_person: current count (starts at 1,
    # presumably counting the recruiter -- TODO confirm).
    num_person = models.IntegerField()
    now_person = models.IntegerField(default = 1)
    closing_date = models.DateTimeField()
    # NOTE(review): default=0 is an odd default for an ImageField (expects a
    # file path/name) -- confirm intent before changing, as it affects data.
    image = models.ImageField(upload_to=user_directory_path, default =0)
    condition = models.CharField(max_length =10, choices = CTCHOICE,default = RECRUITING)
    recruiter = models.ForeignKey(User, on_delete = models.CASCADE)
    def __str__(self):
        return self.title
class Apply_Applicant(models.Model):
    """A user's application (with comment) to join a Party_Open."""
    apply_date = models.DateTimeField()
    comment = models.CharField(max_length = 100)
    applicant = models.ForeignKey(User, on_delete=models.CASCADE)
    party = models.ForeignKey(Party_Open, on_delete=models.CASCADE)
| [
"noreply@github.com"
] | JUKOOK.noreply@github.com |
40d1e25bc0c5d3dca3d376dbfa1cda50082c6f1e | 5a41707da9763a9e9b41a9bfb6caf1c87f7e6816 | /eklentiler/tr.py | 83ce14244f5d31c1036df56c116ca463e0b8cbbe | [] | no_license | kizilsancakailesi/ahmetkaya | 6d14bde53010dda41a50c13921c50b1ec7915804 | 2317dac92b6931a9364986d714e6d1994b052af8 | refs/heads/main | 2023-08-11T10:46:55.884293 | 2021-09-23T22:13:34 | 2021-09-23T22:13:34 | 409,739,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import os
from config import ASSISTANT_NAME
# Markdown text shown by the bot's /help command (mixed Turkish/English).
HELP_TEXT =f"**SETTINGS** \n\n`1) Botumuzu Gruba ekleyin ve yönetici yapın` \n2) **Add** @{ASSISTANT_NAME} **Grubunuza** \n\n**COMMANDS** \n\n- `/vizyon` **: Yanıt verdiğiniz video dosyasını seste oynatır.** \n- `/durvizyon` **: Akışı durdur**"
# Greeting shown on /start.
START_TEXT = '👋 `Hey My Name Is` **VcVideoPlayer** \n\n`Lets Enjoy Cinematic View of Group Video Player With Your Friends ❤️😊`'
# About/license blurb.
ABOUT_TEXT = '**👩💻 INFORMATION** \n\n`🤖 This bot was created to stream video in telegram group video chats using several methods from WebRTC.` \n\n`💡 Powered by PyTgcalls the Async client API for the Telegram Group Calls, and Pyrogram the telegram MTProto API Client Library and Framework in Pure Python for Users and Bots.` \n\n**This bot licensed under GNU-GPL 3.0 License**'
# Credits text; interpolates the configured assistant username.
DEVS_TEXT =f"**Reisler TV ** \n\n`Here Some Developers Helping in Making The` @{ASSISTANT_NAME} `Bot.`"
| [
"noreply@github.com"
] | kizilsancakailesi.noreply@github.com |
6038afd572bebeb6555821f05e0710b04f59d809 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/wllgar015/question3.py | 73caabd33916af860b5d886602e949a64214f59d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | #question 3
# Gather the banner parameters from the user.  int() replaces the original
# eval() calls: eval executes arbitrary expressions typed by the user,
# while int() accepts the same intended numeric input safely.
message = input("Enter the message:\n")
repeat = int(input("Enter the message repeat count:\n"))
frame = int(input("Enter the frame thickness:\n"))
def awesome(message, repeat, frame):
    """Print `message` `repeat` times inside `frame` nested rectangular frames.

    The outermost border is drawn with '+' corners and '-' dashes; each inner
    frame is one step narrower and flanked by '|' columns.  With frame == 0,
    only the message rows are printed.

    Fixes: the original kept two mirror counters (`count` and `dcount`) that
    were always equal, and duplicated the border-width arithmetic in four
    places; both are collapsed while the printed output stays identical.
    """
    width = len(message) + 2 * frame
    # Outermost border.
    if frame > 0:
        print("+", "-" * width, "+", sep="")
    # Inner frames, from just inside the border down to the message rows.
    for depth in range(1, frame):
        print("|" * depth, "+", "-" * (width - 2 * depth), "+", "|" * depth, sep="")
    # The message itself, repeated; default separator supplies the padding.
    for _ in range(repeat):
        print("|" * frame, message, "|" * frame)
    # Inner frames again, mirrored back outward.
    for depth in range(frame - 1, 0, -1):
        print("|" * depth, "+", "-" * (width - 2 * depth), "+", "|" * depth, sep="")
    if frame > 0:
        print("+", "-" * width, "+", sep="")
awesome(message, repeat, frame)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
513654fb878d5d60ea233b97c3d338cfa5f2e9d1 | 1e7cf1927e31410a81c46e7fc474dd01b76065aa | /hello.py | e68f1245460b252e7de2a005486956a88db52042 | [] | no_license | bytewing/TestProject | de92054413e7583e2016186e00e7a8d898a47882 | 778f273acde4550668718a3dbdde0eeed5457cb6 | refs/heads/master | 2020-12-25T18:32:01.489437 | 2013-10-25T15:45:25 | 2013-10-25T15:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #@author
# Greeting lines (added incrementally while practicing commits); a single
# print with embedded newlines emits the same three lines of output.
print("Hello, world, foobar!\nAdded second line\nAdded third line")
| [
"palghat5@yahoo.com"
] | palghat5@yahoo.com |
53fbc822653686579c37de3cd611696e748a5c09 | 60881f0700857739a18b0bd57178296ef6f33845 | /Web/article.py | be8468a2affd0777827954e8386f35d07787b283 | [] | no_license | MagicalLas/WebCroller | 51ccd7499f76584161c2d181c517b37d109bd048 | 156bffa305329c1d97702ed556ac807f8a970c64 | refs/heads/master | 2022-01-29T09:45:07.623818 | 2019-07-03T06:51:12 | 2019-07-03T06:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | import json
import os.path
class Article(object):
    """Tracks crawl/download progress for one blog article series.

    `name` identifies the series (blog, first article id, total count);
    `now` counts items fetched so far out of `no` total, and `link` is the
    text file the series is written to.
    """

    def __init__(self, blog_id, first_article_id, no):
        self.name = "{}-{}-{}".format(blog_id, first_article_id, no)
        self.blog = blog_id
        self.first = first_article_id
        self.no = int(no)
        self.now = 1

    @property
    def progress(self):
        """Completion percentage, truncated to an int."""
        fraction = self.now / self.no
        return int(fraction * 100)

    @property
    def now_state(self):
        """JSON snapshot of the current progress and the output link."""
        payload = {"progress": self.progress, "link": self.link}
        return json.dumps(payload)

    @property
    def link(self):
        """File name of the series' text dump."""
        return self.name + ".txt"

    @property
    def is_exist(self):
        """True when the dump file already exists under ./static/."""
        return os.path.isfile("./static/" + self.link)
"haho013039@gmail.com"
] | haho013039@gmail.com |
b4efc36351dcb2e025c1f119f5ff933374b05d18 | d69e6fa9c659c896dfae09196d0a5795709d169e | /unisan/penelitiankompetitif/urls.py | 8a85819a8021a9313ebdf08c047c3aaeae3ff5ab | [
"Apache-2.0"
] | permissive | kurniantoska/ichsan_proj | 3be7e4fd233c18955ea54a905b8c8442af8ebe8c | f79cbcb896df902e129dcdb2affc89dc4f3844ef | refs/heads/master | 2022-12-05T21:18:46.605502 | 2019-07-25T14:47:42 | 2019-07-25T14:47:42 | 101,473,964 | 0 | 0 | Apache-2.0 | 2022-12-04T04:39:44 | 2017-08-26T08:41:46 | HTML | UTF-8 | Python | false | false | 420 | py | from django.urls import path
from django.views.generic import TemplateView
from penelitiankompetitif.views import IsiDataDosen
# URL namespace for reversing, e.g. reverse('pen_komp:home_pen').
app_name = 'pen_komp'
urlpatterns = [
    # Static landing page for the competitive-research section.
    path(
        'penelitian_kompetitif/',
        TemplateView.as_view(template_name='home_penelitian.html'),
        name='home_pen'
    ),
    # Lecturer biodata entry form.
    path(
        'isi_biodata_dosen/',
        IsiDataDosen.as_view(),
        name='isi_data_dosen'
    ),
]
| [
"alternative.xen.sakura@gmail.com"
] | alternative.xen.sakura@gmail.com |
1bf4f9d648cd3cf3e5bbe19c856fae2872eaeedf | 2c57bcab9bd78c1382690b1c809315c113ba9353 | /download_main.py | a0a8bccb9d6436494391aa018de38d6763140b72 | [] | no_license | DL-03/Launcher_Myndustry_DL | 324bbfb38a8cfb611c4ae5814786749d0ee8e654 | 76a95cec688b17211e328ae7b42363f87207549f | refs/heads/master | 2023-02-19T17:00:23.015249 | 2021-01-24T20:51:50 | 2021-01-24T20:51:50 | 325,632,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | import time
import PySimpleGUI as sg
from urllib import request
import os
# Working directory holds num.txt (build number) and save.txt (target dir).
load = os.getcwd()
# Fixed: the original called open(...).readline() without ever closing the
# handles; `with` guarantees both files are closed.
with open(load + '\\num.txt', 'r') as num_file:
    y = num_file.readline()
with open(f'{load}\\save.txt', 'r') as save_file:
    save = save_file.readline()
# Show a progress meter, fetch the requested Mindustry build from GitHub
# releases, then step the meter to completion.
sg.one_line_progress_meter(f'Downloading {y}', 0, 2, 'key','Подождите, скачивание файлов из интернета будут проводица в зависнутом окне(не отвечает!)')
request.urlretrieve(f'https://github.com/Anuken/MindustryBuilds/releases/download/{y}/Mindustry-BE-Desktop-{y}.jar', f'{save}{y}.jar')
sg.one_line_progress_meter(f'Downloading {y}', 1, 2, 'key','Готово!')
time.sleep(1)
sg.one_line_progress_meter(f'Downloading {y}', 2, 2, 'key','Готово!')
| [
"noreply@github.com"
] | DL-03.noreply@github.com |
2945bd43ce3eb68f057fe466e6172a171fa02d17 | c10121b33f2c2e2f5abe1499691d5d0cb18219ba | /lessons/ex25.py | 1afce7e2bf709e30c943fa6a02844466a41b33e3 | [] | no_license | denver/learn_python | 9782b9d42c895b56700149574daba03719605148 | 7dfc21ca52e8dbfe1e588da84f63727ee29e3661 | refs/heads/master | 2020-04-10T10:04:20.324942 | 2015-10-17T18:46:25 | 2015-10-17T18:46:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #learn python the hard way example 25
def break_words(stuff):
"""This function will break up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Print the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full sentence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of the sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words then prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
| [
"denver.peterson@gmail.com"
] | denver.peterson@gmail.com |
deb7b7f4dfdf2710af17469506b61251b7c10a45 | 7ed34770064d3ee7e5fbbb65873de54fce7a7da0 | /second_project/second_project/settings.py | 303865567d8bb1299b791b8f1714f5aff8551bd6 | [] | no_license | Vamsichowdhury/models | c8f078179901a01ae7dc4aafefadac01f49be701 | bc3057d974ae05f48f2154b0a85789d576f73650 | refs/heads/master | 2020-04-11T15:40:36.952487 | 2018-12-15T11:36:36 | 2018-12-15T11:36:36 | 161,899,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | """
Django settings for second_project project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xn7^s4#3i0n=nao2$)_dloq0co*d-z9wrpq53g&)7(&1tx!3++'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'second_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'second_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates"),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'second_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"vamsichowdhury@gmail.com"
] | vamsichowdhury@gmail.com |
a1c41c69498157a0a988fb9273012070ddde4885 | acd5c39fa9f041793faabc8f3742e79dd5c1994f | /aboutme/urls.py | 622c4fe78e506495674964d01a67b15c5718e32d | [] | no_license | jakem91/collab-website | 6e884f3b931f021a586cf305d1fdd5780dbd3b35 | 3d3340d5fd9c84b0bab47a0db5243586c8bd0851 | refs/heads/main | 2023-02-03T06:02:46.225749 | 2020-11-28T21:16:50 | 2020-11-28T21:16:50 | 316,829,494 | 0 | 0 | null | 2020-11-28T22:01:20 | 2020-11-28T22:01:20 | null | UTF-8 | Python | false | false | 121 | py | from django.urls import path, include
from . import views
urlpatterns = [
path('', views.aboutme, name='aboutme'),
]
| [
"steveyburns@hotmail.com"
] | steveyburns@hotmail.com |
3562329aabb2e30dbc9414a3712de5d7d02fb243 | e0d9a87b0fb3a207d118421c43af70e0e2b22ba2 | /S5644/code.py | 55938d98efd2339b933e1f8451ed5b89c3e75762 | [
"MIT"
] | permissive | DokySp/acmicpc-practice | f20c9341aa114d8c7f5d733ba888925052d81768 | 2fa967ad4cd30797f195796f1371f7017cbc06c5 | refs/heads/master | 2023-04-08T08:17:46.890732 | 2023-04-04T08:06:23 | 2023-04-04T08:06:23 | 252,925,188 | 0 | 0 | MIT | 2020-04-04T06:30:52 | 2020-04-04T06:21:27 | null | UTF-8 | Python | false | false | 3,762 | py | import math
import sys
sys.stdin = open("sample.txt", "r")
class Dirr:
up = 1
right = 2
down = 3
left = 4
T = int(input())
for ttttt in range(T):
M, A = input().split()
M = int(M)
A = int(A)
pmove = []
ploc = [[0, 0], [9, 9]]
aploc = []
aprange = []
appower = []
for k in range(2):
atmp = input().split()
ttt = []
for i in atmp:
ttt.append(int(i))
# 마지막 이동하고나서 점수 안구함...
ttt.append(0)
pmove.append(ttt)
for i in range(A):
atmp = input().split()
# 반대...!
aploc.append([int(atmp[1]) - 1, int(atmp[0]) - 1])
aprange.append(int(atmp[2]))
appower.append(int(atmp[3]))
# 1. 도달 기지국 검색
# 2. 기지국 배정 (겹치는 거 없앰)
# 3. 점수 계산
# 4. A, B 움직임
score = 0
for idx in range(len(pmove[0])):
# print("#", idx)
# print(ploc)
# print(pmove[0][idx], pmove[1][idx])
# 1. 도달 기지국 검색
reachedq = []
pn = 0
for p in ploc:
for app in range(len(aploc)):
dist = int(math.fabs(aploc[app][0] - p[0])) + int(math.fabs(aploc[app][1] - p[1]))
# print(dist, " / ", aploc[app][0], p[0], aploc[app][1], p[1])
if dist <= aprange[app]:
reachedq.append([pn, app, appower[app]])
pn += 1
# 2. 기지국 배정 (겹치는 거 없앰)
# 2-1. 파워 순으로 정렬
# 2-2. 제일 높은 파워 순으로 배정
# 겹치는 구역이 제일 적은 놈부터 높은거 배정
# 같이 같은 기지국 쓰는 경우, 반으로 나눠가짐!
score1 = 0
score2 = 0
rq1 = []
rq2 = []
score1 = 0
targa1 = 0
targp1 = 0
score2 = 0
targa2 = 0
targp2 = 0
if len(reachedq) != 0:
# 그냥 둘 다 계산해서 큰거로 가져가야할듯
reachedq.sort(key=lambda x: x[2], reverse=True)
# if dup[0] > dup[1]:
reachedq.sort(key=lambda x: x[0], reverse=True)
rq1 = [i for i in reachedq]
score1 += rq1[0][2]
targp1 = rq1[0][0]
targa1 = rq1[0][1]
# print(rq1)
reachedq.sort(key=lambda x: x[0], reverse=False)
rq2 = [i for i in reachedq]
score2 += rq2[0][2]
targp2 = rq2[0][0]
targa2 = rq2[0][1]
# print(rq2)
# print(reachedq[0][2], end=" / ")
# else:
# print(0, end=" / ")
# 3. 점수 계산
ppp = 0
while ppp < len(rq1):
if targp1 == rq1[ppp][0] or targa1 == rq1[ppp][1]:
rq1.pop(ppp)
else:
ppp += 1
if len(rq1) != 0:
score1 += rq1[0][2]
ppp = 0
while ppp < len(rq2):
if targp2 == rq2[ppp][0] or targa2 == rq2[ppp][1]:
rq2.pop(ppp)
else:
ppp += 1
if len(rq2) != 0:
score2 += rq2[0][2]
if len(reachedq) != 0:
score += max(score1, score2)
# print(score1, score2)
# 4. A, B 움직임
for p in range(2):
if pmove[p][idx] == Dirr.left:
ploc[p][1] -= 1
elif pmove[p][idx] == Dirr.right:
ploc[p][1] += 1
elif pmove[p][idx] == Dirr.down:
ploc[p][0] += 1
elif pmove[p][idx] == Dirr.up:
ploc[p][0] -= 1
# print(score)
# print()
print("#" + str(ttttt+1), score)
| [
"uhug@naver.com"
] | uhug@naver.com |
ca85e7f42f83e71dcfbd85aea093539662402707 | bac4daf67a7cb5f613846325b844d02d387965d5 | /sum_row_elements.py | fd06c0657d4e2537a01690ea2ae9c773e2bf9af3 | [] | no_license | NHTdz/baitapthem | 52391bb3d313e3235a174c1ba022bab5f247b32d | 5c452b40088e996d19040ea9a7d42cf24b2ca45a | refs/heads/master | 2023-08-29T23:17:37.229009 | 2021-10-22T06:02:44 | 2021-10-22T06:02:44 | 398,396,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | import random
import numpy as np
def matrix(m,n):
lst = [[random.randint(1,10) for e in range(n)]for e in range(m)]
lst = np.array(lst)
return lst
def sum_row_elements(a,m):
sum = [0,]
for i in range(m):
sum += a[i]
return sum
m = int(input("Nhập số hàng: "))
n = int(input("Nhập số cột: "))
a = matrix(m,n)
tup = tuple(sum_row_elements(a,m))
print(a)
print(tup)
| [
"thangnguyen130703@gmail.com"
] | thangnguyen130703@gmail.com |
2e2fc4e9afbe3e966998d5ddebe6210973b507bb | 7b7c81e39169b7769d2b14618b7fb8f3ef5ea1dc | /tests/MMApp/test_app_admin.py | 7767db100b4a4c0e69fbca15dd345f704f20759c | [] | no_license | tkincaid/tkincaid.github.com | cf349c143056b847c8281d8d363b686a679f6499 | 8a9ab9ea4a061573328b5fcca6706536062e3be5 | refs/heads/master | 2016-09-05T09:24:48.325828 | 2014-09-24T16:49:14 | 2014-09-24T16:49:14 | 21,217,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,436 | py | import unittest
import json
from mock import patch
from config.engine import EngConfig
from common.wrappers import database
from MMApp.entities.user import UserModel
from MMApp.entities.admin import AdminAccessError
class TestAdminBlueprint(unittest.TestCase):
def setUp(self):
self.addCleanup(self.stopPatching)
self.app = None
with patch.dict(EngConfig, {'TEST_MODE': True }, clear = False):
import MMApp.app
self.app = MMApp.app.app.test_client()
self.patchers = []
get_user_session_patch = patch('MMApp.app_admin.get_user_session')
self.patchers.append(get_user_session_patch)
self.mock_get_user_session = get_user_session_patch.start()
mock_user = {
'uid': database.ObjectId(None),
'username':'project-owner@datarobot.com'
}
self.mock_get_user_session.return_value = UserModel(**mock_user)
user_flags_admin_patch = patch('MMApp.app_admin.UserFlagsAdmin')
self.patchers.append(user_flags_admin_patch)
self.MockUserFlagsAdmin = user_flags_admin_patch.start()
user_service_patch = patch('MMApp.app_admin.UserService')
self.patchers.append(user_service_patch)
self.MockUserservice = user_service_patch.start()
self.MockUserservice.return_value.get_account.return_value = mock_user
def stopPatching(self):
super(TestAdminBlueprint, self).tearDown()
for patcher in self.patchers:
if patcher:
patcher.stop()
def test_save_profile(self):
user = {
'max_workers' : 10,
'permissions' : {
'PERMISSION_1' : True,
'PERMISSION_2' : False
}
}
response = self.app.post('/users/{}'.format(database.ObjectId(None)),
content_type='application/json', data=json.dumps(user))
self.assertEqual(response.status_code, 200)
def test_get_permissions_lists(self):
response = self.app.get('/users/permissions')
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data)
self.assertTrue(response_data['permissions'])
def test_activate_account(self):
user = {
'activated' : 0
}
response = self.app.post('/users/{}/activate'.format(database.ObjectId(None)),
content_type = ' application/json', data = json.dumps(user))
self.assertEqual(response.status_code, 200)
def test_search_users(self):
keyword = 'hello'
admin_service = self.MockUserFlagsAdmin.return_value
users = [
UserModel(username='user1'),
UserModel(username='user2'),
UserModel(username='user3'),
]
admin_service.search_users_by_name_and_username.return_value = users
response = self.app.get('/users/search/{}'.format(keyword))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.data)
self.assertEqual(len(response_data['users']), len(users))
def test_search_users_without_permissions(self):
admin_service = self.MockUserFlagsAdmin.return_value
admin_service.search_users_by_name_and_username.side_effect = AdminAccessError()
response = self.app.get('/users/search/{}'.format('hi'))
self.assertEqual(response.status_code, 403) | [
"tom.kincaid@datarobot.com"
] | tom.kincaid@datarobot.com |
be2f33435c3fe867fe75bf726865ef3314474322 | 7d5f3221a2f141ff91eb003fe444af78bf6af077 | /week6/movie.py | 61378f4dbdd9bc2b8ba7fcc993d59ad0ccf49609 | [] | no_license | kkashev/hackbulgaria | abaddbe476ceaa6fb2eb4e981b70be7d95f76943 | ada2a73a311e9af8f7427971241c3a25828ae9e6 | refs/heads/master | 2021-01-21T17:42:03.996637 | 2014-12-08T16:36:10 | 2014-12-08T16:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from connection import Base
from sqlalchemy import Column, Integer, String, Float
class Movie(Base):
__tablename__ = "movies"
id = Column(Integer, primary_key=True)
name = Column(String)
rating = Column(Float)
def __str__(self):
return "{} - {} - {}".format(self.id, self.name, self.rating)
def __repr__(self):
return self.__str__()
| [
"kkashev@gmail.com"
] | kkashev@gmail.com |
dc11c2a9a91ce330d48bdf58adb1905e8abc7e5f | 219b7903ad9b16acb4790f561952021e60f23abe | /giftexchange/admin.py | 671384fdd7635bf9f0761dcda08bdcce3a118864 | [] | no_license | astromitts/gifterator3000 | 40663de82526ef874c05d9385f53e6c2e3cb1625 | 64a8c420eb7b729c96861aa430f7f15cbe499d3d | refs/heads/master | 2023-01-28T06:47:09.110323 | 2020-12-04T01:43:53 | 2020-12-04T01:43:53 | 293,815,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from django.contrib import admin
from giftexchange.models import (
GiftExchange,
Participant,
AppUser,
ExchangeAssignment,
AppInvitation,
MagicLink
)
class ParticipantInline(admin.TabularInline):
model = Participant
fields = ['appuser']
class GiftExchangeAdmin(admin.ModelAdmin):
inlines = [
ParticipantInline,
]
list_display = ['title', 'date']
admin.site.register(AppUser)
admin.site.register(GiftExchange, GiftExchangeAdmin)
admin.site.register(Participant)
admin.site.register(ExchangeAssignment)
admin.site.register(AppInvitation)
admin.site.register(MagicLink)
| [
"morinbe@gmail.com"
] | morinbe@gmail.com |
ed6deb8cf63883ec1dce2f0f037fd6877ffbaea1 | c8c9278ffb74da44789b310540693c66468b998c | /shop/urls.py | 87f494b760e5ec8a18cf0fe7c69c724930fc0633 | [] | no_license | danielspring-crypto/ekit | bdfcec4b2ce8e36bb9e692f7a825bfce0bbf4166 | 560f6fa5522e1cb4a2cf30325b2b1b07beceea3d | refs/heads/main | 2023-01-28T00:20:22.058791 | 2020-12-07T08:33:06 | 2020-12-07T08:33:06 | 319,253,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from django.urls import path
from .import views
app_name = 'shop'
urlpatterns = [
path('', views.product_list, name='product_list'),
path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),
path('<int:id>/<slug:slug>/', views.product_detail, name='product_detail'),
] | [
"borncode3@gmail.com"
] | borncode3@gmail.com |
4193d0bff10f9cc050ef484fe5020c35f4d6a393 | f925df02921953acda77cd415762d690fcca020e | /SoundNet-tensorflow/main.py | 3af8b8f79c1a53a65d1d4e4f080d2cee52e96148 | [
"MIT"
] | permissive | vinitra/music-score-gen | 11b77464e0eae95e74b967a66d795fd80cc0c368 | 2eb37d20a4ad77994b43bfaa9f415af4817e578a | refs/heads/master | 2020-03-10T15:28:48.111674 | 2018-05-12T04:41:14 | 2018-05-12T04:41:14 | 129,449,500 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,394 | py | # TensorFlow version of NIPS2016 soundnet
# Required package: librosa: A python package for music and audio analysis.
# $ pip install librosa
from ops import batch_norm, conv2d, relu, maxpool
from util import preprocess, load_from_list, load_audio
from model import Model
from glob import glob
import tensorflow as tf
import numpy as np
import argparse
import time
import sys
import os
# Make xrange compatible in both Python 2, 3
try:
xrange
except NameError:
xrange = range
local_config = {
'batch_size': 1,
'train_size': np.inf,
'epoch': 200,
'eps': 1e-5,
'learning_rate': 1e-3,
'beta1': 0.9,
'load_size': 22050*4,
'sample_rate': 22050,
'name_scope': 'SoundNet',
'phase': 'train',
'dataset_name': 'ESC50',
'subname': 'mp3',
'checkpoint_dir': 'checkpoint',
'dump_dir': 'output',
'model_dir': None,
'param_g_dir': './models/sound8.npy',
}
class Model():
def __init__(self, config=local_config, param_G=None):
# self.sess = session
self.config = config
self.param_G = param_G
self.g_step = tf.Variable(0, trainable=False)
self.counter = 0
self.model()
def model(self):
# Placeholder
self.label_sound_placeholder = tf.placeholder(tf.float32,
shape=[self.config['batch_size']*(1 + self.config['num_noise_samples']), None, 1, 1]) # (batch * noise samples) x h x w x channel
# self.object_dist = tf.placeholder(tf.float32,
# shape=[self.config['batch_size'], None, 1000]) # batch x h x w x channel
# self.scene_dist = tf.placeholder(tf.float32,
# shape=[self.config['batch_size'], None, 401]) # batch x h x w x channel
# Generator: TODO FIX BAD HACK HERE
self.add_generator(self.label_sound_placeholder, name_scope=self.config['name_scope'])
# KL Divergence
# self.object_loss = self.KL_divergence(self.layers[25], self.object_dist, name_scope='KL_Div_object')
# self.scene_loss = self.KL_divergence(self.layers[26], self.scene_dist, name_scope='KL_Div_scene')
# self.loss = self.object_loss + self.scene_loss
# Summary
# self.loss_sum = tf.summary.scalar("g_loss", self.loss)
# self.g_sum = tf.summary.merge([self.loss_sum])
# self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
# variable collection
# self.g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
# scope=self.config['name_scope'])
# self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=12,
# max_to_keep=5,
# restore_sequentially=True)
# Optimizer and summary
# self.g_optim = tf.train.AdamOptimizer(self.config['learning_rate'], beta1=self.config['beta1']) \
# .minimize(self.loss, var_list=(self.g_vars), global_step=self.g_step)
# Initialize
# init_op = tf.global_variables_initializer()
# self.sess.run(init_op)
# Load checkpoint
# if self.load(self.config['checkpoint_dir']):
# print(" [*] Load SUCCESS")
# else:
# print(" [!] Load failed...")
def fetch_scene_embedding(self, hidden_dim):
# TODO: add another layer to fine tun over layer 24?
return tf.expand_dims(tf.reduce_sum(self.layers[22], axis=1), axis=1)
def add_generator(self, data, name_scope='SoundNet'):
with tf.variable_scope(name_scope) as scope:
self.layers = {}
# Stream one: conv1 ~ conv7
self.layers[1] = conv2d(data, 1, 16, k_h=64, d_h=2, p_h=32, name_scope='conv1')
self.layers[2] = batch_norm(self.layers[1], 16, self.config['eps'], name_scope='conv1')
self.layers[3] = relu(self.layers[2], name_scope='conv1')
self.layers[4] = maxpool(self.layers[3], k_h=8, d_h=8, name_scope='conv1')
self.layers[5] = conv2d(self.layers[4], 16, 32, k_h=32, d_h=2, p_h=16, name_scope='conv2')
self.layers[6] = batch_norm(self.layers[5], 32, self.config['eps'], name_scope='conv2')
self.layers[7] = relu(self.layers[6], name_scope='conv2')
self.layers[8] = maxpool(self.layers[7], k_h=8, d_h=8, name_scope='conv2')
self.layers[9] = conv2d(self.layers[8], 32, 64, k_h=16, d_h=2, p_h=8, name_scope='conv3')
self.layers[10] = batch_norm(self.layers[9], 64, self.config['eps'], name_scope='conv3')
self.layers[11] = relu(self.layers[10], name_scope='conv3')
self.layers[12] = conv2d(self.layers[11], 64, 128, k_h=8, d_h=2, p_h=4, name_scope='conv4')
self.layers[13] = batch_norm(self.layers[12], 128, self.config['eps'], name_scope='conv4')
self.layers[14] = relu(self.layers[13], name_scope='conv4')
self.layers[15] = conv2d(self.layers[14], 128, 256, k_h=4, d_h=2, p_h=2, name_scope='conv5')
self.layers[16] = batch_norm(self.layers[15], 256, self.config['eps'], name_scope='conv5')
self.layers[17] = relu(self.layers[16], name_scope='conv5')
self.layers[18] = maxpool(self.layers[17], k_h=4, d_h=4, name_scope='conv5')
self.layers[19] = conv2d(self.layers[18], 256, 512, k_h=4, d_h=2, p_h=2, name_scope='conv6')
self.layers[20] = batch_norm(self.layers[19], 512, self.config['eps'], name_scope='conv6')
self.layers[21] = relu(self.layers[20], name_scope='conv6')
self.layers[22] = conv2d(self.layers[21], 512, 1024, k_h=4, d_h=2, p_h=2, name_scope='conv7')
self.layers[23] = batch_norm(self.layers[22], 1024, self.config['eps'], name_scope='conv7')
self.layers[24] = relu(self.layers[23], name_scope='conv7')
# Split one: conv8, conv8_2
# NOTE: here we use a padding of 2 to skip an unknown error
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/common_shape_fns.cc#L45
self.layers[25] = conv2d(self.layers[24], 1024, 1000, k_h=8, d_h=2, p_h=2, name_scope='conv8')
self.layers[26] = conv2d(self.layers[24], 1024, 401, k_h=8, d_h=2, p_h=2, name_scope='conv8_2')
return self.layers[26]
def train(self):
"""Train SoundNet"""
start_time = time.time()
# Data info
data = glob('./data/*.{}'.format(self.config['subname']))
batch_idxs = min(len(data), self.config['train_size']) // self.config['batch_size']
for epoch in xrange(self.counter//batch_idxs, self.config['epoch']):
for idx in xrange(self.counter%batch_idxs, batch_idxs):
# By default, librosa will resample the signal to 22050Hz. And range in (-1., 1.)
sound_sample = load_from_list(data[idx*self.config['batch_size']:(idx+1)*self.config['batch_size']], self.config)
# Update G network
# NOTE: Here we still use dummy random distribution for scene and objects
_, summary_str, l_scn, l_obj = self.sess.run([self.g_optim, self.g_sum, self.scene_loss, self.object_loss],
feed_dict={self.sound_input_placeholder: sound_sample, \
self.scene_dist: np.random.randint(2, size=(1, 1, 401)), \
self.object_dist: np.random.randint(2, size=(1, 1, 1000))})
self.writer.add_summary(summary_str, self.counter)
print ("[Epoch {}] {}/{} | Time: {} | scene_loss: {} | obj_loss: {}".format(epoch, idx, batch_idxs, time.time() - start_time, l_scn, l_obj))
if np.mod(self.counter, 1000) == 1000 - 1:
self.save(self.config['checkpoint_dir'], self.counter)
self.counter += 1
#########################
# Loss #
#########################
# Adapt the answer here: http://stackoverflow.com/questions/41863814/kl-divergence-in-tensorflow
def KL_divergence(self, dist_a, dist_b, name_scope='KL_Div'):
return tf.reduce_mean(-tf.nn.softmax_cross_entropy_with_logits(logits=dist_a, labels=dist_b))
#########################
# Save/Load #
#########################
@property
def get_model_dir(self):
if self.config['model_dir'] is None:
return "{}_{}".format(
self.config['dataset_name'], self.config['batch_size'])
else:
return self.config['model_dir']
def load(self, ckpt_dir='checkpoint'):
return self.load_from_ckpt(ckpt_dir) if self.param_G is None \
else self.load_from_npy()
def save(self, checkpoint_dir, step):
""" Checkpoint saver """
model_name = "SoundNet.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.get_model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load_from_ckpt(self, checkpoint_dir='checkpoint'):
""" Checkpoint loader """
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.get_model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Success to read {}".format(ckpt_name))
self.counter = int(ckpt_name.rsplit('-', 1)[-1])
print(" [*] Start counter from {}".format(self.counter))
return True
else:
print(" [*] Failed to find a checkpoint under {}".format(checkpoint_dir))
return False
def load_from_npy(self):
if self.param_G is None: return False
data_dict = self.param_G
for key in data_dict:
with tf.variable_scope(self.config['name_scope'] + '/'+ key, reuse=True):
for subkey in data_dict[key]:
try:
var = tf.get_variable(subkey)
self.sess.run(var.assign(data_dict[key][subkey]))
print('Assign pretrain model {} to {}'.format(subkey, key))
except:
print('Ignore {}'.format(key))
self.param_G.clear()
return True
def main():
args = parse_args()
local_config['phase'] = args.phase
# Setup visible device
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_device
# Make path
if not os.path.exists(args.outpath):
os.mkdir(args.outpath)
# Load pre-trained model
param_G = np.load(local_config['param_g_dir'], encoding='latin1').item() \
if args.phase in ['finetune', 'extract'] \
else None
# Init. Session
sess_config = tf.ConfigProto()
sess_config.allow_soft_placement=True
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as session:
# Build model
model = Model(session, config=local_config, param_G=param_G)
if args.phase in ['train', 'finetune']:
# Training phase
model.train()
elif args.phase == 'extract':
# import when we need
from extract_feat import extract_feat
# Feature extractor
#sound_sample = np.reshape(np.load('./data/demo.npy', encoding='latin1'), [local_config['batch_size'], -1, 1, 1])
import librosa
audio_path = './data/demo.mp3'
sound_sample, _ = load_audio(audio_path)
sound_sample = preprocess(sound_sample, config=local_config)
output = extract_feat(model, sound_sample, args)
def parse_args():
""" Parse input arguments """
parser = argparse.ArgumentParser(description='SoundNet')
parser.add_argument('-o', '--outpath', dest='outpath', help='output feature path. e.g., [output]', default='output')
parser.add_argument('-p', '--phase', dest='phase', help='demo or extract feature. e.g., [train, finetune, extract]', default='finetune')
parser.add_argument('-m', '--layer', dest='layer_min', help='start from which feature layer. e.g., [1]', type=int, default=1)
parser.add_argument('-x', dest='layer_max', help='end at which feature layer. e.g., [24]', type=int, default=None)
parser.add_argument('-c', '--cuda', dest='cuda_device', help='which cuda device to use. e.g., [0]', default='0')
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument('-s', '--save', dest='is_save', help='Turn on save mode. [False(default), True]', action='store_true')
parser.set_defaults(is_save=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| [
"kayleeburns@berkeley.edu"
] | kayleeburns@berkeley.edu |
7cbf9742fce46306b45b1a55e6f3897b5222fef8 | 76d725c5c49060d22282159f1aeba5e41ebd2a6c | /kingdom_api/api/schemas/user.py | 387a6da0e416e593b90de79a42cda865a29cd1ea | [] | no_license | AlexKupreev/kingdom-api | 91eec48492fbd93c80bafaf93e13f4fd856442d6 | 48408f43cbbeed035ed30c29c8c8f13c8886e949 | refs/heads/master | 2023-01-02T15:28:45.014765 | 2020-10-04T05:22:33 | 2020-10-04T05:22:33 | 288,093,898 | 0 | 0 | null | 2020-10-04T05:22:34 | 2020-08-17T05:46:22 | Python | UTF-8 | Python | false | false | 681 | py | from kingdom_api.models import User
from kingdom_api.extensions import ma, db
from flask_security import hash_password
class HashedPassword(ma.Field):
"""Password field that deserializes to a Version object."""
def _deserialize(self, value, *args, **kwargs):
return hash_password(value)
def _serialize(self, value, *args, **kwargs):
return str(value)
class UserSchema(ma.SQLAlchemyAutoSchema):
id = ma.Int(dump_only=True)
# password = ma.String(load_only=True, required=True)
password = HashedPassword(load_only=True, required=True)
class Meta:
model = User
sqla_session = db.session
load_instance = True
| [
"agkupreev@gmail.com"
] | agkupreev@gmail.com |
201b2218e8b32914f9c15ce300bbf0e5f67dc4a5 | cb455b1f1b086ba3d9f8479686b9dbfb34b5fb45 | /tk5.py | 6222f87b1143cbec394c3746dfd11fdeaa27c5c7 | [] | no_license | lv0817/TKinter | a17ebc6f6df71f48c305a050e3e1ff6b262c5afc | f5720b1491fc20433147f9be1a6e838528e7f40d | refs/heads/master | 2021-05-09T08:31:00.385396 | 2018-02-01T06:10:28 | 2018-02-01T06:10:28 | 119,396,317 | 1 | 0 | null | 2018-02-01T06:10:29 | 2018-01-29T14:52:14 | Python | UTF-8 | Python | false | false | 674 | py | from tkinter import *
'''
以后一定要注意多看官方文档
'''
root = Tk()
v = IntVar()
Radiobutton(root,text='one',variable = v,value=2).pack(anchor='w')
Radiobutton(root,text='two',variable = v,value=2).pack(anchor='w')
Radiobutton(root,text='three',variable = v,value=3).pack(anchor='w')
#三个value的值不同,三个按钮才可以互斥
#当选中一个按钮的时候,会吧这个按钮的值交给v(IntVar),此时,这个v是所有button都共有的
#然后每个按钮将拿到的v和自己的value一对比,一样,就表示选中,否则,就没有选中
#
l = Label(root,textvariable = v)#在标签上显示v的结果
l.pack()
mainloop() | [
"412435484@qq.com"
] | 412435484@qq.com |
2bdcf0d2360b2138a878f8f56df0bafff715dcb6 | 50f10b7ef7e42a5998a8e61bdcb2728b23629198 | /skills.py | 1cc2b2cc96258af1e6869366c6f62a33a46e37cb | [] | no_license | ltaziri/skills-dictionaries | fd1b9446aa3b77bb41aa1d123dc0cced2fee616e | 3bdaf1b46388414cf05800bce65e6685f65b4c1a | refs/heads/master | 2021-01-10T07:36:19.392184 | 2016-01-17T00:42:19 | 2016-01-17T00:42:19 | 49,797,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,897 | py | """Skills-dictionaries.
IMPORTANT: these problems are meant to be solved using dictionaries and sets.
"""
def without_duplicates(words):
    """Given a list of words, return the list with duplicates removed.

    For example:

        >>> sorted(without_duplicates(
        ...     ["rose", "is", "a", "rose", "is", "a", "rose"]))
        ['a', 'is', 'rose']

    You should treat differently-capitalized words as different:

        >>> sorted(without_duplicates(
        ...     ["Rose", "is", "a", "rose", "is", "a", "rose"]))
        ['Rose', 'a', 'is', 'rose']
    """

    # A set keeps only distinct items and makes de-duplication O(n),
    # versus the O(n^2) cost of testing membership in a growing list.
    # The result is sorted, matching the original list-based version.
    return sorted(set(words))
def find_unique_common_items(list1, list2):
    """Produce the set of *unique* common items in two lists.

    Given two lists, return a list of the *unique* common items shared between
    the lists.

    IMPORTANT: you may not use 'if ___ in ___' or the method 'index'.

    This should find [1, 2]:

        >>> sorted(find_unique_common_items([1, 2, 3, 4], [2, 1]))
        [1, 2]

    However, now we only want unique items, so for these lists, don't show
    more than 1 or 2 once:

        >>> sorted(find_unique_common_items([4, 3, 2, 1], [1, 1, 2, 2]))
        [1, 2]
    """

    # Set intersection gives exactly the unique shared items, and it
    # honors the constraint above: no 'in' tests and no .index() calls.
    # Sorting the intersection returns a list, matching the original
    # function's sorted-list result.
    return sorted(set(list1) & set(list2))
def count_unique(input_string):
    """Count unique words in a string.

    This function should take a single string and return a dictionary
    that has all of the distinct words as keys, and the number of times
    that word appears in the string as values.

    For example:

        >>> print_dict(count_unique("each word appears once"))
        {'appears': 1, 'each': 1, 'once': 1, 'word': 1}

    Words that appear more than once should be counted each time:

        >>> print_dict(count_unique("rose is a rose is a rose"))
        {'a': 2, 'is': 2, 'rose': 3}

    It's fine to consider punctuation part of a word (e.g., a comma
    at the end of a word can be counted as part of that word) and
    to consider differently-capitalized words as different:

        >>> print_dict(count_unique("Porcupine see, porcupine do."))
        {'Porcupine': 1, 'do.': 1, 'porcupine': 1, 'see,': 1}
    """

    word_counts = {}

    # Split on single spaces (not .split()) to keep the original
    # behavior for consecutive-space and empty-string inputs.
    for word in input_string.split(" "):
        # Standard counting idiom: default to 0 for unseen words,
        # then increment. Avoids comparing against a False sentinel.
        word_counts[word] = word_counts.get(word, 0) + 1

    return word_counts
def translate_to_pirate_talk(phrase):
    """Translate phrase to pirate talk.

    Given a phrase, translate each word to the Pirate-speak equivalent.
    Words that cannot be translated into Pirate-speak should pass through
    unchanged.  Return the resulting sentence.

    Here's a table of English to Pirate translations:

    English     Pirate
    ----------  ----------------
    sir         matey
    hotel       fleabag inn
    student     swabbie
    boy         matey
    madam       proud beauty
    professor   foul blaggart
    restaurant  galley
    your        yer
    excuse      arr
    students    swabbies
    are         be
    lawyer      foul blaggart
    the         th'
    restroom    head
    my          me
    hello       avast
    is          be
    man         matey

    For example:

    >>> translate_to_pirate_talk("my student is not a man")
    'me swabbie be not a matey'

    You should treat words with punctuation as if they were different
    words:

    >>> translate_to_pirate_talk("my student is not a man!")
    'me swabbie be not a man!'
    """
    pirate_dict = {
        "sir": "matey",
        "hotel": "fleabag inn",
        "student": "swabbie",
        "boy": "matey",
        "madam": "proud beauty",
        "professor": "foul blaggart",
        "restaurant": "galley",
        "your": "yer",
        "excuse": "arr",
        "students": "swabbies",
        "are": "be",
        "lawyer": "foul blaggart",
        "the": "th'",
        "restroom": "head",
        "my": "me",
        "hello": "avast",
        "is": "be",
        "man": "matey",
    }

    # dict.get(word, word) returns the translation when one exists and
    # falls back to the original word otherwise -- no membership test
    # needed.  " ".join builds the sentence in one linear pass instead
    # of the quadratic repeated-concatenation loop it replaces.
    return " ".join(pirate_dict.get(word, word) for word in phrase.split(" "))
def sort_by_word_length(words):
    """Given list of words, return list of ascending [(len, [words])].

    Given a list of words, return a list of tuples, ordered by word-length.
    Each tuple should have two items---the length of the words for that
    word-length, and the list of words of that word length.

    For example:

    >>> sort_by_word_length(["ok", "an", "apple", "a", "day"])
    [(1, ['a']), (2, ['ok', 'an']), (3, ['day']), (5, ['apple'])]
    """
    word_dict = {}

    # Group words by length; setdefault creates the list on first sight
    # of a length and appends on every sight after that.
    for word in words:
        word_dict.setdefault(len(word), []).append(word)

    # BUG FIX: the docstring promises ascending order by word length,
    # but plain dict.items() makes no ordering guarantee (and is a view,
    # not a list, in Python 3).  Sorting the items delivers the
    # documented contract.  Keys (lengths) are unique, so the word
    # lists are never compared.
    return sorted(word_dict.items())
def get_sum_zero_pairs(input_list):
    """Given list of numbers, return list of pair summing to 0.

    Given a list of numbers, add up each individual pair of numbers.
    Return a list of each pair of numbers that adds up to 0.

    For example:

    >>> sort_pairs( get_sum_zero_pairs([1, 2, 3, -2, -1]) )
    [[-2, 2], [-1, 1]]

    >>> sort_pairs( get_sum_zero_pairs([3, -3, 2, 1, -2, -1]) )
    [[-3, 3], [-2, 2], [-1, 1]]

    This should always be a unique list, even if there are
    duplicates in the input list:

    >>> sort_pairs( get_sum_zero_pairs([1, 2, 3, -2, -1, 1, 1]) )
    [[-2, 2], [-1, 1]]

    Of course, if there are one or more zeros to pair together,
    that's fine, too (even a single zero can pair with itself):

    >>> sort_pairs( get_sum_zero_pairs([1, 2, 3, -2, -1, 1, 1, 0]) )
    [[-2, 2], [-1, 1], [0, 0]]
    """
    # A set removes duplicate values up front, so every pair is unique.
    unique_nums = set(input_list)

    abs_value_dict = {}
    zero_pair_list = []

    # Zero is its own negation, so a single zero forms the pair [0, 0].
    # Everything else is grouped by absolute value: if both n and -n
    # occur, they end up in the same bucket.  setdefault replaces the
    # fragile `.get(..., False) == False` membership test, which would
    # misbehave for any falsy stored value.
    for num in unique_nums:
        if num == 0:
            zero_pair_list.append([num, num])
        else:
            abs_value_dict.setdefault(abs(num), []).append(num)

    # A bucket with two entries holds n and -n -- a zero-sum pair.
    # Sorting puts the negative member first, matching the doctests.
    for bucket in abs_value_dict.values():
        if len(bucket) > 1:
            zero_pair_list.append(sorted(bucket))

    return zero_pair_list
##############################################################################
# You can ignore everything below this.
def print_dict(d):
    # This method is just used to print dictionaries in key-alphabetical
    # order, and is only used for our documentation tests. You can ignore it.
    #
    # NOTE(review): uses the Python 2 `print` statement, so this file as a
    # whole only runs under Python 2.  Dictionaries are printed as
    # "{key: value, ...}" with keys sorted; any non-dict argument is
    # printed unchanged.
    if isinstance(d, dict):
        print "{" + ", ".join("%r: %r" % (k, d[k]) for k in sorted(d)) + "}"
    else:
        print d
def sort_pairs(l):
    """Return the pairs of *l*, each internally sorted, in sorted order.

    Used only by the documentation tests above; safe to ignore.
    """
    normalized_pairs = [sorted(pair) for pair in l]
    normalized_pairs.sort()
    return normalized_pairs
# Script entry point: run all the doctests embedded in the docstrings
# above and report success.  (Python 2 `print` statements -- this guard
# only executes under Python 2.)
if __name__ == "__main__":
    print

    import doctest

    # testmod() returns a TestResults named tuple; .failed == 0 means
    # every doctest in this module passed.
    if doctest.testmod().failed == 0:
        print "*** ALL TESTS PASSED ***"
    print
| [
"ltaziri@Elizabeth-Taziris-MacBook-Pro.local"
] | ltaziri@Elizabeth-Taziris-MacBook-Pro.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.