# File: File Man/note.py  (repo: splimter/Univ, license: MIT)
import os
import helpers as H
# This function displays the main menu
def menu():
print("1- Ajouter un Etudiant")
print("2- Ajouter un Matieres")
print("3- Ajouter un Note")
print("4- afficher la Moyenne d'un Etudiant")
print("5- Modifie le numIns d'un Etudiant")
print("6- suprimer une Note")
print("7- quiter")
# A class that defines the properties of an Etudiant (student)
class Etudiant:
numInsc = 0
nom = ""
prenom = ""
group = 0
    # Constructor
def __init__(self, numInsc, nom, prenom, group):
self.numInsc = numInsc
self.nom = nom
self.prenom = prenom
self.group = group
# A class that defines the properties of a Matieres (subject)
class Matieres:
codeMat = ""
libelle = ""
coef = 0
    # Constructor
def __init__(self, codeMat, libelle, coef):
self.codeMat = codeMat
self.libelle = libelle
self.coef = coef
# A class that defines the properties of a Note (grade)
class Note:
numInsc = 0
codeMat = ""
note = 0.0
    # Constructor
def __init__(self, numInsc, codeMat, note):
self.numInsc = numInsc
self.codeMat = codeMat
self.note = note
def ajoutEtudiant(etudiant):
    # Check whether the file exists; create it with a header if not
    if not os.path.isfile("Etudiants.dat"):
        with open("Etudiants.dat", 'w') as file:
            file.write("Gestion des Etudiants\n")
            file.write("----------------------\n")
    # Append the fields taken from the object to the file
    with open("Etudiants.dat", 'a+', encoding='utf-8') as file:
        file.write("\n")
        file.write(str(etudiant.numInsc) + "\n")
        file.write(etudiant.nom + "\n")
        file.write(etudiant.prenom + "\n")
        file.write(str(etudiant.group) + "\n")
        file.write("------------------------")
def ajoutMatiere(matiere):
    # Check whether the file exists; create it with a header if not
    if not os.path.isfile("Matieres.dat"):
        with open("Matieres.dat", 'w') as file:
            file.write("Gestion des Matieres\n")
            file.write("---------------\n")
    # Append the fields taken from the object to the file
    with open("Matieres.dat", 'a+', encoding='utf-8') as file:
        file.write("\n")
        file.write(matiere.libelle + "\n")
        file.write(str(matiere.codeMat) + "\n")
        file.write(str(matiere.coef) + "\n")
        file.write("------------------------")
def ajoutNote(note):
    # Check whether the file exists; create it with a header if not
    if not os.path.isfile("Notes.dat"):
        with open("Notes.dat", 'w') as file:
            file.write("Gestion des Notes\n")
            file.write("---------------\n")
    # Append the fields taken from the object to the file
    with open("Notes.dat", 'a+', encoding='utf-8') as file:
        file.write("\n")
        file.write(str(note.numInsc) + "\n")
        file.write(note.codeMat + "\n")
        file.write(str(note.note) + "\n")
        file.write("------------------------")
def MoyenneEtudiant(NumInsc):
if os.path.isfile("Notes.dat"):
n = H.fileTolist("Notes.dat")
else:
return print("Pas de fichier Notes")
if os.path.isfile("Matieres.dat"):
m = H.fileTolist("Matieres.dat")
else:
return print("Pas de fichier Matieres")
n = (H.focusNote(NumInsc, n))
if not n:
return print("Etudiant indisponible")
k = []
for i in range(1, len(n), 3):
if n[i] not in k:
cm = m.copy()
k.append(H.focusMat(n[i], cm))
m = []
for i in k:
for j in i:
m.append(j)
if not m:
return print("Matieres indisponible")
H.extraTrimer(m)
H.extraTrimer(n)
s = 0
for i in range(len(m)):
s += float(m[i]) * float(n[i])
return print("la moyen est: ", s / sum(m))
def ModifEtudiant(NumInsc, newId):
    # Check that the files exist
if os.path.isfile("Notes.dat"):
n = H.fileTolist("Notes.dat")
else:
return print("Pas de fichier Notes")
if os.path.isfile("Etudiants.dat"):
e = H.fileTolist("Etudiants.dat")
else:
return print("Pas de fichier Etudiants")
if str(NumInsc) not in e:
return print("Etudiants indisponible")
if str(NumInsc) not in n:
return
print(n)
print(e)
note, std = [], []
os.remove("Etudiants.dat")
i = 0
while i < len(e):
if e[i] == str(NumInsc):
e[i] = str(newId)
ajoutEtudiant(Etudiant(e[i], e[i+1], e[i+2], e[i+3]))
i += 4
os.remove("Notes.dat")
i = 0
while i < len(n):
if n[i] == str(NumInsc):
n[i] = str(newId)
ajoutNote(Note(n[i], n[i+1], n[i+2]))
i += 3
def SupprimerNote(NumInsc, CodeMat):
if os.path.isfile("Notes.dat"):
n = H.fileTolist("Notes.dat")
else:
return print("Pas de fichier Notes")
os.remove("Notes.dat")
i = 0
while i < len(n):
if n[i] == str(NumInsc) and n[i+1] == str(CodeMat):
i += 3
continue
ajoutNote(Note(n[i], n[i+1], n[i+2]))
i += 3
while True:
menu()
choice = int(input("Option: "))
if choice == 1:
obj = Etudiant(input("Numero d'inscription: "), input("Nom: "), input("Prenom: "), input("Group: "))
ajoutEtudiant(obj)
elif choice == 2:
obj = Matieres(input("Code du matieres: "), input("Libelle: "), input("Coef: "))
ajoutMatiere(obj)
elif choice == 3:
obj = Note(input("Numero d'inscription: "), input("Code du matieres: "), input("Note: "))
ajoutNote(obj)
elif choice == 4:
MoyenneEtudiant(input("donner NumInsc d'Etudiant"))
elif choice == 5:
ModifEtudiant(input("donner NumInsc d'Etudiant"), input("donner le nouveau NumInsc"))
elif choice == 6:
SupprimerNote(input("donner NumInsc d'Etudiant"), input("donner le Code du matiere"))
elif choice == 7:
os._exit(1)
else:
print("Donner une bonne option")
# File: GZP_GTO_QGIS/INSTALLATION/GeoTaskOrganizer/gto_point.py  (repo: msgis/swwat-gzp-template, license: MIT)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# QDoubleValidator needs QValidator in qgis 3.4!
from PyQt5.QtCore import Qt, QLocale, pyqtSignal
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QHBoxLayout, QToolButton, QToolBar, QComboBox, QDoubleSpinBox
from PyQt5 import uic
from qgis.core import QgsProject, QgsCoordinateReferenceSystem, QgsPointXY, QgsCoordinateTransform, QgsVectorLayerUtils, \
QgsWkbTypes, QgsGeometry
from qgis.gui import QgsProjectionSelectionWidget, QgsVertexMarker
import os
from .gto_point_tool import GTOPointTool
class GTOPointWidget(QWidget):
isActive = pyqtSignal(bool)
def __init__(self, gtoObj, parent=None):
super(GTOPointWidget, self).__init__(parent)
self.gtomain = gtoObj.gtomain
self.info = self.gtomain.info
self.debug = self.gtomain.debug
try:
# references
self.helper = self.gtomain.helper
self.iface = self.gtomain.iface
self.prj = QgsProject.instance()
self.canvas = self.iface.mapCanvas()
# references
self.x = 0
self.y = 0
self.xt = 0
self.yt = 0
self.snaped = False
self.crs_transform = None
self.crs_layer = None
self.parent_widget = None # e.g toolbar
self.userEditX = False
self.userEditY = False
# config
self.tools = []
self.coordinatereferences = None
self.scale = 0
self.center = True
self.enable_save = False
self.precision = -1
self.cboCoordSystems = None
self.is_widgetaction = False
self.show_tool_button = False
self.addpoint_attributes = {}
self.tools_after_addpoint = []
# widgets:
uic.loadUi(os.path.join(os.path.dirname(__file__), 'gto_point.ui'), self)
# point tool
self.btnPointTool = self.btnPoint
# x
self.coordX = self.coordX
# self.validX = QDoubleValidator(sys.float_info.min, sys.float_info.max, 16, self.coordX) # no negative numbers possible?
# self.validX = QDoubleValidator(-999999999, 999999999, 16, self.coordX) # working but no range limit
self.validX = QDoubleValidator(self.coordX) # so we use the standard:
self.validX.setNotation(QDoubleValidator.StandardNotation) # By default, this property is set to ScientificNotation: i.e. 1.5E-2 is possible
self.coordX.setValidator(self.validX)
self.btnCopyXt = self.btnCopyXt
self.lblX = self.lblX
# y
self.coordY = self.coordY
self.validY = QDoubleValidator(self.coordY)
self.validY.setNotation(QDoubleValidator.StandardNotation)
self.coordY.setValidator(self.validY)
self.btnCopyYt = self.btnCopyYt
self.lblY = self.lblY
# show
self.btnShow = self.btnShow
self.btnShow.setIcon(self.helper.getIcon('mActionZoomPoint.png'))
# add point
self.btnAddPoint = self.btnAddPoint
self.btnAddPoint.setIcon(self.helper.getIcon('mActionAddPoint.png'))
self.btnAddPoint.setToolTip("Punkt erstellen")
# marker
self.marker = QgsVertexMarker(self.canvas)
self.marker.setColor(Qt.yellow)
self.marker.setIconType(QgsVertexMarker.ICON_CROSS)
self.marker.setIconSize(10)
self.marker.setPenWidth(3)
# See the enum IconType from http://www.qgis.org/api/classQgsVertexMarker.html
# maptool
self.mapTool = GTOPointTool(self.iface, self.canvas)
self.mapTool.isActive.connect(self.setToolStatus)
self.mapTool.canvasReleased.connect(self.setCoords)
# signals
# QToolButton.toggled()
self.btnPoint.clicked.connect(self.setMapTool)
# self.coordX.textChanged.connect(self.set_user_editX)
# self.coordY.textChanged.connect(self.set_user_editY)
self.coordX.textEdited.connect(self.set_user_editX)
self.coordY.textEdited.connect(self.set_user_editY)
# self.coordX.editingFinished.connect(self.check_coords)
# self.coordY.editingFinished.connect(self.check_coords)
self.btnShow.clicked.connect(self.showCoordinate)
self.btnCopyXt.clicked.connect(self.copyXt)
self.btnCopyYt.clicked.connect(self.copyYt)
self.btnAddPoint.clicked.connect(self.add_point)
self.prj.crsChanged.connect(self.prj_crs_changed)
self.iface.mapCanvas().currentLayerChanged.connect(self.layer_changed)
except Exception as e:
self.info.err(e)
def set_user_editX(self, *args):
try:
if self.debug: self.info.log("set_user_editX")
self.userEditX = True
self.marker.hide()
self.marker.setColor(Qt.blue)
self.snaped = False
except Exception as e:
self.info.err(e)
def set_user_editY(self, *args):
try:
if self.debug: self.info.log("set_user_editY")
self.userEditY = True
self.marker.hide()
self.marker.setColor(Qt.blue)
self.snaped = False
except Exception as e:
self.info.err(e)
def reset_user_edit(self):
if self.debug: self.info.log("reset_user_edit")
self.userEditX = False
self.userEditY = False
def check_coords(self):
try:
self.marker.hide()
if self.debug: self.info.log("useredit: X:", self.userEditX, "userEditY:", self.userEditY)
if self.coordX.text() == '':
self.coordX.setText('0')
self.x = 0
if self.coordY.text() == '':
self.coordY.setText('0')
self.y = 0
if self.userEditX or self.userEditY:
self.snaped = False
self.userEditX = False
self.userEditY = False
self.xt = float(self.coordX.text().replace(",", "."))
self.yt = float(self.coordY.text().replace(",", "."))
tr = QgsCoordinateTransform(self.crs_transform, self.prj.crs(), self.prj)
trPoint = tr.transform(QgsPointXY(self.xt, self.yt))
self.x = trPoint.x()
self.y = trPoint.y()
if self.debug: self.info.log("check_coords:", self.x, "/", self.y, "/snaped:", self.snaped)
except Exception as e:
self.info.err(e)
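    # Clarifying note (added): check_coords() re-parses the text fields in
    # the display CRS (self.crs_transform) and converts the point back to
    # the project CRS via QgsCoordinateTransform, so self.x / self.y always
    # hold project-CRS coordinates while self.xt / self.yt keep the
    # user-facing transformed values.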
def setMapTool(self):
try:
self.canvas.setMapTool(self.mapTool)
except Exception as e:
self.info.err(e)
def set_parent_widget(self, widget):
try:
self.parent_widget = widget
if self.parent_widget.action.isChecked():
self.setMapTool()
except Exception as e:
self.info.err(e)
def setToolStatus(self, isActive):
try:
self.btnPoint.setChecked(isActive)
self.marker.hide()
self.isActive.emit(isActive)
if self.parent_widget is not None:
self.parent_widget.set_status(isActive)
except Exception as e:
self.info.err(e)
def setConfig(self, config):
try:
self.tools = config.get("tools", [])
self.coordinatereferences = config.get("coordinatereferences", None)
self.scale = config.get("scale", 0)
self.center = config.get("center", True)
self.enable_save = config.get('enable_save', False)
self.precision = config.get('precision', -1)
self.is_widgetaction = config.get('is_widgetaction', False)
self.show_tool_button = config.get('show_tool_button', not self.is_widgetaction)
self.addpoint_attributes = config.get("addpoint_attributes", {})
self.tools_after_addpoint = config.get("tools_after_addpoint", [])
if self.precision < 0:
self.precision, type_conversion_ok = self.prj.readNumEntry("PositionPrecision", "DecimalPlaces", 3)
# labels:
self.lblX.setText(config.get('label_x', 'X:'))
self.lblY.setText(config.get('label_y', 'Y:'))
# text
text = ''
if self.scale > 0 and self.center:
text = "Auf Koordinate zentrieren, Maßstab: " + str(self.scale)
elif self.center:
text = "Auf Koordinate zentrieren"
elif self.scale > 0:
text = "Maßstab: " + str(self.scale)
elif len(self.tools) > 0:
text = self.tools[0]
act = self.gtomain.findAction(self.tools[0])
if act is not None:
text = act.toolTip()
if act.icon() is not None:
self.btnShow.setIcon(act.icon())
if self.debug: self.info.log(text)
self.btnShow.setToolTip(text)
if self.btnShow.toolTip() == '':
self.btnShow.setHidden(True)
# add point
self.btnAddPoint.setHidden(not self.enable_save)
# point tool
self.btnPointTool.setHidden(not self.show_tool_button)
except Exception as e:
self.info.err(e)
def added(self): # widget was added to parent
try:
self.crs_transform = self.prj.crs()
self.crs_layer = self.iface.activeLayer().crs()
# set crs widget
if self.coordinatereferences is None:
# qgis transform
self.cboCoordSys.setHidden(True)
self.cboCoordSystems = self.mQgsProjectionSelectionWidget
self.cboCoordSystems.setMinimumWidth(460)
self.cboCoordSystems.setOptionVisible(QgsProjectionSelectionWidget.ProjectCrs, True)
self.cboCoordSystems.setCrs(self.prj.crs())
self.setCrs(self.cboCoordSystems.crs())
self.cboCoordSystems.crsChanged.connect(self.setCrs)
else:
# custom transform
self.mQgsProjectionSelectionWidget.setHidden(True)
self.cboCoordSystems = self.cboCoordSys
self.cboCoordSystems.setMinimumWidth(400)
self.cboCoordSystems.currentIndexChanged.connect(
lambda: self.setCrs(self.cboCoordSystems.currentData()))
self.cboCoordSystems.addItem(
"Projekt CRS: " + self.crs_transform.authid() + " - " + self.crs_transform.description(),
self.crs_transform)
for crsID in self.coordinatereferences:
try:
crs = QgsCoordinateReferenceSystem(crsID)
self.cboCoordSystems.addItem(crs.authid() + " - " + crs.description(), crs)
except Exception as e:
self.info.err(e)
self.cboCoordSystems.setCurrentIndex(0)
# here we know which type is cboCoordSystems!
self.setIconSizes()
except Exception as e:
self.info.err(e)
def setIconSizes(self):
try:
if self.parentWidget() is not None:
btns = self.findChildren(QToolButton)
for btn in btns:
try:
btn.setIconSize(self.iface.iconSize(False))
                except Exception:
                    pass
# help for the QGIS widget :S
self.cboCoordSystems.setMaximumHeight(self.cboCoordSys.height())
btns = self.cboCoordSystems.findChildren(QToolButton)
for btn in btns:
btn.setIconSize(self.iface.iconSize(False))
except Exception as e:
self.info.err(e)
def layer_changed(self, layer):
try:
if layer.geometryType() == QgsWkbTypes.GeometryType.PointGeometry:
self.btnAddPoint.setEnabled(True)
else:
self.btnAddPoint.setEnabled(False)
except Exception as e:
self.info.err(e)
def prj_crs_changed(self):
try:
self.reset_user_edit()
if self.coordinatereferences is not None: # my combo
self.crs_transform = self.prj.crs()
self.cboCoordSystems.setItemText(0,
"Projekt CRS: " + self.crs_transform.authid() + " - " + self.crs_transform.description())
self.cboCoordSystems.setItemData(0, self.crs_transform)
self.x = 0
self.y = 0
self.xt = 0
self.yt = 0
self.coordX.setText("---")
self.coordY.setText("---")
except Exception as e:
self.info.err(e)
def add_point(self):
try:
self.check_coords()
layer = self.iface.activeLayer()
if layer.geometryType() == QgsWkbTypes.GeometryType.PointGeometry:
self.prj.layerTreeRoot().findLayer(layer.id()).setItemVisibilityCheckedParentRecursive(True)
if self.x != 0 and self.y != 0:
feat = QgsVectorLayerUtils.createFeature(layer)
tr = QgsCoordinateTransform(self.prj.crs(), self.crs_layer, self.prj)
trPoint = tr.transform(QgsPointXY(self.x, self.y))
feat.setGeometry(QgsGeometry.fromPointXY(trPoint))
# direct save
# (res, features) = layer.dataProvider().addFeatures([feat])
# if self.debug: self.info.log("new point:", res, features[0])
# set attributes
dic_info = {"x": self.x, "y": self.y, "snaped": self.snaped}
# self.info.err(None,"mapping:",dic_info)
# self.info.err(None, "addpoint_attributes:", self.addpoint_attributes)
for k, v in self.addpoint_attributes.items():
# self.info.err(None,"attribute:",k,"value:",dic_info[v])
feat[k] = layer.fields().field(k).convertCompatible(dic_info[v])
features = [feat]
layer.featureAdded.connect(self.select_new_feature)
self.save_features(layer, features)
layer.featureAdded.disconnect(self.select_new_feature)
self.marker.hide()
self.helper.refreshLayer(layer)
self.gtomain.runcmd(self.tools_after_addpoint)
else:
self.info.gtoWarning('Ungültige Koordinaten! x:', self.x, "y:", self.y)
else:
self.info.gtoWarning('Kein Punktlayer ausgewählt!')
except Exception as e:
self.info.err(e)
def select_new_feature(self, featId):
try:
if self.debug: self.info.log("new featue:", self.iface.activeLayer().name(), "/ fid:", featId)
self.iface.activeLayer().selectByIds([featId])
self.mapTool.reset_marker()
self.marker.hide()
self.helper.refreshLayer(self.iface.activeLayer())
except Exception as e:
self.info.err(e)
def save_features(self, layer, features):
if not layer.isEditable():
layer.startEditing()
layer.beginEditCommand("layer {0} edit".format(layer.name()))
try:
layer.addFeatures(features)
layer.endEditCommand()
except Exception as e:
layer.destroyEditCommand()
raise e
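    # Usage sketch (added, hypothetical feature): wrapping addFeatures()
    # between beginEditCommand() and endEditCommand() makes the insert one
    # undoable step in the QGIS edit buffer:
    #   feat = QgsVectorLayerUtils.createFeature(layer)
    #   self.save_features(layer, [feat])
    # If addFeatures() raises, destroyEditCommand() discards the half-done
    # edit before the exception is re-raised.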
def copyXt(self):
self.check_coords()
dsp = QDoubleSpinBox()
dsp.setDecimals(16)
self.helper.copyToClipboard(dsp.textFromValue(self.xt))
def copyYt(self):
self.check_coords()
dsp = QDoubleSpinBox()
dsp.setDecimals(16)
self.helper.copyToClipboard(dsp.textFromValue(self.yt))
def reset(self):
if self.debug: self.info.log("widget reset")
self.marker.hide()
def setCoords(self, point, snaped):
try:
self.reset_user_edit()
self.snaped = snaped
self.x = point.x()
self.y = point.y()
if self.debug: self.info.log("setCoords", self.x, "/", self.y)
self.setCrs(self.crs_transform)
# marker
self.marker.setCenter(QgsPointXY(self.x, self.y))
if snaped:
self.marker.setColor(Qt.red)
else:
self.marker.setColor(Qt.blue)
self.marker.show()
except Exception as e:
self.info.err(e)
def showCoordinate(self):
try:
self.check_coords()
self.marker.hide()
if self.x != 0 and self.y != 0:
pt_center = QgsPointXY(self.x, self.y)
self.marker.setCenter(pt_center)
self.marker.show()
# center map
if self.center:
self.canvas.setCenter(pt_center)
# scale map
if self.scale is not None and self.scale > 0:
self.canvas.zoomScale(self.scale)
self.canvas.refresh()
# run tools
self.gtomain.runcmd(self.tools)
else:
self.info.gtoWarning('Ungültige Koordinate! x:', self.x, "y:", self.y)
except Exception as e:
self.info.err(e)
def setCrs(self, crs):
try:
if self.debug: self.info.log("setCrs")
self.crs_transform = crs
tr = QgsCoordinateTransform(self.prj.crs(), self.crs_transform, self.prj)
trPoint = tr.transform(QgsPointXY(self.x, self.y))
self.xt = trPoint.x()
self.yt = trPoint.y()
d = round(trPoint.x(), self.precision)
display = str(d).replace(".", QLocale().decimalPoint())
self.coordX.setText(display)
d = round(trPoint.y(), self.precision)
display = str(d).replace(".", QLocale().decimalPoint())
self.coordY.setText(display)
except Exception as e:
self.info.err(e)
# File: model/loss.py  (repo: arthurlirui/MVSDF, license: MIT)
from numpy.lib.function_base import diff
import torch
from torch import nn
from torch.nn import functional as F
from itertools import accumulate
import numpy as np
import os
import importlib
from utils.my_utils import carving_t, carving_t2, FeatExt, get_in_range, idx_cam2img, idx_world2cam, normalize_for_grid_sample
import model.conf as conf
if os.environ.get('IDR_USE_ENV', '0') == '1' and os.environ.get('IDR_CONF', '') != '':
print('override conf: ', os.environ.get('IDR_CONF'))
conf = importlib.import_module(os.environ.get('IDR_CONF'))
class IDRLoss(nn.Module):
def __init__(self):
super().__init__()
self.l1_loss = nn.L1Loss(reduction='sum')
def get_rgb_loss(self,rgb_values, rgb_gt, network_object_mask, object_mask):
if (network_object_mask & object_mask).sum() == 0:
return torch.tensor(0.0).cuda().float()
rgb_values = rgb_values[network_object_mask & object_mask]
rgb_gt = rgb_gt.reshape(-1, 3)[network_object_mask & object_mask]
rgb_loss = self.l1_loss(rgb_values, rgb_gt) / float(object_mask.shape[0])
return rgb_loss
def get_eikonal_loss(self, grad_theta):
if grad_theta.shape[0] == 0:
return torch.tensor(0.0).cuda().float()
eikonal_loss = ((grad_theta.norm(2, dim=1) - 1) ** 2).mean()
return eikonal_loss
def get_depth_loss(self, eikonal_points_hom, eikonal_output, depths, cams, size, center, far_thresh, far_att, near_thresh, near_att, smooth):
eikonal_points_hom = eikonal_points_hom.detach()
depths = depths.permute(1,0,2,3,4)
cams = cams.permute(1,0,2,3,4)
eikonal_points_hom[:,:,:3,0] = eikonal_points_hom[:,:,:3,0] / 2 * size.view(1,1,1) + center.view(1,1,3)
if conf.use_invalid: # treat out-of-mask depth as inf
dist, occ, in_range = carving_t(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc)
else: # ignore out-of-mask depth
dist, occ, in_range = carving_t2(eikonal_points_hom, depths, cams, out_thresh_perc=conf.out_thresh_perc) # scale is applied in cams NOTE: hard code
dist_r = (dist / size.view(1,1) * 2 + (-1.25) * (~in_range).to(torch.float32)).clamp(-1.25,1.25)
# loss = nn.SmoothL1Loss()(eikonal_output, -dist_r)
# single depth
# not_inside = (dist_r < int_thresh)
# inside_weight = not_inside + (~not_inside) * int_att
far_mask = dist_r.abs() > far_thresh
far_weight = far_mask * far_att + (~far_mask)
near_mask = dist_r.abs() < near_thresh
near_weight = near_mask * near_att + (~near_mask)
if smooth is not None:
loss = nn.SmoothL1Loss(reduction='none')(eikonal_output / smooth, -dist_r / smooth) * smooth
else:
loss = nn.L1Loss(reduction='none')(eikonal_output, -dist_r)
loss = (loss * far_weight * near_weight * in_range).mean()
return loss
def get_feat_loss2(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
mask = network_object_mask & object_mask
if (mask).sum() == 0:
return torch.tensor(0.0).float().cuda()
sample_mask = mask.view(feat.size()[0], -1)
hit_nums = sample_mask.sum(-1)
accu_nums = [0] + hit_nums.cumsum(0).tolist()
slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
loss = []
## for each image in minibatch
for view_i, slice_ in enumerate(slices):
if slice_.start < slice_.stop:
## projection
diff_surf_pts_slice = diff_surf_pts[slice_]
pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
# rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
## gathering
grid = pts_img[...,:2,0] # vm12
# feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
grid_in_range = get_in_range(grid_n)
valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
## calculation
diff = gathered_feat[:1] - gathered_feat[1:]
if uncerts is None:
gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # vcm1
diff_mask = diff.norm(dim=1, keepdim=True) < ((gathered_norm[:1,...] + gathered_norm[1:,...])/2*1)
print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
sample_loss = (diff * valid_mask * diff_mask).abs().mean()
else:
uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
sample_loss = ((diff.abs() * (-uncert).exp() + 0.01 * uncert)*valid_mask).mean()
else:
sample_loss = torch.zeros(1).float().cuda()
loss.append(sample_loss)
loss = sum(loss) / len(loss)
return loss
def get_feat_loss_corr(self, diff_surf_pts, uncerts, feat, cam, feat_src, src_cams, size, center, network_object_mask, object_mask):
mask = network_object_mask & object_mask
if (mask).sum() == 0:
return torch.tensor(0.0).float().cuda()
sample_mask = mask.view(feat.size()[0], -1)
hit_nums = sample_mask.sum(-1)
accu_nums = [0] + hit_nums.cumsum(0).tolist()
slices = [slice(accu_nums[i], accu_nums[i+1]) for i in range(len(accu_nums)-1)]
loss = []
## for each image in minibatch
for view_i, slice_ in enumerate(slices):
if slice_.start < slice_.stop:
## projection
diff_surf_pts_slice = diff_surf_pts[slice_]
pts_world = (diff_surf_pts_slice / 2 * size.view(1,1) + center.view(1,3)).view(1,-1,1,3,1) # 1m131
pts_world = torch.cat([pts_world, torch.ones_like(pts_world[...,-1:,:])], dim=-2) # 1m141
# rgb_pack = torch.cat([rgb[view_i:view_i+1], rgb_src[view_i]], dim=0) # v3hw
cam_pack = torch.cat([cam[view_i:view_i+1], src_cams[view_i]], dim=0) # v244
pts_img = idx_cam2img(idx_world2cam(pts_world, cam_pack), cam_pack) # vm131
## gathering
grid = pts_img[...,:2,0] # vm12
# feat2_pack = self.feat_ext(rgb_pack)[2] # vchw # TODO: multi-scale feature
feat2_pack = torch.cat([feat[view_i:view_i+1], feat_src[view_i]], dim=0)
grid_n = normalize_for_grid_sample(feat2_pack, grid/2)
grid_in_range = get_in_range(grid_n)
valid_mask = (grid_in_range[:1,...] * grid_in_range[1:,...]).unsqueeze(1) > 0.5 # and
gathered_feat = F.grid_sample(feat2_pack, grid_n, mode='bilinear', padding_mode='zeros', align_corners=False) # vcm1
## calculation
gathered_norm = gathered_feat.norm(dim=1, keepdim=True) # v1m1
corr = (gathered_feat[:1] * gathered_feat[1:]).sum(dim=1, keepdim=True) \
/ gathered_norm[:1].clamp(min=1e-9) / gathered_norm[1:].clamp(min=1e-9) # (v-1)1m1
corr_loss = (1 - corr).abs()
if uncerts is None:
diff_mask = corr_loss < 0.5
print('feat loss mask', (valid_mask & diff_mask).sum().item(), '/', valid_mask.size()[0] * valid_mask.size()[2])
sample_loss = (corr_loss * valid_mask * diff_mask).mean()
else:
uncert = uncerts[view_i].unsqueeze(1).unsqueeze(3) # (v-1)1m1
print(f'uncert: {uncert.min():.4f}, {uncert.median():.4f}, {uncert.max():.4f}')
sample_loss = ((corr_loss * (-uncert).exp() + uncert)*valid_mask).mean()
else:
sample_loss = torch.zeros(1).float().cuda()
loss.append(sample_loss)
loss = sum(loss) / len(loss)
return loss
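    # Formula note (added): for a reference-view feature f0 and source-view
    # features f_i gathered at the same surface point, the block above uses
    # cosine similarity corr = <f0, f_i> / (|f0| * |f_i|) and penalises
    # |1 - corr|, so photo-consistent surface points incur (near-)zero loss.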
def get_surf_loss(self, surf_indicator_output, network_object_mask, object_mask_true):
mask = network_object_mask & object_mask_true
N = mask.sum()
gt1 = torch.ones(N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
gt0 = torch.zeros(surf_indicator_output.size()[0]-N, dtype=surf_indicator_output.dtype, device=surf_indicator_output.device)
gt = torch.cat([gt1, gt0], dim=0)
loss = nn.BCEWithLogitsLoss(reduction='mean')(surf_indicator_output, gt)
return loss
def forward(self, model_outputs, ground_truth, train_progress, n_img):
rgb_gt = ground_truth['rgb'].cuda()
network_object_mask = model_outputs['network_object_mask']
object_mask = model_outputs['object_mask']
ground_truth['size'] = ground_truth['size'][:1]
ground_truth['center'] = ground_truth['center'][:1]
if conf.enable_rgb:
rgb_loss = self.get_rgb_loss(model_outputs['rgb_values'], rgb_gt, network_object_mask, object_mask)
else:
rgb_loss = torch.zeros(1).float().cuda()
eikonal_loss = self.get_eikonal_loss(model_outputs['grad_theta'])
depth_loss = self.get_depth_loss(model_outputs['eikonal_points_hom'], model_outputs['eikonal_output'], ground_truth['depths'], ground_truth['depth_cams'], ground_truth['size'], ground_truth['center'],
far_thresh=conf.far_thresh, far_att=conf.far_att(train_progress),
near_thresh=conf.near_thresh, near_att=conf.near_att(train_progress),
smooth=conf.smooth(train_progress))
if conf.phase[0] <= train_progress and conf.enable_feat:
feat_loss = self.get_feat_loss_corr(model_outputs['diff_surf_pts'], model_outputs.get('uncerts'), *[ground_truth[attr] for attr in ['feat', 'cam', 'feat_src', 'src_cams', 'size', 'center']], network_object_mask, object_mask)
else:
feat_loss = torch.zeros(1).float().cuda()
if conf.phase[0] <= train_progress:
surf_loss = self.get_surf_loss(model_outputs['surf_indicator_output'], network_object_mask, model_outputs['object_mask_true'])
else:
surf_loss = torch.zeros(1).float().cuda()
loss = rgb_loss * conf.rgb_weight(train_progress) + \
eikonal_loss * conf.eikonal_weight + \
surf_loss * conf.surf_weight + \
feat_loss * conf.feat_weight(train_progress) + \
depth_loss * conf.depth_weight(train_progress)
return {
'loss': loss,
'rgb_loss': rgb_loss,
'eikonal_loss': eikonal_loss,
'depth_loss': depth_loss,
'feat_loss': feat_loss,
'surf_loss': surf_loss
}
# File: list-users/list-users.py  (repo: nl-hugo/power-bi-snippets, license: MIT)
import argparse
from bs4 import BeautifulSoup
def list_users(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
soup = BeautifulSoup(f.read(), 'html.parser')
for user in soup.find_all('li', class_='accessListData'):
user_name = user.find(class_='username').string
user_role = user.find(class_='performAction').span.string
print(f'{user_name} ({user_role})')
def handler():
parser = argparse.ArgumentParser()
    parser.add_argument('file', help='path to a saved Power BI access-list HTML page')
args = parser.parse_args()
if args.file:
list_users(args.file)
if __name__ == "__main__":
handler()
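# Usage sketch (added): save the Power BI access-list page as HTML from a
# browser, then run, e.g.:
#   python list-users.py saved_access_page.html
# which prints one "name (role)" line per <li class="accessListData">.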
# File: module.py  (repo: Afvanjaffer/Image-translation-, license: MIT)
import tensorflow as tf
def conv2d_layer(
inputs,
filters,
kernel_size = [4, 4],
strides = [2, 2],
padding = 'same',
activation = None,
kernel_initializer = tf.truncated_normal_initializer(stddev = 0.02),
name = None):
conv_layer = tf.layers.conv2d(
inputs = inputs,
filters = filters,
kernel_size = kernel_size,
strides = strides,
padding = padding,
activation = activation,
kernel_initializer = kernel_initializer,
name = name)
return conv_layer
def conv2d_transpose_layer(
inputs,
filters,
kernel_size,
strides,
padding = 'same',
activation = None,
kernel_initializer = tf.truncated_normal_initializer(stddev = 0.02),
name = None):
deconv_layer = tf.layers.conv2d_transpose(
inputs = inputs,
filters = filters,
kernel_size = kernel_size,
strides = strides,
padding = padding,
activation = activation,
kernel_initializer = kernel_initializer,
name = name)
return deconv_layer
def instance_norm_layer(
inputs,
epsilon = 1e-06,
activation_fn = None,
name = None):
    # pass `name` through as the variable scope so the argument is not silently ignored
    instance_norm_layer = tf.contrib.layers.instance_norm(
        inputs = inputs,
        epsilon = epsilon,
        activation_fn = activation_fn,
        scope = name)
return instance_norm_layer
def residual_block(
inputs,
filters,
kernel_size = [3, 3],
strides = [1, 1],
name_prefix = 'residule_block_'):
p1 = (kernel_size[0] - 1) // 2
p2 = (kernel_size[1] - 1) // 2
paddings = [[0, 0], [p1, p1], [p2, p2], [0, 0]]
h0_pad = tf.pad(tensor = inputs, paddings = paddings, mode = 'REFLECT', name = 'pad0')
h1 = conv2d_layer(inputs = h0_pad, filters = filters, kernel_size = kernel_size, strides = strides, padding = 'valid', activation = None, name = name_prefix + 'conv1')
h1_norm = instance_norm_layer(inputs = h1, activation_fn = tf.nn.relu, name = name_prefix + 'norm1')
h1_pad = tf.pad(tensor = h1_norm, paddings = paddings, mode = 'REFLECT', name = 'pad1')
h2 = conv2d_layer(inputs = h1_pad, filters = filters, kernel_size = kernel_size, strides = strides, padding = 'valid', activation = None, name = name_prefix + 'conv2')
h2_norm = instance_norm_layer(inputs = h2, activation_fn = None, name = name_prefix + 'norm2')
return inputs + h2_norm
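# Shape note (added): with kernel_size [3, 3] and strides [1, 1], REFLECT
# padding of (k - 1) // 2 pixels per side keeps H and W unchanged through
# both 'valid' convolutions, so `inputs + h2_norm` adds equal-shape tensors.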
def discriminator(inputs, num_filters = 64, reuse = False, scope_name = 'discriminator'):
with tf.variable_scope(scope_name) as scope:
# Discriminator would be reused in CycleGAN
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h0 = conv2d_layer(inputs = inputs, filters = num_filters, activation = tf.nn.leaky_relu, name = 'h0_conv')
h1 = conv2d_layer(inputs = h0, filters = num_filters * 2, activation = None, name = 'h1_conv')
h1_norm = instance_norm_layer(inputs = h1, activation_fn = tf.nn.leaky_relu, name = 'h1_norm')
h2 = conv2d_layer(inputs = h1_norm, filters = num_filters * 4, activation = None, name = 'h2_conv')
h2_norm = instance_norm_layer(inputs = h2, activation_fn = tf.nn.leaky_relu, name = 'h2_norm')
h3 = conv2d_layer(inputs = h2_norm, filters = num_filters * 8, strides = [1, 1], activation = None, name = 'h3_conv')
h3_norm = instance_norm_layer(inputs = h3, activation_fn = tf.nn.leaky_relu, name = 'h3_norm')
h4 = conv2d_layer(inputs = h3_norm, filters = 1, strides = [1, 1], activation = None, name = 'h4_conv')
return h4
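# Note (added): this is a PatchGAN-style discriminator as used in
# pix2pix/CycleGAN; h4 is a spatial map of scores, so each output unit
# classifies one overlapping patch of the input as real or fake.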
def generator_resnet(inputs, num_filters = 64, output_channels = 3, reuse = False, scope_name = 'generator_resnet'):
with tf.variable_scope(scope_name) as scope:
        # Generator would be reused in CycleGAN
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
#output_channels = inputs.shape[-1]
# Check tf.pad using 'REFLECT' mode
# https://www.tensorflow.org/api_docs/python/tf/pad
c0 = tf.pad(tensor = inputs, paddings = [[0, 0], [3, 3], [3, 3], [0, 0]], mode = 'REFLECT', name = 'c0_pad')
c1 = conv2d_layer(inputs = c0, filters = num_filters, kernel_size = [7, 7], strides = [1, 1], padding = 'valid', activation = None, name = 'c1_conv')
c1_norm = instance_norm_layer(inputs = c1, activation_fn = tf.nn.relu, name = 'c1_norm')
c2 = conv2d_layer(inputs = c1_norm, filters = num_filters * 2, kernel_size = [3, 3], strides = [2, 2], activation = None, name = 'c2_conv')
c2_norm = instance_norm_layer(inputs = c2, activation_fn = tf.nn.relu, name = 'c2_norm')
c3 = conv2d_layer(inputs = c2_norm, filters = num_filters * 4, kernel_size = [3, 3], strides = [2, 2], activation = None, name = 'c3_conv')
c3_norm = instance_norm_layer(inputs = c3, activation_fn = tf.nn.relu, name = 'c3_norm')
r1 = residual_block(inputs = c3_norm, filters = num_filters * 4, name_prefix = 'residual1_')
r2 = residual_block(inputs = r1, filters = num_filters * 4, name_prefix = 'residual2_')
r3 = residual_block(inputs = r2, filters = num_filters * 4, name_prefix = 'residual3_')
r4 = residual_block(inputs = r3, filters = num_filters * 4, name_prefix = 'residual4_')
r5 = residual_block(inputs = r4, filters = num_filters * 4, name_prefix = 'residual5_')
r6 = residual_block(inputs = r5, filters = num_filters * 4, name_prefix = 'residual6_')
r7 = residual_block(inputs = r6, filters = num_filters * 4, name_prefix = 'residual7_')
r8 = residual_block(inputs = r7, filters = num_filters * 4, name_prefix = 'residual8_')
r9 = residual_block(inputs = r8, filters = num_filters * 4, name_prefix = 'residual9_')
d1 = conv2d_transpose_layer(inputs = r9, filters = num_filters * 2, kernel_size = [3, 3], strides = [2, 2], name = 'd1_deconv')
d1_norm = instance_norm_layer(inputs = d1, activation_fn = tf.nn.relu, name = 'd1_norm')
d2 = conv2d_transpose_layer(inputs = d1_norm, filters = num_filters, kernel_size = [3, 3], strides = [2, 2], name = 'd2_deconv')
d2_norm = instance_norm_layer(inputs = d2, activation_fn = tf.nn.relu, name = 'd2_norm')
d2_pad = tf.pad(tensor = d2_norm, paddings = [[0, 0], [3, 3], [3, 3], [0, 0]], mode = 'REFLECT', name = 'd2_pad')
d3 = conv2d_layer(inputs = d2_pad, filters = output_channels, kernel_size = [7, 7], strides = [1, 1], padding = 'valid', activation = tf.nn.tanh, name = 'd3_conv')
return d3
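# Minimal smoke test (added sketch, not part of the original module).
# Assumes a TF 1.x environment, matching the tf.layers / tf.contrib
# calls above; scope names here are arbitrary.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
    fake = generator_resnet(inputs=images, num_filters=64,
                            output_channels=3, scope_name='g_test')
    scores = discriminator(inputs=fake, num_filters=64,
                           scope_name='d_test')
    # Generator keeps the spatial size; discriminator downsamples by 8.
    print(fake.shape, scores.shape)  # (?, 256, 256, 3) (?, 32, 32, 1)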
# File: src/core/train.py  (repo: spencerpomme/GSPNet, license: MIT)
'''
Copyright <2019> <COPYRIGHT Pingcheng Zhang>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Training methods defined here.
A part of GSPNet project.
'''
import numpy as np
import pandas as pd
import pickle as pkl
import matplotlib.pyplot as plt
import os
import re
import time
import torch
from torch import nn, optim
from torch.utils import data
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import SubsetRandomSampler, SequentialSampler
from glob import iglob, glob
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from tqdm import tqdm
from pathlib import Path
# import models, loss functions and datasets
import models
from models import *
from losses import *
from datasets import *
# Environment global variable
TRAIN_ON_MULTI_GPUS = False # (torch.cuda.device_count() >= 2)
# helper functions
def create_dir(directory: str):
'''
Helper function to create directory
Args:
directory: a string describing the to be created dir
'''
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
raise OSError
def save_model(model, hyps: dict):
'''
Save model to local file.
Args:
model: trained model
hyps: hyperparameters of the trained model
'''
name = ''
mn = hyps["mn"]
name += f'mn{hyps["mn"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
'ConvAutoEncoder', 'ConvAutoEncoderShallow']:
name += f'-os{hyps["os"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder']:
name += f'-is{hyps["is"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
'SparseAutoEncoder', 'SparseConvAutoEncoder']:
name += f'-hd{hyps["hd"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN']:
name += f'-nl{hyps["nl"]}-dp{hyps["dp"]}-sl{hyps["sl"]}'
if mn in ['ConvClassifier', 'MLPClassifier']:
name += f'-nc{hyps["nc"]}'
if mn in ['ConvAutoEncoder', 'ConvAutoEncoderShallow', 'VAE',
'SparseAutoEncoder', 'SparseConvAutoEncoder']:
name += f'-md{hyps["md"]}'
if mn in ['VAE']:
name += f'-zd{hyps["z_dim"]}'
if mn in ['GAN']:
name += f'-zs{hyps["zs"]}'
name += f'-ss{hyps["ss"]}'
name += f'-cd{hyps["cd"]}'
name += f'-vs{hyps["vs"]}'
name += f'-md{hyps["md"]}'
name += f'-bs{hyps["bs"]}-lr{hyps["lr"]}.pt'
model_path = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve(
).parents[1].joinpath(f'output/trained_models/weight'))
create_dir(model_path)
model_path = model_path + '/' + name
torch.save(model.state_dict(), model_path)
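# Filename example (added, hypothetical hyperparameter values): for
# hyps = {'mn': 'VanillaLSTM', 'os': 14283, 'is': 14283, 'hd': 512,
#         'nl': 2, 'dp': 0.5, 'sl': 12, 'bs': 64, 'lr': 0.001}
# save_model() writes the weights to output/trained_models/weight/
# mnVanillaLSTM-os14283-is14283-hd512-nl2-dp0.5-sl12-bs64-lr0.001.pt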
def get_curve_name(dest: str, hyps: dict):
'''
Generate training loss curve image name.
Args:
dest: folder to save trained model
hyps: hyperparameters of the trained model
'''
name = ''
mn = hyps["mn"]
name += f'mn{hyps["mn"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
'ConvAutoEncoder', 'ConvAutoEncoderShallow']:
name += f'-os{hyps["os"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder']:
name += f'-is{hyps["is"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN', 'AutoEncoder',
'SparseAutoEncoder']:
name += f'-hd{hyps["hd"]}'
if mn in ['VanillaLSTM', 'VanillaGRU', 'EmbedRNN']:
name += f'-nl{hyps["nl"]}-dp{hyps["dp"]}-sl{hyps["sl"]}'
if mn in ['ConvClassifier', 'MLPClassifier']:
name += f'-nc{hyps["nc"]}'
if mn in ['ConvAutoEncoder', 'ConvAutoEncoderShallow', 'VAE',
'SparseAutoEncoder']:
name += f'-md{hyps["md"]}'
if mn in ['VAE']:
name += f'-zd{hyps["z_dim"]}'
name += f'-bs{hyps["bs"]}-lr{hyps["lr"]}.png'
return dest + '/' + name
# data feeder, type 2, deprecated
def batch_dataset(datadir, seq_len, batch_size):
    '''
    Batch the neural network data using DataLoader.
    Args:
        datadir: Directory storing tensor data
        seq_len: The sequence length of each batch
        batch_size: Number of (sequence, target) pairs per batch
    Return:
        DataLoader with batched data
    '''
# WARNING: this function is deprecated, will remove after 2019 May 1st
data_iter = iglob(datadir + '/*')
states = []
print('Loading dataset...')
print('Loading training set...')
for state in tqdm(data_iter, ascii=True):
state = torch.load(state).numpy()
states.append(state)
states = np.array(states)
states = states.reshape((len(states), -1))
states = states.astype('float32')
num_batches = len(states) // seq_len
# only full batches
states = states[: num_batches * seq_len]
features, targets = [], []
for idx in range(0, (len(states) - seq_len)):
features.append(states[idx: idx + seq_len])
targets.append(states[idx + seq_len])
data = TensorDataset(torch.from_numpy(np.array(features)),
torch.from_numpy(np.array(targets)))
data_loader = torch.utils.data.DataLoader(
data, shuffle=False, batch_size=batch_size, num_workers=0)
return data_loader
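# Sliding-window illustration (added, hypothetical values): with
# seq_len = 3 and flattened states [s0, s1, s2, s3, s4, s5], the loop
# above emits the pairs
#   [s0, s1, s2] -> s3,  [s1, s2, s3] -> s4,  [s2, s3, s4] -> s5
# i.e. every window of seq_len consecutive states predicts the next one.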
def check_encoder_dim(mode: str, model, dataset):
'''
Check whether the convolutional autoencoder architecture matches data dimension.
Args:
mode: `pnf` or `od` mode
model: convencoder model instance
dataset: dataset object
    Raises:
        AssertionError: if the channel dimension does not match `mode`
    '''
loader = DataLoader(dataset,
batch_size=1, num_workers=0, drop_last=True)
iterator = iter(loader)
    X, y = next(iterator)
if mode == 'od':
assert X.size(1) == 1, f'Mode `od`: X expect channel size 1 but get {X.size(1)}.'
elif mode == 'pnf':
assert X.size(1) == 3, f'Mode `pnf`: X expect channel size 3 but get {X.size(1)}'
# training function of CNN classification
def train_classifier(model, optimizer, criterion, n_epochs,
train_loader, valid_loader, hyps,
stop_criterion=20, device='cuda:0',
show_every_n_batches=100):
'''
Train a CNN classifier with the given hyperparameters.
Args:
model: The PyTorch Module that holds the neural network
optimizer: The PyTorch optimizer for the neural network
criterion: The PyTorch loss function
n_epochs: Total go through of entire dataset
train_loader: Training data loader
valid_loader: Validation data loader
hyps: A dict containing hyperparameters
stop_criterion: Early stop variable
device: Training device
show_every_batches: Display loss every this number of time steps
Returns:
A trained model. The best model will also be saved locally.
'''
# clear cache
torch.cuda.empty_cache()
# start timing
start = time.time()
print(f'Training on device {device} started at {time.ctime()}')
# validation constants
early_stop_count = 0
valid_loss_min = np.inf
train_losses = []
valid_losses = []
# for plot training loss and validation loss
tl = []
vl = []
model.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
# early stop mechanism:
if early_stop_count >= stop_criterion:
print(f'Early stop triggered after {stop_criterion} epochs.')
break
for data, label in train_loader:
# forward, back prop
if TRAIN_ON_MULTI_GPUS:
data, label = data.cuda(), label.cuda()
elif torch.cuda.is_available():
data, label = data.to(device), label.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, label)
loss.backward()
optimizer.step()
# record loss
train_losses.append(loss.item())
model.eval()
for v_data, v_label in valid_loader:
v_data, v_label = v_data.to(device), v_label.to(device)
v_output = model(v_data)
val_loss = criterion(v_output, v_label)
valid_losses.append(val_loss.item())
model.train()
avg_val_loss = np.mean(valid_losses)
avg_tra_loss = np.mean(train_losses)
tl.append(avg_tra_loss)
vl.append(avg_val_loss)
# printing loss stats
print(
f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_tra_loss:.6f} | Val Loss: {avg_val_loss:.6f} | Min Val: {valid_loss_min:.6f}',
flush=True)
# decide whether to save model or not:
if avg_val_loss < valid_loss_min:
            print(f'Valid Loss {valid_loss_min:.6f} -> {avg_val_loss:.6f}. '
                  f'Saving...', flush=True)
save_model(model, hyps)
valid_loss_min = avg_val_loss
early_stop_count = 0
else:
early_stop_count += 1
# clear
train_losses = []
valid_losses = []
# returns a trained model
end = time.time()
print(f'Training ended at {time.ctime()}, took {end-start:2f} seconds.')
return model, (tl, vl)
def run_classifier_training(model_name, data_dir, epochs, bs, vs, lr, nc,
dp=0.5, device='cuda:0'):
'''
Main function of cnn classifier training.
Args:
model_name: model name
data_dir: data source location
epochs: number of epochs to train
bs: batch_size
vs: validation size, proportion of validation data set
lr: learning_rate
nc: number of classes
dp: drop_prob
device: GPU or CPU
'''
# Training parameters
    learning_rate = lr
    batch_size = bs
# Model parameters
input_size = 69 * 69 * 3 # <- don't change this value
    drop_prob = dp
# Show stats for every n number of batches
senb = 5000
# wrap essential info into dictionary:
hyps = {
'mn': model_name,
'bs': batch_size,
'lr': learning_rate,
'nc': nc,
'dp': drop_prob
}
# LSTM data loader
data_set = SnapshotClassificationDatasetRAM(data_dir)
# split data for training and validation
num_train = len(data_set)
indices = list(range(num_train))
split = int(np.floor(vs * num_train))
# shuffle
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = DataLoader(data_set, sampler=train_sampler,
batch_size=batch_size, num_workers=0, drop_last=True)
valid_loader = DataLoader(data_set, sampler=valid_sampler,
batch_size=batch_size, num_workers=0, drop_last=True)
# initialize model
model = models.__dict__[model_name](n_classes=nc)
# model training device
if TRAIN_ON_MULTI_GPUS:
model = nn.DataParallel(model).cuda()
elif torch.cuda.is_available():
model = model.to(device)
else:
print('Training on CPU, very long training time is expectable.')
# optimizer and criterion(loss function)
if TRAIN_ON_MULTI_GPUS:
optimizer = optim.SGD(model.module.parameters(), lr=learning_rate)
else:
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# start training
trained_model, tlvl = train_classifier(model, optimizer, criterion, epochs,
train_loader, valid_loader, hyps, device=device)
# loss plot
tl, vl = tlvl
x = np.arange(len(tl))
# for model 3 of classification only
x, tl, vl = x[1:], tl[1:], vl[1:]
train_curve, = plt.plot(x, tl, 'r-', label='train loss')
valid_curve, = plt.plot(x, vl, 'b-', label='valid loss')
plt.legend(handler_map={train_curve: HandlerLine2D(numpoints=1)})
    create_dir('trained_models')  # ensure the output directory exists before saving
    curve_name = get_curve_name('trained_models', hyps)
plt.savefig(curve_name)
plt.show()
def forward_back_prop(model, optimizer, criterion, inp, target, hidden, clip):
"""
Forward and backward propagation on the neural network.
Args:
model: The PyTorch Module that holds the neural network
optimizer: The PyTorch optimizer for the neural network
criterion: The PyTorch loss function
inp: A batch of input to the neural network
target: The target output for the batch of input
hidden: Hidden state
clip: Clip the overly large gradient
Returns:
The loss and the latest hidden state Tensor
"""
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
if type(hidden) == tuple:
h = tuple([each.data for each in hidden])
else:
h = hidden.data
# zero accumulated gradients
if TRAIN_ON_MULTI_GPUS:
model.module.zero_grad()
else:
model.zero_grad()
# print(f'input shape: {inp}, target shape: {target}')
# get the output from the model
output, h = model(inp, h)
# calculate the loss and perform backprop
loss = criterion(output, target)
loss.backward()
# 'clip_grad_norm' helps prevent the exploding gradient problem in RNNs
if TRAIN_ON_MULTI_GPUS:
nn.utils.clip_grad_norm_(model.module.parameters(), clip)
else:
nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
# return the loss over a batch and the hidden state produced by our model
return loss.item(), h
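# Hedged call sketch (names and shapes are illustrative): for a recurrent
# model exposing init_hidden(), one training step looks like
#
#     hidden = model.init_hidden(batch_size)
#     loss, hidden = forward_back_prop(model, optimizer, criterion,
#                                      inputs, targets, hidden, clip=5)
#
# which is exactly how train_recurrent() below drives it.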
# training function for sequential prediction
def train_recurrent(model, batch_size, optimizer, criterion,
n_epochs, train_loader, valid_loader, hyps, clip=5,
stop_criterion=20, show_every_n_batches=1, multi_gpus=True,
device='cuda:0'):
'''
Train a LSTM model with the given hyperparameters.
Args:
model: The PyTorch Module that holds the neural network
batch_size: batch size, integer
optimizer: The PyTorch optimizer for the neural network
criterion: The PyTorch loss function
n_epochs: Total number of passes through the entire dataset
train_loader: Training data loader
valid_loader: Validation data loader
hyps: A dict containing model parameters
clip: Clip the overly large gradient
show_every_n_batches: Display loss every this number of batches
multi_gpus: Whether have multiple GPUs
device: location to put tensor/model
Returns:
A trained model. The best model will also be saved locally.
'''
# clear cache
torch.cuda.empty_cache()
# start timing
start = time.time()
print(f'Training on device {device} started at {time.ctime()}')
# validation constants
early_stop_count = 0
valid_loss_min = np.inf
train_losses = []
# for plot training loss and validation loss
tl = []
vl = []
model.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
if TRAIN_ON_MULTI_GPUS and multi_gpus:
hidden = model.module.init_hidden(batch_size)
else:
hidden = model.init_hidden(batch_size)
for batch_i, (inputs, labels) in enumerate(train_loader, 1):
# early stop mechanism:
if early_stop_count >= stop_criterion:
print(f'Early stop triggered after {stop_criterion} epochs.')
break
# make sure you iterate over completely full batches, only
n_batches = len(train_loader.dataset) // batch_size
if batch_i > n_batches:
break
# forward, back prop
# print(f'inputs shape: {inputs.shape} labels shape: {labels.shape}')
# print(f'inputs dtype: {inputs[0][0][0].dtype} label shape: {labels[0][0].dtype}')
if TRAIN_ON_MULTI_GPUS and multi_gpus:
inputs, labels = inputs.cuda(), labels.cuda()
elif torch.cuda.is_available():
inputs, labels = inputs.to(device), labels.to(device)
# print(f'Input shape: {inputs.shape}')
loss, hidden = forward_back_prop(
model, optimizer, criterion, inputs, labels, hidden, clip
)
# record loss
train_losses.append(loss)
# print loss every show_every_n_batches batches
# including validation loss
if batch_i % show_every_n_batches == 0:
# get validation loss
if TRAIN_ON_MULTI_GPUS:
val_h = model.module.init_hidden(batch_size)
else:
val_h = model.init_hidden(batch_size)
valid_losses = []
# switch to validation mode
model.eval()
for v_inputs, v_labels in valid_loader:
if TRAIN_ON_MULTI_GPUS and multi_gpus:
v_inputs, v_labels = v_inputs.cuda(), v_labels.cuda()
elif torch.cuda.is_available():
v_inputs, v_labels = v_inputs.to(device), v_labels.to(device)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
# if type is tuple, then the model is LSTM
if type(val_h) == tuple:
val_h = tuple([each.data for each in val_h])
else:
val_h = val_h.data
v_output, val_h = model(v_inputs, val_h)
val_loss = criterion(v_output, v_labels)
valid_losses.append(val_loss.item())
model.train()
avg_val_loss = np.mean(valid_losses)
avg_tra_loss = np.mean(train_losses)
tl.append(avg_tra_loss)
vl.append(avg_val_loss)
# printing loss stats
print(
f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_tra_loss:.6f} ' +
f'| Val Loss: {avg_val_loss:.6f} | Min Val: {valid_loss_min:.6f}',
flush=True)
# decide whether to save model or not:
if avg_val_loss < valid_loss_min:
print(f'Valid Loss {valid_loss_min:.6f} -> {avg_val_loss:.6f}. Saving...', flush=True)
# saving state_dict of model
save_model(model, hyps)
valid_loss_min = avg_val_loss
early_stop_count = 0
else:
early_stop_count += 1
train_losses = []
valid_losses = []
# returns a trained model
end = time.time()
print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
return model, (tl, vl)
# run functions of this module
def run_recursive_training(model_name, data_dir, epochs, bs, vs, lr, sl=12,
hd=256, nl=2, dp=0.5, device='cuda:0'):
'''
Main function of RNNs training.
Args:
model_name: model name
data_dir: data source location
epochs: number of epochs to train
bs: batch_size
vs: validation proportion
lr: learning_rate
sl: sequence_length
hd: hidden_dim
nl: n_layers
dp: drop_prob
device: training hardware, GPU or CPU
'''
# LSTM Model Data params
sequence_length = sl # number of time slices in a sequence
clip = 5
# Training parameters
epochs = epochs
learning_rate = lr
batch_size = bs
# Model parameters
input_size = 69 * 69 * 3 # <- don't change this value
output_size = input_size
hidden_dim = hd
# Number of RNN Layers
n_layers = nl
drop_prob = dp
# Show stats for every n number of batches (currently not passed to the trainer)
senb = 5000
# wrap essential info into dictionary:
hyps = {
'mn': model_name,
'is': input_size,
'os': output_size,
'sl': sequence_length,
'bs': batch_size,
'lr': learning_rate,
'hd': hidden_dim,
'nl': n_layers,
'dp': drop_prob
}
data_set = S2FDatasetRAM(data_dir, sequence_length)
# split dataset for training and validation
num_train = len(data_set)
indices = list(range(num_train))
split = int(np.floor(vs * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# NOTE: SequentialSampler(train_idx) yields positions 0..len(train_idx)-1,
# not the index values themselves, so both loaders would read from the start
# of the dataset. Wrapping the splits in Subset keeps the intended split
# while preserving sequential iteration order.
from torch.utils.data import Subset
train_loader = DataLoader(Subset(data_set, train_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
valid_loader = DataLoader(Subset(data_set, valid_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
# initialize model
model = models.__dict__[model_name](input_size, output_size, hidden_dim,
n_layers=n_layers, drop_prob=drop_prob,
device=device)
# model training device
if TRAIN_ON_MULTI_GPUS:
model = nn.DataParallel(model).cuda()
elif torch.cuda.is_available():
model = model.to(device)
else:
print('Training on CPU, expect a very long training time.')
# optimizer and criterion(loss function)
if TRAIN_ON_MULTI_GPUS:
optimizer = optim.Adam(model.module.parameters(), lr=learning_rate)
else:
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# loss:
# criterion = nn.MSELoss()
criterion = dich_mse_loss
# start training
trained_model, tlvl = train_recurrent(model, batch_size, optimizer,
criterion, epochs, train_loader,
valid_loader, hyps, device=device)
# loss plot
tl, vl = tlvl
x = np.arange(len(tl))
train_curve, = plt.plot(x, tl, 'r-', label='train loss')
valid_curve, = plt.plot(x, vl, 'b-', label='valid loss')
plt.legend(handler_map={train_curve: HandlerLine2D(numpoints=1)})
curve_name = get_curve_name('trained_models', hyps)
plt.savefig(curve_name)
plt.show()
def train_encoder(model, optimizer, criterion, n_epochs, loader, hyps,
early_stop_count=20, device='cuda:0', show_every_n_epochs=10):
'''
Train an auto encoder with the given hyperparameters.
Args:
model: The PyTorch Module that holds the neural network
optimizer: The PyTorch optimizer for the neural network
criterion: The PyTorch loss function
n_epochs: Total number of passes through the entire dataset
early_stop_count: Early stop number
loader: Training data loader
hyps: A dict containing hyperparameters
device: Training device
show_every_n_epochs: Display loss every this number of epochs
Returns:
A trained model. The best model will also be saved locally.
'''
torch.cuda.empty_cache()
start = time.time()
print(f'Training on device {device} started at {time.ctime()}')
loss_min = np.inf
losses = []
stop = 0
model.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
if stop >= early_stop_count:
print(f'Loss has not improved for {stop} checks; early stop triggered.')
break
for data, _ in loader:
if TRAIN_ON_MULTI_GPUS:
data = data.cuda()
elif torch.cuda.is_available():
data = data.to(device)
optimizer.zero_grad()
output = model(data)
# print(f'output.shape -> {output.shape} | data.shape -> {data.shape}')
loss = criterion(output, data)
loss.backward()
optimizer.step()
losses.append(loss.item())
if epoch_i % show_every_n_epochs == 0:
avg_loss = np.mean(losses)
print(f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {avg_loss:.6f}')
if avg_loss < loss_min:
print(f'Loss {loss_min:.6f} -> {avg_loss:.6f}. Saving...')
# saving state_dict of model
save_model(model, hyps)
loss_min = avg_loss
stop = 0
else:
stop += 1
losses = []
end = time.time()
print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
return model
def train_vae(model, optimizer, criterion, n_epochs,
loader, hyps, device='cuda:0', show_every_n_batches=100):
'''
Train a VAE with the given hyperparameters.
Args:
model: The PyTorch Module that holds the neural network
optimizer: The PyTorch optimizer for the neural network
criterion: The PyTorch loss function
n_epochs: Total number of passes through the entire dataset
loader: Training data loader
hyps: A dict containing hyperparameters
device: Training device
show_every_n_batches: Display loss every this number of batches
Returns:
A trained model. The best model will also be saved locally.
'''
# clear cache
torch.cuda.empty_cache()
# start timing
start = time.time()
print(f'Training on device {device} started at {time.ctime()}')
# validation constants
valid_loss_min = np.inf
losses = []
bces = []
klds = []
model.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
for data, _ in loader:
# forward, back prop
if TRAIN_ON_MULTI_GPUS:
data = data.cuda()
elif torch.cuda.is_available():
data = data.to(device)
recon_images, mu, logvar = model(data)
# print(f'label.shape -> {label.shape} | recon_images.shape -> {recon_images.shape}')
loss, bce, kld = criterion(recon_images, data, mu, logvar)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# record loss
losses.append(loss.item())
bces.append(bce.item())
klds.append(kld.item())
al = np.mean(losses)
ab = np.mean(bces)
ak = np.mean(klds)
# printing loss stats
print(
f'Epoch: {epoch_i:>4}/{n_epochs:<4} | Loss: {al:.6f} | BCE: {ab:.6f} | KLD: {ak:.6f}', flush=True)
# clear the running-loss buffers so each epoch's averages are independent
losses = []
bces = []
klds = []
save_model(model, hyps)
# returns a trained model
end = time.time()
print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
return model
def run_encoder_training(model_name, data_dir, epochs, bs, vs, lr, mode='od',
hd=512, device='cuda:0'):
'''
Main function of auto encoder.
Args:
model_name: model name
data_dir: location of training data
epochs: number of epochs to train
bs: batch_size
vs: validation size
lr: learning rate
mode: pnf or od
hd: hidden dim
device: where to train the model
'''
# Training parameters
epochs = epochs
learning_rate = lr
batch_size = bs
# Model parameters
if mode == 'od':
input_size = 69 * 69 * 1
elif mode == 'pnf':
input_size = 69 * 69 * 3
else:
raise ValueError('Only `od` and `pnf` are supported.')
output_size = input_size
hidden_dim = hd
# wrap essential info into dictionary:
hyps = {
'is': input_size,
'os': output_size,
'mn': model_name,
'hd': hidden_dim,
'bs': batch_size,
'lr': learning_rate,
'md': mode
}
# Initialize data loaders
if model_name in ['ConvAutoEncoder',
'ConvAutoEncoderShallow',
'VAE',
'SparseAutoEncoder',
'SparseConvAutoEncoder']:
data_set = ConvEncoderDatasetRAM(data_dir)
else:
data_set = EncoderDatasetRAM(data_dir)
# split dataset for training and validation
num_train = len(data_set)
indices = list(range(num_train))
split = int(np.floor(vs * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# see the note in run_recursive_training: SequentialSampler over an index
# list yields positions, not the indices, so use Subset for the split instead
from torch.utils.data import Subset
loader = DataLoader(Subset(data_set, train_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
valid_loader = DataLoader(Subset(data_set, valid_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
# initialize model
# This part is currently very mixed. Change in the future.
if model_name in ['ConvAutoEncoder', 'ConvAutoEncoderShallow']:
model = models.__dict__[model_name](hyps['os'], mode=hyps['md'])
elif model_name == 'VAE':
model = models.__dict__[model_name](hyps['md'], hidden_dim=hyps['hd'], z_dim=32)
elif model_name in ['SparseConvAutoEncoder', 'SparseAutoEncoder']:
model = models.__dict__[model_name](hyps['md'], hidden_dim=hyps['hd'])
else:
model = models.__dict__[model_name](hyps['is'], hyps['os'], hidden_dim=hyps['hd'])
print(model)
# model training device
if TRAIN_ON_MULTI_GPUS:
model = nn.DataParallel(model).cuda()
elif torch.cuda.is_available():
model = model.to(device)
else:
print('Training on CPU, expect a very long training time.')
check_encoder_dim(mode, model, data_set)
# optimizer and criterion (loss function); use the same optimizer on
# either device configuration
if TRAIN_ON_MULTI_GPUS:
optimizer = optim.SGD(model.module.parameters(), lr=learning_rate)
else:
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
if model_name == 'VAE':
criterion = vae_loss
trained_model = train_vae(model, optimizer, criterion, epochs, loader,
hyps, device=device)
else:
criterion = nn.MSELoss()
# criterion = nn.L1Loss()
# criterion = dich_mse_loss
# start training
trained_model = train_encoder(
model, optimizer, criterion, epochs, loader, hyps, device=device
)
return trained_model
def train_GAN(D, G, d_optimizer, g_optimizer, n_epochs, z_size,
train_loader, valid_loader, sample_size, hyps, device='cuda:0',
print_every=100):
'''
GAN training function.
Args:
D: discriminator network (a PyTorch Module)
G: generator network (a PyTorch Module)
d_optimizer: optimizer for the discriminator
g_optimizer: optimizer for the generator
n_epochs: total passes through the training set
z_size: latent vector size
train_loader: training data loader
valid_loader: validation data loader (currently unused)
sample_size: number of fixed latent samples generated after each epoch
hyps: a dict of hyperparameters, used when saving models and samples
device: location to put tensors/models
print_every: print loss stats every this number of batches
Returns:
trained models: G and D
'''
# clear cache
torch.cuda.empty_cache()
# start timing
start = time.time()
print(f'Training on device {device} started at {time.ctime()}')
# keep track of loss and generated, "fake" samples
samples = []
truths = []
losses = []
# Get some fixed data for sampling. These are images that are held
# constant throughout training, and allow us to inspect the model's performance
fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
fixed_z = torch.from_numpy(fixed_z).float()
# train the network
for epoch in range(n_epochs):
for batch_i, (real_images, _) in enumerate(train_loader):
batch_size = real_images.size(0)
# important rescaling step
real_images = scale(real_images)
# ============================================ #
# TRAIN THE DISCRIMINATOR #
# ============================================ #
d_optimizer.zero_grad()
# 1. Train with real images
# Compute the discriminator losses on real images
if TRAIN_ON_MULTI_GPUS:
real_images = real_images.cuda()
elif torch.cuda.is_available():
real_images = real_images.to(device)
D_real = D(real_images)
d_real_loss = real_loss(D_real)
# 2. Train with fake images
# Generate fake images
z = np.random.uniform(-1, 1, size=(batch_size, z_size))
z = torch.from_numpy(z).float()
# move x to GPU, if available
if TRAIN_ON_MULTI_GPUS:
z = z.cuda()
elif torch.cuda.is_available():
z = z.to(device)
fake_images = G(z)
# Compute the discriminator losses on fake images
D_fake = D(fake_images)
d_fake_loss = fake_loss(D_fake)
# add up loss and perform backprop
d_loss = d_real_loss + d_fake_loss
d_loss.backward()
d_optimizer.step()
# ========================================= #
# TRAIN THE GENERATOR #
# ========================================= #
g_optimizer.zero_grad()
# 1. Train with fake images and flipped labels
# Generate fake images
z = np.random.uniform(-1, 1, size=(batch_size, z_size))
z = torch.from_numpy(z).float()
if TRAIN_ON_MULTI_GPUS:
z = z.cuda()
elif torch.cuda.is_available():
z = z.to(device)
fake_images = G(z)
# Compute the discriminator losses on fake images
# using flipped labels!
D_fake = D(fake_images)
g_loss = real_loss(D_fake) # use real loss to flip labels
# perform backprop
g_loss.backward()
g_optimizer.step()
# Print some loss stats
if batch_i % print_every == 0:
# append discriminator loss and generator loss
losses.append((d_loss.item(), g_loss.item()))
# print discriminator and generator loss
print(f'Epoch [{epoch+1:5d}/{n_epochs:5d}] |' +
f' d_loss: {d_loss.item():6.4f} | g_loss: {g_loss.item():6.4f}')
# AFTER EACH EPOCH
# generate and save sample, fake images
G.eval() # for generating samples
if TRAIN_ON_MULTI_GPUS:
fixed_z = fixed_z.cuda()
elif torch.cuda.is_available():
fixed_z = fixed_z.to(device)
samples_z = G(fixed_z)
samples.append(samples_z)
G.train() # back to training mode
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
save_model(D, hyps)
save_model(G, hyps)
end = time.time()
print(f'Training ended at {time.ctime()}, took {end-start:.2f} seconds.')
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
# print(f'samples.shape -> {len(samples)} {[item.shape for item in samples]}')
_ = view_samples(-1, samples, mode=hyps['md'])
return G, D
def run_GAN_training(data_dir, epochs, bs, vs, lr=0.0002, z_size=128, sample_size=16,
conv_dim=64, beta1=0.5, beta2=0.999, mode='od', device='cuda:0'):
'''
Main function of GAN.
Args:
data_dir: location of training data
epochs: number of epochs to train
bs: batch_size
vs: validation size
lr: learning rate
z_size: latent vector size
sample_size: number of fixed latent samples generated after each epoch
conv_dim: convolutional layer dimension
beta1, beta2: Adam optimizer betas
mode: pnf or od
device: where to train the model
'''
# Training parameters
epochs = epochs
learning_rate = lr
batch_size = bs
valid_size = vs
# wrap essential info into dictionary:
hyps = {
'mn': 'GAN',
'bs': batch_size,
'vs': valid_size,
'lr': learning_rate,
'zs': z_size,
'ss': sample_size,
'cd': conv_dim,
'md': mode
}
data_set = ConvEncoderDatasetRAM(data_dir)
# split dataset for training and validation
num_train = len(data_set)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# see the note in run_recursive_training: SequentialSampler over an index
# list yields positions, not the indices, so use Subset for the split instead
from torch.utils.data import Subset
train_loader = DataLoader(Subset(data_set, train_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
valid_loader = DataLoader(Subset(data_set, valid_idx),
batch_size=batch_size, num_workers=0, drop_last=True)
# define discriminator and generator
D = Discriminator(conv_dim, mode=hyps['md'])
G = Generator(z_size=z_size, conv_dim=conv_dim, mode=hyps['md'])
# initialize model weights
D.apply(weights_init_normal)
G.apply(weights_init_normal)
print(D)
print()
print(G)
# model training device
if TRAIN_ON_MULTI_GPUS:
D = nn.DataParallel(D).cuda()
G = nn.DataParallel(G).cuda()
elif torch.cuda.is_available():
D = D.to(device)
G = G.to(device)
else:
print('Training on CPU, expect a very long training time.')
# Create optimizers for the discriminator and generator, honoring the
# lr/beta1/beta2 arguments passed to this function
if TRAIN_ON_MULTI_GPUS:
d_optimizer = optim.Adam(D.module.parameters(), lr, [beta1, beta2])
g_optimizer = optim.Adam(G.module.parameters(), lr, [beta1, beta2])
else:
d_optimizer = optim.Adam(D.parameters(), lr, [beta1, beta2])
g_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2])
G, D = train_GAN(D, G, d_optimizer, g_optimizer, epochs, z_size,
train_loader, valid_loader, sample_size, hyps,
device=device, print_every=100)
return G, D
if __name__ == '__main__':
# dataset folders selecting factors
mode = 'pnf'
year = '2018'
freq = '15min'
data_dir = str(Path(os.path.dirname(os.path.realpath(__file__))).resolve(
).parents[1].joinpath(f'data/processed/{mode}/{year}/{freq}/tensors'))
# run_recursive_training('VanillaLSTM', data_dir, 500, 512, 0.9, 0.01, sl=12, hd=512)
# run_classifier_training('ConvClassifier', data_dir, 50, 128, 0.8, 0.001, 2, device='cuda:1')
run_encoder_training('ConvAutoEncoderShallow', data_dir, 2000, 128, 0.8, 0.1,
mode=mode, hd=113, device='cuda:1')
# run_GAN_training(data_dir, 100, 64, 0.8, z_size=100, conv_dim=256, mode='pnf')
# ===== tests/time_tests.py | ankpradh/parallel-composition-RE (MIT) =====
import sys
import time
import random
sys.path.append("..")
from pympler import asizeof
from test_automata import *
# For computing average runtimes
def avg_tests(runs, test, string, test_num):
test_avg_runs = runs
test_Ptime = 0
test_Ctime = 0
if test.__name__ in ["test4", "test5"]:
test_Stime = 0
test_Mtime = 0
for _ in range(test_avg_runs):
_time = test(string)
test_Ptime += _time[0]
test_Stime += _time[1]
test_Mtime += _time[2]
test_Ctime += _time[3]
print("Test %s (%s)\n------------------- " %(test_num, len(string)))
print("Computation time for Monolithic Enforcer : %f ms" %(test_Ptime/test_avg_runs))
print("Computation time for Serial Composition : %f ms" %(test_Stime/test_avg_runs))
print("Computation time for Maximal Prefix Parallel Composition : %f ms" %(test_Mtime/test_avg_runs))
print("Computation time for Parallel Composition : %f ms\n" %(test_Ctime/test_avg_runs))
else:
for _ in range(test_avg_runs):
_time = test(string)
test_Ptime += _time[0]
test_Ctime += _time[1]
print("Test %s (%s)\n------------------- " %(test_num, len(string)))
print("Computation time for Monolithic Enforcer : %f ms" %(test_Ptime/test_avg_runs))
print("Computation time for Parallel Composition : %f ms\n" %(test_Ctime/test_avg_runs))
# Generating random strings from given alphabet
def generate_strings(alphabet):
strings = []
ranges = [(10**i, 5*10**i) for i in range(1, 6)]
for size1, size2 in ranges:
strings.append("".join(random.choices(alphabet, k=size1)))
strings.append("".join(random.choices(alphabet, k=size2)))
return strings
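# e.g. generate_strings('01') returns 10 random bit-strings with lengths
# 10, 50, 100, 500, ..., 10**5, 5*10**5 (one (10**i, 5*10**i) pair per i).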
# Tests for compositions of EM1 and EM2
# Monolithic Composition with 3*5 = 15 states
# Parallel Composition with 3+5 = 8 states
def test1(Input):
# Monolithic Test
tsP = time.time()
A, B = EM1(), EM2()
A_B = monolithic_enforcer('A_B', A, B)
tsP = time.time()  # reset: time only checkAccept, not enforcer construction
accept = A_B.checkAccept(Input)
teP = time.time()
# Parallel Composition Test
tsC = time.time()
A, B = EM1("pDFA"), EM2("pDFA")
A_B = parallel_enforcer(A, B)
tsC = time.time()
accept = A_B.checkAccept(Input)
teC = time.time()
return (teP - tsP)*1000, (teC - tsC)*1000
# Tests for compositions of EM1, EM2 and EM3
# Monolithic Composition with 3*5*7 = 105 states
# Parallel Composition with 3+5+7 = 15 states
def test2(Input):
# Monolithic Test
tsP = time.time()
A, B, C = EM1(), EM2(), EM3()
A_B_C = monolithic_enforcer('A_B_C', A, B, C)
tsP = time.time()
accept = A_B_C.checkAccept(Input)
teP = time.time()
if (SIZEOF):
print(asizeof.asized(A_B_C, detail=1).format())
# Parallel Composition Test
tsC = time.time()
A, B, C = EM1("pDFA"), EM2("pDFA"), EM3("pDFA")
A_B_C = parallel_enforcer(A, B, C)
tsC = time.time()
accept = A_B_C.checkAccept(Input)
teC = time.time()
return (teP - tsP)*1000, (teC - tsC)*1000
# Tests for compositions of EM4, EM5, EM6, EM7, EM8 and EM9
# Monolithic Composition with 2*3*4*5*6*7 = 5040 states
# Parallel Composition with 2+3+4+5+6+7 = 27 states
def test3(Input):
# Monolithic Test
tsP = time.time()
R1, R2, R3, R4, R5, R6 = EM4(), EM5(), EM6(), EM7(), EM8(), EM9()
R = monolithic_enforcer('R', R1, R2, R3, R4, R5, R6)
tsP = time.time()
accept = R.checkAccept(Input)
teP = time.time()
if (SIZEOF):
print(asizeof.asized(R, detail=1).format())
# Parallel Composition Test
tsC = time.time()
R1, R2, R3, R4, R5, R6 = EM4("pDFA"), EM5("pDFA"), EM6("pDFA"), EM7("pDFA"), EM8("pDFA"), EM9("pDFA")
R = parallel_enforcer(R1, R2, R3, R4, R5, R6)
tsC = time.time()
accept = R.checkAccept(Input)
teC = time.time()
return (teP - tsP)*1000, (teC - tsC)*1000
# Tests for compositions of EM10, EM11, EM12 (Safety properties)
# Monolithic Composition with 3*4*3 = 36 states
# Serial and (both) Parallel Composition with 3+4+3 = 10 states
def test4(Input):
# Monolithic Test
tsP = time.time()
RS, RT, RU = EM10(), EM11(), EM12()
R = monolithic_enforcer('R', RS, RT, RU)
tsP = time.time()
accept = R.checkAccept(Input)
teP = time.time()
if (SIZEOF):
print(asizeof.asized(R, detail=1).format())
# Serial Composition Test
tsS = time.time()
RS, RT, RU = EM10("DFA"), EM11("DFA"), EM12("DFA")
R = serial_composition_enforcer(RS, RT, RU)
tsS = time.time()
accept = R.checkAccept(Input)
teS = time.time()
if (SIZEOF):
print(asizeof.asized(R, detail=1).format())
# Maximal Prefix Parallel Composition Test
tsM = time.time()
RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
R = maximal_prefix_parallel_enforcer(RS, RT, RU)
tsM = time.time()
accept = R.checkAccept(Input)
teM = time.time()
if (SIZEOF):
print(asizeof.asized(R, detail=1).format())
# Parallel Composition Test
tsC = time.time()
RS, RT, RU = EM10("pDFA"), EM11("pDFA"), EM12("pDFA")
R = parallel_enforcer(RS, RT, RU)
tsC = time.time()
accept = R.checkAccept(Input)
teC = time.time()
if (SIZEOF):
print(asizeof.asized(R, detail=1).format())
return (teP - tsP)*1000, (teS - tsS)*1000, (teM - tsM)*1000, (teC - tsC)*1000
# Tests for compositions of EM13, EM14, EM15 (Co-safety properties)
# Monolithic Composition with 4*5*3 = 60 states
# Serial and (both) Parallel Composition with 4+5+3 = 12 states
def test5(Input):
# Monolithic Test
tsP = time.time()
RCS, RCT, RCU = EM13(), EM14(), EM15()
RC = monolithic_enforcer('RC', RCS, RCT, RCU)
tsP = time.time()
accept = RC.checkAccept(Input)
teP = time.time()
if (SIZEOF):
print(asizeof.asized(RC, detail=1).format())
# Serial Composition Test
tsS = time.time()
RCS, RCT, RCU = EM13("DFA"), EM14("DFA"), EM15("DFA")
RC = serial_composition_enforcer(RCS, RCT, RCU)
tsS = time.time()
accept = RC.checkAccept(Input)
teS = time.time()
if (SIZEOF):
print(asizeof.asized(RC, detail=1).format())
# Maximal Prefix Parallel Composition Test
tsM = time.time()
RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
RC = maximal_prefix_parallel_enforcer(RCS, RCT, RCU)
tsM = time.time()
accept = RC.checkAccept(Input)
teM = time.time()
if (SIZEOF):
print(asizeof.asized(RC, detail=1).format())
# Parallel Composition Test
tsC = time.time()
RCS, RCT, RCU = EM13("pDFA"), EM14("pDFA"), EM15("pDFA")
RC = parallel_enforcer(RCS, RCT, RCU)
tsC = time.time()
accept = RC.checkAccept(Input)
teC = time.time()
if (SIZEOF):
print(asizeof.asized(RC, detail=1).format())
return (teP - tsP)*1000, (teS - tsS)*1000, (teM - tsM)*1000, (teC - tsC)*1000
if __name__ == '__main__':
Input1 = str(bin(15*1859))[2:]
Input2 = "33322555556666661111444422"
Input3 = "bbbbbbbbbbbbbbabbbbbbbbbbbbbb"
avg_tests(1000, test1, Input1, 1)
avg_tests(1000, test2, Input1, 2)
avg_tests(1000, test3, Input2, 3)
avg_tests(1000, test4, Input3, 4)
avg_tests(1000, test5, Input3, 5)
strings1 = generate_strings('01')
strings2 = generate_strings('123456')
strings3 = generate_strings('abc')
for string in strings1:
avg_tests(1000, test2, string, 2)
for string in strings2:
avg_tests(1000, test3, string, 3)
for string in strings3:
avg_tests(100, test4, string, 4)
avg_tests(100, test5, string, 5)
if (SIZEOF):
print(EM_size)
# ===== language/evm/hardhat-examples/compile_move.py | lxfind/move (Apache-2.0) =====
#!/usr/local/bin/python3
# This is a script to compile Move source code into artifacts that can be used for testing.
# Copy this to the root of your hardhat project to use it.
#
# Note: this is a temporary solution that will be phased out once we implement the Move plugin.
#
# Move code should be stored within the `contracts` directory, along with an ABI file.
# - contracts
# - MyContract.move
# - MyContract.abi.json
#
# The ABI file should look something like this:
# [
# {
# "inputs": [],
# "name": "foo",
# "outputs": [
# {
# "internalType": "uint256",
# "name": "",
# "type": "uint256"
# }
# ],
# "stateMutability": "view",
# "type": "function"
# }
# ]
dependencies = [
"../stdlib/sources",
"../../move-stdlib/sources"
]
named_address_mapping = {
"Std": "0x1",
"Evm": "0x2"
}
import os
import shutil
import tempfile
import subprocess
import json
import sys
from os import path
path_root = path.dirname(__file__)
path_contracts = path.join(path_root, "contracts")
path_artifacts = path.join(path_root, "artifacts", "contracts")
path_home = path.expanduser("~")
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def locate_solc():
p = path.join(path_home, "bin", "solc")
if path.isfile(p):
return p
p = shutil.which("solc")
if p is not None:
return p
eprint("Failed to locate solc.")
exit(1)
def locate_move_to_yul():
p = shutil.which("move-to-yul")
if p is not None:
return p
eprint("Failed to locate move-to-yul -- you can install it by running `cargo install --path <path to the move-to-yul crate>.`")
exit(1)
path_solc = locate_solc()
path_move_to_yul = locate_move_to_yul()
def list_move_sources():
paths = []
for name in os.listdir(path_contracts):
path_ = path.join(path_contracts, name)
if path.isfile(path_) and path.splitext(path_)[1] == ".move":
paths.append(path_)
return paths
def load_abi(path_source):
path_abi = path.splitext(path_source)[0] + ".abi.json"
if not path.isfile(path_abi):
eprint()
eprint("Missing ABI definition: {}.".format(path_abi))
exit(1)
with open(path_abi, "r") as f:
text = f.read()
return json.loads(text)
def move_to_yul(path_source):
with tempfile.NamedTemporaryFile() as output_file:
args = [path_move_to_yul, "--output", output_file.name]
if len(dependencies) > 0:
args.append("-d")
args.extend(dependencies)
if len(named_address_mapping) > 0:
args.append("-n")
for (name, addr) in named_address_mapping.items():
args.append("{}={}".format(name, addr))
path_abi = path.splitext(path_source)[0] + ".abi.json"
args.append("--abi-output")
args.append(path_abi)
args.extend(["--", path_source])
move_to_yul_res = subprocess.run(args, capture_output = True)
if move_to_yul_res.returncode != 0:
eprint()
eprint(move_to_yul_res.stderr.decode("utf-8"))
exit(1)
return output_file.read()
def solc(path_source, yul_code):
solc_res = subprocess.run([path_solc, "--optimize", "--strict-assembly", "--bin", "-"], input = yul_code, capture_output = True)
if solc_res.returncode != 0:
eprint()
eprint(solc_res.stderr.decode("utf-8"))
exit(1)
output = solc_res.stdout.decode("utf-8")
return "0x{}".format(output.split("Binary representation:")[1].replace("\n", ""))
def gen_artifact(path_source, abi, bytecode):
basename = path.basename(path_source)
contract_name = path.splitext(basename)[0]
path_artifact = path.join(path_artifacts, basename)
if not path.isdir(path_artifact):
if path.exists(path_artifact):
eprint("Failed to generate artifact. Path {} already exists, but it's not a directory.".format(path_artifact))
exit(1)
os.makedirs(path_artifact)
artifact = {
"_format": "hh-sol-artifact-1",
"contractName": contract_name,
"sourceName": path_source,
"abi": abi,
"bytecode": bytecode,
"deployedBytecode": bytecode,
"linkReferences": {},
"deployedLinkReferences": {}
}
with open(path.join(path_artifact, contract_name + ".json"), "w") as f:
json.dump(artifact, f, indent = 4)
def run(path_source):
print("Compiling {}...".format(path_source))
yul_code = move_to_yul(path_source)
abi = load_abi(path_source)
bytecode = solc(path_source, yul_code)
gen_artifact(path_source, abi, bytecode)
for path_source in list_move_sources():
run(path_source)
print("Success.")
# ===== tuiuiu/tuiuiuredirects/views.py | caputomarcos/tuiuiu.io (BSD-3-Clause) =====
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from tuiuiu.utils.pagination import paginate
from tuiuiu.tuiuiuadmin import messages
from tuiuiu.tuiuiuadmin.forms import SearchForm
from tuiuiu.tuiuiuadmin.utils import PermissionPolicyChecker, permission_denied
from tuiuiu.tuiuiuredirects import models
from tuiuiu.tuiuiuredirects.forms import RedirectForm
from tuiuiu.tuiuiuredirects.permissions import permission_policy
permission_checker = PermissionPolicyChecker(permission_policy)
@permission_checker.require_any('add', 'change', 'delete')
@vary_on_headers('X-Requested-With')
def index(request):
query_string = request.GET.get('q', "")
ordering = request.GET.get('ordering', 'old_path')
redirects = models.Redirect.objects.prefetch_related('redirect_page', 'site')
# Search
if query_string:
redirects = redirects.filter(old_path__icontains=query_string)
# Ordering (A bit useless at the moment as only 'old_path' is allowed)
if ordering not in ['old_path']:
ordering = 'old_path'
redirects = redirects.order_by(ordering)
# Pagination
paginator, redirects = paginate(request, redirects)
# Render template
if request.is_ajax():
return render(request, "tuiuiuredirects/results.html", {
'ordering': ordering,
'redirects': redirects,
'query_string': query_string,
})
else:
return render(request, "tuiuiuredirects/index.html", {
'ordering': ordering,
'redirects': redirects,
'query_string': query_string,
'search_form': SearchForm(
data=dict(q=query_string) if query_string else None, placeholder=_("Search redirects")
),
'user_can_add': permission_policy.user_has_permission(request.user, 'add'),
})
@permission_checker.require('change')
def edit(request, redirect_id):
theredirect = get_object_or_404(models.Redirect, id=redirect_id)
if not permission_policy.user_has_permission_for_instance(
request.user, 'change', theredirect
):
return permission_denied(request)
if request.method == 'POST':
form = RedirectForm(request.POST, request.FILES, instance=theredirect)
if form.is_valid():
form.save()
messages.success(request, _("Redirect '{0}' updated.").format(theredirect.title), buttons=[
messages.button(reverse('tuiuiuredirects:edit', args=(theredirect.id,)), _('Edit'))
])
return redirect('tuiuiuredirects:index')
else:
messages.error(request, _("The redirect could not be saved due to errors."))
else:
form = RedirectForm(instance=theredirect)
return render(request, "tuiuiuredirects/edit.html", {
'redirect': theredirect,
'form': form,
'user_can_delete': permission_policy.user_has_permission(request.user, 'delete'),
})
@permission_checker.require('delete')
def delete(request, redirect_id):
theredirect = get_object_or_404(models.Redirect, id=redirect_id)
if not permission_policy.user_has_permission_for_instance(
request.user, 'delete', theredirect
):
return permission_denied(request)
if request.method == 'POST':
theredirect.delete()
messages.success(request, _("Redirect '{0}' deleted.").format(theredirect.title))
return redirect('tuiuiuredirects:index')
return render(request, "tuiuiuredirects/confirm_delete.html", {
'redirect': theredirect,
})
@permission_checker.require('add')
def add(request):
if request.method == 'POST':
form = RedirectForm(request.POST, request.FILES)
if form.is_valid():
theredirect = form.save()
messages.success(request, _("Redirect '{0}' added.").format(theredirect.title), buttons=[
messages.button(reverse('tuiuiuredirects:edit', args=(theredirect.id,)), _('Edit'))
])
return redirect('tuiuiuredirects:index')
else:
messages.error(request, _("The redirect could not be created due to errors."))
else:
form = RedirectForm()
return render(request, "tuiuiuredirects/add.html", {
'form': form,
})
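# Hedged sketch of the URLconf these views assume: the reverse()/redirect()
# calls above use the 'tuiuiuredirects' namespace, but the exact urls.py
# layout below is an assumption, not part of this module.
#
#     from django.conf.urls import url
#     from tuiuiu.tuiuiuredirects import views
#
#     app_name = 'tuiuiuredirects'
#     urlpatterns = [
#         url(r'^$', views.index, name='index'),
#         url(r'^add/$', views.add, name='add'),
#         url(r'^(\d+)/$', views.edit, name='edit'),
#         url(r'^(\d+)/delete/$', views.delete, name='delete'),
#     ]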
# ===== fd_lib/feature_engineering.py | fdavidsen/Personal-Data-Science-Projects (MIT) =====
from itertools import combinations
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
class BestPair:
''' Find out the feature pairs with the highest correlation (positive and negative). '''
def __init__(self, X, y, how='product'):
self.X = X
self.y = y
self.how = how
self._fit()
def _fit(self):
self.corr_list = []
for row, col in combinations(self.X._get_numeric_data(), r=2):
if self.how == 'product':
self._corr_append('product', self.product(row, col), row, col)
elif self.how == 'distance':
self._corr_append('distance', self.distance(row, col), row, col)
elif self.how == 'all':
self._corr_append('product', self.product(row, col), row, col)
self._corr_append('distance', self.distance(row, col), row, col)
self.corr_list = sorted(self.corr_list, key=lambda item: -item[3])
def product(self, row, col):
return self.X[row] * self.X[col]
def distance(self, row, col):
return (self.X[row]**2 + self.X[col]**2)**0.5
def _corr_append(self, how, new_col, row, col):
self.corr_list.append((how, row, col, self.y.corr(new_col)))
def get_list(self, top=1):
upper_top = self.corr_list[:top]
lower_top = self.corr_list[-top:]
return upper_top + lower_top
def mold(self, top=1):
top_corr = self.get_list(top)
how_space = max([len(item[0]) for item in top_corr])
row_space = max([len(item[1]) for item in top_corr])
col_space = max([len(item[2]) for item in top_corr])
for how, row, col, score in top_corr:
line = '[{:>%(how_space)s}] {:>%(row_space)s} & {:<%(col_space)s} | {:.3f}' % locals()
print(line.format(how.title(), row, col, score))
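# Minimal usage sketch for BestPair (assumes a pandas DataFrame `X` and a
# pandas Series `y`; pandas itself is not imported by this module):
#
#     bp = BestPair(X, y, how='all')
#     bp.mold(top=3)              # print the 3 most positive and 3 most
#                                 # negative correlated feature pairs
#     pairs = bp.get_list(top=2)  # [(how, row, col, corr), ...]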
class CustomFeature(BaseEstimator, TransformerMixin):
''' Select custom features to add to the feature matrix. '''
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.copy()
for how, row, col in self.columns:
if how == 'product':
result = X[row] * X[col]
elif how == 'distance':
result = (X[row]**2 + X[col]**2)**0.5
X['{}_{}'.format(row, col)] = result
return X
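# Hedged example of combining the two classes in an sklearn Pipeline. Slicing
# the correlation score off BestPair's output is an assumption about how the
# pieces are meant to fit together; the regressor choice is illustrative.
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.linear_model import LinearRegression
#
#     triples = [(how, row, col)
#                for how, row, col, _ in BestPair(X, y, how='all').get_list(2)]
#     model = Pipeline([('features', CustomFeature(triples)),
#                       ('regressor', LinearRegression())])
#     model.fit(X, y)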
# ===== benchmarks/cocompilation_benchmarks.py | SanggunLee/edgetpu (Apache-2.0) =====
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark of cocompiled models.
Benchmark are measured with CPU 'performance' mode. To enable it, you need to
install 'cpupower' and run:
sudo cpupower frequency-set --governor performance
The reference number is measured on:
- 'x86_64': Intel Xeon W-2135(4.50GHz) + Edge TPU accelarator + USB 3.0
- 'rp3b': Raspberry Pi 3 B (version1.2)+ Edge TPU accelarator + USB 2.0
- 'rp3b+': Raspberry Pi 3 B+ (version1.3)+ Edge TPU accelarator + USB 2.0
- 'aarch64': Edge TPU dev board.
"""
import time
import timeit
from edgetpu.basic import edgetpu_utils
from edgetpu.basic.basic_engine import BasicEngine
import numpy as np
import test_utils
def _run_inferences(engines, input_data_list):
"""Runs an iteration of inferences for each engine with a random inpt.
Args:
engines: list of basic engines.
input_data_list: list of random input data.
"""
for engine, input_data in zip(engines, input_data_list):
engine.run_inference(input_data)
def _run_benchmark_for_cocompiled_models(model_names):
"""Runs benchmark for a given model set with random inputs. Models run
inferences alternately with random inputs. It benchmarks the total time
running each model once.
Args:
model_names: list of string, file names of the models.
Returns:
float, average sum of inferences times.
"""
iterations = 200
print('Benchmark for ', model_names)
engines = []
input_data_list = []
edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
for model_name in model_names:
# Run models on a single edgetpu to achieve accurate benchmark results.
engine = BasicEngine(test_utils.test_data_path(model_name), edge_tpus[0])
# Prepare a random generated input.
input_size = engine.required_input_array_size()
random_input = test_utils.generate_random_input(1, input_size)
# Convert it to a numpy.array.
input_data = np.array(random_input, dtype=np.uint8)
engines.append(engine)
input_data_list.append(input_data)
benchmark_time = timeit.timeit(
lambda: _run_inferences(engines, input_data_list),
number=iterations)
# Time consumed for each iteration (milliseconds).
time_per_inference = (benchmark_time / iterations) * 1000
print(time_per_inference, 'ms (iterations = ', iterations, ')')
return time_per_inference
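# Hedged usage sketch (the .tflite file names below are placeholders, not
# artifacts shipped with this benchmark):
#
#     ms = _run_benchmark_for_cocompiled_models(
#         ['model_a_cocompiled.tflite', 'model_b_cocompiled.tflite'])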
if __name__ == '__main__':
args = test_utils.parse_args()
machine = test_utils.machine_info()
test_utils.check_cpu_scaling_governor_status()
# Read references from csv file.
modelsets_list, reference = test_utils.read_reference(
'cocompilation_reference_%s.csv' % machine)
total_modelsets = len(modelsets_list)
# Put column names in first row.
results = [('MODELS', 'INFERENCE_TIME')]
for cnt, modelsets in enumerate(modelsets_list, start=1):
print('-------------- Models ', cnt, '/', total_modelsets, ' ---------------')
results.append((modelsets, _run_benchmark_for_cocompiled_models(modelsets.split(','))))
test_utils.save_as_csv(
'cocompilation_benchmarks_%s_%s.csv' % (
machine, time.strftime('%Y%m%d-%H%M%S')),
results)
test_utils.check_result(reference, results, args.enable_assertion)
# ===== pygl.py | sidd5sci/game-engin (Apache-2.0) =====
#Import OpenGL and GLU. Don't import GLUT because it is ancient, broken, inflexible, and poorly
#designed--and we aren't using it.
from OpenGL.GL import *
from OpenGL.GLU import *
#Import PyGame. We'll mostly just use this to make a window. Also import all the local
#declarations (e.g. pygame.KEYDOWN, etc.), so that we don't have to keep typing "pygame." in front
#of everything. E.g., now we can do "KEYDOWN" instead of "pygame.KEYDOWN".
import pygame
from pygame.locals import *
#Import some other useful modules
import sys, os, traceback
#Center the window on the screen, if we're on Windows, which supports it.
if sys.platform in ["win32","win64"]: os.environ["SDL_VIDEO_CENTERED"]="1"
#Import sin, cos, radians, degrees, etc.
from math import *
#Initialize PyGame. You could also call "pygame.init()", but in my experience this can be faster
#(since you aren't initializing *everything*) and more portable (since some modules may require
#extra dependencies).
pygame.display.init()
pygame.font.init()
#Screen configuration
screen_size = [800,600]
multisample = 0
#Set the window's icon, as applicable, to be just a transparent square.
icon = pygame.Surface((1,1)); icon.set_alpha(0); pygame.display.set_icon(icon)
#Set the title of the window.
pygame.display.set_caption("PyOpenGL Example - Ian Mallett - v.1.0.0 - 2013")
#Set the window to be multisampled. This does depth testing at a higher resolution, leading to
#smooth, antialiased edges. Most computers support at least multisample=4, and most support more
#(e.g. mine does 16).
if multisample:
pygame.display.gl_set_attribute(GL_MULTISAMPLEBUFFERS,1)
pygame.display.gl_set_attribute(GL_MULTISAMPLESAMPLES,multisample)
#Create the window of the requested size. The pygame.OPENGL flag tells it to allow OpenGL to write
#directly to the window context. The pygame.DOUBLEBUF flag tells it to make the window
#doublebuffered. This causes the screen to only show a completed image. This function actually
#returns a "surface" object, but it isn't useful for OpenGL programs.
pygame.display.set_mode(screen_size,OPENGL|DOUBLEBUF)
#If we draw a new pixel, we want to blend the new pixel with whatever is already there. This allows
#for transparency, among other things. Since everything here is fully opaque, we don't actually
#*need* this right now.
##glEnable(GL_BLEND)
##glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA)
#Enable textured objects. The glTexEnvi calls set up texturing in an intuitive way. Again, since
#nothing here is textured, we don't actually *need* this right now.
##glEnable(GL_TEXTURE_2D)
##glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_MODULATE)
##glTexEnvi(GL_POINT_SPRITE,GL_COORD_REPLACE,GL_TRUE)
#This requests that OpenGL make interpolation (filling in triangles) happen in the nicest way
#possible. It's not guaranteed to happen; it's a request.
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST)
#This enables depth testing (so that closer objects are always drawn in front of farther objects).
#If depth testing is not enabled, then objects are drawn "over" each other in the order you draw
#them. For most 3D rendering, you'll want depth testing enabled.
glEnable(GL_DEPTH_TEST)
#This concludes setup; the program itself will be a single triangle drawn in white at positions
#(0.0,0.0,0.0), (0.8,0.0,0.0), (0.0,0.0,0.4), along with a red, green, and blue line segments
#showing the axes.
#I find that an intuitive basic setup for the camera (where you're looking from) is to have the
#viewer located on the surface of a sphere surrounding everything. You can change your position on
#the sphere, and thus fly around the scene. To do this, I put the camera in (a kind of) spherical
#coordinates.
camera_rot = [30.0,20.0] #The spherical coordinates' angles (degrees).
camera_radius = 3.0 #The sphere's radius
camera_center = [0.0,0.0,0.0] #The sphere's center
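#As a sanity check on this parameterization: draw() computes the camera position
#as x = r*cos(az)*cos(el), y = r*sin(el), z = r*sin(az)*cos(el) (relative to
#camera_center), so with camera_rot = [0.0,0.0] and camera_radius = 3.0 the
#camera would sit at (3,0,0), looking back at the origin along -x.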
def get_input():
global camera_rot, camera_radius
#Input in PyGame is pretty straightforward. For now, we are concerned only with key and mouse
#input. Whenever anything *happens* (move the mouse, click, etc.), an "event" happens. You get
#a list of the events that happened by calling "pygame.event.get()". You can also query the
#*state* of anything by checking it specifically.
#Check the *state* of the keys, the mouse buttons, and the mouse's position within the window.
keys_pressed = pygame.key.get_pressed()
mouse_buttons = pygame.mouse.get_pressed()
mouse_position = pygame.mouse.get_pos()
#Check how much the mouse moved since you last called this function.
mouse_rel = pygame.mouse.get_rel()
#List all the events that happened.
for event in pygame.event.get():
#Clicked the little "X"; close the window (return False breaks the main loop).
if event.type == QUIT: return False
#If the user pressed a key:
elif event.type == KEYDOWN:
#If the user pressed the escape key, close the window.
if event.key == K_ESCAPE: return False
#If the user "clicked" the scroll wheel forward or backward:
elif event.type == MOUSEBUTTONDOWN:
#Zoom in
if event.button == 4: camera_radius *= 0.9
#Or out.
elif event.button == 5: camera_radius /= 0.9
#If the user is left-clicking, then move the camera about in the spherical coordinates.
if mouse_buttons[0]:
camera_rot[0] += mouse_rel[0]
camera_rot[1] += mouse_rel[1]
return True
def draw():
#Clear the screen's color and depth buffers so we have a fresh space to draw geometry onto.
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
#Setup the viewport (the area of the window to draw into)
glViewport(0,0,screen_size[0],screen_size[1])
#Change the matrix mode to the projection matrix (all subsequent calls that change matrices will
#change the projection matrix). The projection matrix should be made responsible for taking all
#the geometry in the 3D world and then distorting it so that it is in perspective on the screen.
glMatrixMode(GL_PROJECTION)
#Set the current matrix (the projection matrix) to be the identity matrix.
glLoadIdentity()
#Multiply the current matrix (the projection matrix) by a matrix that projects everything like a
#camera would. Basically, this makes everything look like it's in perspective. In this case,
#the camera has a (vertical) field of view of 45 degrees, an aspect ratio of 800.0/600.0, a near
#clipping plane of 0.1, and a far clipping plane of 100.0. The clipping planes tell you how
#close and far away from the camera you can see things. Ideally, you'd set them to 0.0 and
#infinity, but the clipping planes also affect the depth buffer; setting them farther apart
#means objects don't occlude each other as correctly (the depth buffer is stretched over a
#larger distance). The general rule is to set the near clipping plane as large as possible (and
#*never* to 0.0), and then make your far plane reasonably small.
gluPerspective(45, float(screen_size[0])/float(screen_size[1]), 0.1,100.0)
#Change the matrix mode to the modelview matrix (all subsequent calls that change matrices will
#change the modelview matrix). The modelview matrix should be made responsible for moving
#things around the world (the "model" part of the name) and also making it look like the camera
#is in a particular position (the "view" part of the name).
glMatrixMode(GL_MODELVIEW)
#Set the current matrix (the modelview matrix) to be the identity matrix.
glLoadIdentity()
#The matrices stay the way they are until they are changed. Since the projection matrix doesn't
#actually change from frame to frame, one *could* only set it once. You will see this approach
#in other tutorials. This isn't a good idea, since more advanced techniques (e.g. image-space
#techniques) require the projection matrix to constantly change.
#Set the camera's position to be in spherical coordinates. These aren't typical spherical
#coordinates, since I take the elevation angle (camera_rot[1]) to be 0.0 at the horizon. I find
#this more intuitive, but you can easily change it to your favorite parameterization by
#exchanging sines and cosines.
camera_pos = [
camera_center[0] + camera_radius*cos(radians(camera_rot[0]))*cos(radians(camera_rot[1])),
camera_center[1] + camera_radius *sin(radians(camera_rot[1])),
camera_center[2] + camera_radius*sin(radians(camera_rot[0]))*cos(radians(camera_rot[1]))
]
#This multiplies the current matrix (the modelview matrix) by a matrix that makes it *look like*
#all subsequent draw calls had the camera at the given position and direction. In reality, it
#actually rotates and translates *the whole world* so that it *looks* that way, but the effect
#is the same. Here, the camera has position "camera_pos" and is oriented so that it is looking
#towards position "camera_center". The last three arguments tell it which way is up.
gluLookAt(
camera_pos[0],camera_pos[1],camera_pos[2],
camera_center[0],camera_center[1],camera_center[2],
0,1,0
)
#Okay! Let's start *actually drawing stuff*! We use "immediate mode" OpenGL here, which is
#obsoleted by vertex arrays and VBOs. Still, immediate mode is far more intuitive, so it is the
#method we'll use here.
#Set the color to white. All subsequent geometry we draw will be white. This is actually the
#default, so we didn't *need* to do this.
glColor3f(1,1,1)
#Start drawing triangles. Each subsequent triplet of glVertex*() calls will draw one triangle.
glBegin(GL_TRIANGLES)
glVertex3f(0.0,0.0,0.0) #Make a vertex at (0.0,0.0,0.0)
glVertex3f(0.8,0.0,0.0) #Make a vertex at (0.8,0.0,0.0)
glVertex3f(0.0,0.0,0.4) #Make a vertex at (0.0,0.0,0.4)
#Now that we've made one triplet of glVertex*() calls, it will draw one (white) triangle between
#those three points. We're done drawing triangles; tell OpenGL so.
glEnd()
#Start drawing lines. Each subsequent pair of glVertex*() calls will draw one line.
glBegin(GL_LINES)
#Change the color to red. All subsequent geometry we draw will be red.
glColor3f(1,0,0)
#Make two vertices, thereby drawing a (red) line.
glVertex3f(0,0,0); glVertex3f(1,0,0)
#Change the color to green. All subsequent geometry we draw will be green.
glColor3f(0,1,0)
#Make two vertices, thereby drawing a (green) line.
glVertex3f(0,0,0); glVertex3f(0,1,0)
#Change the color to blue. All subsequent geometry we draw will be blue.
glColor3f(0,0,1)
#Make two vertices, thereby drawing a (blue) line.
glVertex3f(0,0,0); glVertex3f(0,0,1)
#Change the color to white again. All subsequent geometry we draw will be white. Strictly
#speaking this isn't required (since we reset the color to white at the start of each frame's
#draw calls before we draw anything again). However, it is good practice to reset the color to
#white, since forgetting to can be a hard-to-track-down bug (e.g. when combining with texturing).
glColor3f(1,1,1)
#We're done drawing lines; tell OpenGL so.
glEnd()
#Flip the buffer (draw the internal memory we've been using onto the screen). This is why we
#passed pygame.DOUBLEBUF when we created the window.
pygame.display.flip()
def main():
clock = pygame.time.Clock()
while True:
if not get_input(): break
draw()
clock.tick(60) #Regulate the framerate to be as close as possible to 60Hz.
pygame.quit()
if __name__ == "__main__":
try:
main()
except:
traceback.print_exc()
pygame.quit()
input()
| 54.856481 | 100 | 0.720314 | 1,923 | 11,849 | 4.382735 | 0.279771 | 0.013764 | 0.01317 | 0.011865 | 0.159587 | 0.14701 | 0.084124 | 0.048291 | 0.038206 | 0.022544 | 0 | 0.022412 | 0.197907 | 11,849 | 215 | 101 | 55.111628 | 0.864373 | 0.703941 | 0 | 0.091954 | 0 | 0 | 0.024823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.114943 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68e1f5cc56b5a7a07848391ac060d809aa03eb20 | 6,827 | py | Python | cogdl/models/nn/pyg_gpt_gnn.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/pyg_gpt_gnn.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/models/nn/pyg_gpt_gnn.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | from typing import Any, Union, Type, Optional
from cogdl.models import register_model
from cogdl.models.supervised_model import (
SupervisedHomogeneousNodeClassificationModel,
SupervisedHeterogeneousNodeClassificationModel,
)
from cogdl.trainers.gpt_gnn_trainer import (
GPT_GNNHomogeneousTrainer,
GPT_GNNHeterogeneousTrainer,
)
#
# @register_model("gpt_gnn")
# class GPT_GNN(BaseModel):
# def __init__(
# self,
# in_dim,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# n_layers,
# dropout=0.2,
# conv_name="hgt",
# prev_norm=False,
# last_norm=False,
# use_RTE=True,
# ):
# super(GPT_GNN, self).__init__()
# self.gcs = nn.ModuleList()
# self.num_types = num_types
# self.in_dim = in_dim
# self.n_hid = n_hid
# self.adapt_ws = nn.ModuleList()
# self.drop = nn.Dropout(dropout)
# for t in range(num_types):
# self.adapt_ws.append(nn.Linear(in_dim, n_hid))
# for l in range(n_layers - 1):
# self.gcs.append(
# GeneralConv(
# conv_name,
# n_hid,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# dropout,
# use_norm=prev_norm,
# use_RTE=use_RTE,
# )
# )
# self.gcs.append(
# GeneralConv(
# conv_name,
# n_hid,
# n_hid,
# num_types,
# num_relations,
# n_heads,
# dropout,
# use_norm=last_norm,
# use_RTE=use_RTE,
# )
# )
#
# def forward(self, node_feature, node_type, edge_time, edge_index, edge_type):
# res = torch.zeros(node_feature.size(0), self.n_hid).to(node_feature.device)
# for t_id in range(self.num_types):
# idx = node_type == int(t_id)
# if idx.sum() == 0:
# continue
# res[idx] = torch.tanh(self.adapt_ws[t_id](node_feature[idx]))
# meta_xs = self.drop(res)
# del res
# for gc in self.gcs:
# meta_xs = gc(meta_xs, node_type, edge_index, edge_type, edge_time)
# return meta_xs
@register_model("gpt_gnn")
class GPT_GNN(
SupervisedHomogeneousNodeClassificationModel,
SupervisedHeterogeneousNodeClassificationModel,
):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
"""
Dataset arguments
"""
parser.add_argument(
"--use_pretrain", help="Whether to use pre-trained model", action="store_true"
)
parser.add_argument(
"--pretrain_model_dir",
type=str,
default="/datadrive/models/gpt_all_cs",
help="The address for pretrained model.",
)
# parser.add_argument(
# "--model_dir",
# type=str,
# default="/datadrive/models/gpt_all_reddit",
# help="The address for storing the models and optimization results.",
# )
parser.add_argument(
"--task_name",
type=str,
default="reddit",
help="The name of the stored models and optimization results.",
)
parser.add_argument(
"--sample_depth", type=int, default=6, help="How many numbers to sample the graph"
)
parser.add_argument(
"--sample_width",
type=int,
default=128,
help="How many nodes to be sampled per layer per type",
)
"""
Model arguments
"""
parser.add_argument(
"--conv_name",
type=str,
default="hgt",
choices=["hgt", "gcn", "gat", "rgcn", "han", "hetgnn"],
help="The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)",
)
parser.add_argument("--n_hid", type=int, default=400, help="Number of hidden dimension")
parser.add_argument("--n_heads", type=int, default=8, help="Number of attention head")
parser.add_argument("--n_layers", type=int, default=3, help="Number of GNN layers")
parser.add_argument(
"--prev_norm",
help="Whether to add layer-norm on the previous layers",
action="store_true",
)
parser.add_argument(
"--last_norm",
help="Whether to add layer-norm on the last layers",
action="store_true",
)
parser.add_argument("--dropout", type=int, default=0.2, help="Dropout ratio")
"""
Optimization arguments
"""
parser.add_argument(
"--optimizer",
type=str,
default="adamw",
choices=["adamw", "adam", "sgd", "adagrad"],
help="optimizer to use.",
)
parser.add_argument(
"--scheduler",
type=str,
default="cosine",
help="Name of learning rate scheduler.",
choices=["cycle", "cosine"],
)
parser.add_argument(
"--data_percentage",
type=float,
default=0.1,
help="Percentage of training and validation data to use",
)
parser.add_argument("--n_epoch", type=int, default=50, help="Number of epoch to run")
parser.add_argument(
"--n_pool", type=int, default=8, help="Number of process to sample subgraph"
)
parser.add_argument(
"--n_batch",
type=int,
default=10,
help="Number of batch (sampled graphs) for each epoch",
)
parser.add_argument(
"--batch_size", type=int, default=64, help="Number of output nodes for training"
)
parser.add_argument("--clip", type=int, default=0.5, help="Gradient Norm Clipping")
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return GPT_GNN()
def loss(self, data: Any) -> Any:
pass
def predict(self, data: Any) -> Any:
pass
def evaluate(self, data: Any, nodes: Any, targets: Any) -> Any:
pass
@staticmethod
def get_trainer(args) -> Optional[Type[Union[GPT_GNNHomogeneousTrainer, GPT_GNNHeterogeneousTrainer]]]:
# if taskType == NodeClassification:
return GPT_GNNHomogeneousTrainer
# elif taskType == HeterogeneousNodeClassification:
# return GPT_GNNHeterogeneousTrainer
# else:
# return None
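
if __name__ == "__main__":
    # Minimal illustrative sketch of driving the add_args/build_model_from_args hooks
    # with plain argparse (cogdl normally does this wiring internally; the flag values
    # here are arbitrary examples).
    import argparse

    parser = argparse.ArgumentParser()
    GPT_GNN.add_args(parser)
    args = parser.parse_args(["--n_hid", "400", "--n_layers", "3"])
    model = GPT_GNN.build_model_from_args(args)
    print(type(model).__name__, args.n_hid, args.n_layers)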
| 32.509524 | 107 | 0.535228 | 721 | 6,827 | 4.871012 | 0.278779 | 0.056378 | 0.101651 | 0.030752 | 0.220387 | 0.198747 | 0.177677 | 0.09795 | 0.089408 | 0.045558 | 0 | 0.006113 | 0.35301 | 6,827 | 209 | 108 | 32.665072 | 0.788997 | 0.342464 | 0 | 0.305556 | 0 | 0 | 0.251699 | 0.006562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.027778 | 0.037037 | 0.018519 | 0.12037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68edc1beb9d2bae550d0e6eb145fb0caa55200b1 | 2,663 | py | Python | hadoopCluster/hadoop.py | anycode-inc/TerminalUI-CGI | 8db7d19d25b0de6d599b9de8a4172d0668f4d688 | [
"MIT"
] | null | null | null | hadoopCluster/hadoop.py | anycode-inc/TerminalUI-CGI | 8db7d19d25b0de6d599b9de8a4172d0668f4d688 | [
"MIT"
] | null | null | null | hadoopCluster/hadoop.py | anycode-inc/TerminalUI-CGI | 8db7d19d25b0de6d599b9de8a4172d0668f4d688 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from jinja2 import Environment, FileSystemLoader
import subprocess
import cgi
print("content-type: text/html")
print()
mydata = cgi.FieldStorage()
namenode_ip = mydata.getvalue("namenode_ip")
namenode_port = mydata.getvalue("namenode_port")
namenode_directory = mydata.getvalue("namenode_directory")
datanode_ip = mydata.getvalue("datanode_ip")
datanode_directory = mydata.getvalue("datanode_directory")
def installationScript(nodeType, directoryPath):
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('installationScript.sh.j2')
    output = template.render(nodeType=nodeType, directoryPath=directoryPath)
    file = open("./temp/installationScript.sh", "w")
    file.write("%s" % output)
    file.close()

def hdfsSite(nodeType, directoryPath):
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('hdfs-site.xml.j2')
    output = template.render(nodeType=nodeType, directoryPath=directoryPath)
    file = open("./temp/hdfs-site.xml", "w")
    file.write("%s" % output)
    file.close()

def coreSite(nodeIp, nodePort):
    file_loader = FileSystemLoader('templates')
    env = Environment(loader=file_loader)
    template = env.get_template('core-site.xml.j2')
    output = template.render(IP=nodeIp, port=nodePort)
    file = open("./temp/core-site.xml", "w")
    file.write("%s" % output)
    file.close()

def copyTemplate(nodeIP):
    subprocess.run(f'scp ./temp/hdfs-site.xml root@{nodeIP}:/root/hdfs-site.xml', shell=True)
    subprocess.run(f'scp ./temp/core-site.xml root@{nodeIP}:/root/core-site.xml', shell=True)

def nameNode(nameNodeIP):
    nameNodeDirectory = namenode_directory
    nameNodePort = namenode_port
    hdfsSite('name', f'/root/{nameNodeDirectory}')
    coreSite(nameNodeIP, nameNodePort)
    copyTemplate(nameNodeIP)
    installationScript('name', nameNodeDirectory)
    subprocess.run(f"ssh root@{nameNodeIP} 'bash -s' < ./temp/installationScript.sh", shell=True)
    return nameNodePort

def dataNode(dataNodeIP, nameNodeIP, nameNodePort):
    dataNodeDirectory = datanode_directory
    hdfsSite('data', f'/root/{dataNodeDirectory}')
    coreSite(nameNodeIP, nameNodePort)
    copyTemplate(dataNodeIP)
    installationScript('data', dataNodeDirectory)
    subprocess.run(f"ssh root@{dataNodeIP} 'bash -s' < ./temp/installationScript.sh", shell=True)

def configure():
    nameNodeIP = namenode_ip
    dataNodeIP = datanode_ip
    nameNodePort = nameNode(nameNodeIP)
    dataNode(dataNodeIP, nameNodeIP, nameNodePort)
    print("HADOOP CLUSTER SUCCESS")

configure()
| 35.506667 | 96 | 0.733383 | 298 | 2,663 | 6.473154 | 0.244966 | 0.029031 | 0.02281 | 0.054432 | 0.411612 | 0.346293 | 0.327631 | 0.288232 | 0.273199 | 0.273199 | 0 | 0.002163 | 0.131806 | 2,663 | 74 | 97 | 35.986486 | 0.83218 | 0.006384 | 0 | 0.262295 | 0 | 0 | 0.220038 | 0.084688 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0 | 0.04918 | 0 | 0.180328 | 0.04918 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68f2c194115b15d0b37b208d612c93ce0b1eb749 | 1,418 | py | Python | gs15_py/Inversion.py | Jajajzhh/Blockchain_EncryptionKasumi | 604352a804433e482d754ab928aaacfd70cada04 | [
"MIT"
] | null | null | null | gs15_py/Inversion.py | Jajajzhh/Blockchain_EncryptionKasumi | 604352a804433e482d754ab928aaacfd70cada04 | [
"MIT"
] | null | null | null | gs15_py/Inversion.py | Jajajzhh/Blockchain_EncryptionKasumi | 604352a804433e482d754ab928aaacfd70cada04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#Examples of irreducible polynomials of degree 16:
#x^16 + x^9 + x^8 + x^7 + x^6 + x^4 + x^3 + x^2 + 1
#x^16 + x^12 + x^3 + x^1 + 1
#x^16 + x^12 + x^7 + x^2 + 1
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_gcdex, gf_strip

def gf_inv(a):  # inversion modulo an irreducible polynomial over GF(2)
    # mod = 0x18f57 => x^16 + x^15 + x^11 + x^10 + x^9 + x^8 + x^6 + x^4 + x^2 + x^1 + 1 (irreducible polynomial)
    mod = [1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    a = hextolist(a)
    s, t, g = gf_gcdex(ZZ.map(gf_strip(a)), ZZ.map(mod), 2, ZZ)
    return listtohex(s)

def gf_degree(a):
    res = 0
    a >>= 1
    while (a != 0):
        a >>= 1
        res += 1
    return res

def gf_invert(a, mod):
    # Inversion in GF(2^8); the field degree (8) and the 8-bit mask (256) are hard-coded below.
    v = mod
    g1 = 1
    g2 = 0
    j = gf_degree(a) - 8
    while (a != 1):
        if (j < 0):
            a, v = v, a
            g1, g2 = g2, g1
            j = -j
        a ^= v << j
        g1 ^= g2 << j
        a %= 256  # Emulating 8-bit overflow
        g1 %= 256  # Emulating 8-bit overflow
        j = gf_degree(a) - gf_degree(v)
    return g1

def hextolist(num):
    outlist = [1 if num & (1 << (15 - n)) else 0 for n in range(16)]
    return outlist

def listtohex(bitlist):
    out = 0
    for bit in bitlist:
        out = (out << 1) | bit
    return out

if __name__ == '__main__':
    a = 0xf48
    s = gf_inv(a)
    print(hex(s))
    b = 0x4ccd
    s = gf_inv(b)
    print(hex(s))
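    # Illustrative sanity check (a sketch that assumes sympy's gf_mul/gf_rem helpers):
    # a polynomial times its inverse must reduce to [1] modulo the same irreducible
    # polynomial that gf_inv uses internally.
    from sympy.polys.galoistools import gf_mul, gf_rem
    mod = [1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1]
    poly = ZZ.map(gf_strip(hextolist(b)))
    inv = ZZ.map(gf_strip(hextolist(gf_inv(b))))
    print(gf_rem(gf_mul(poly, inv, 2, ZZ), ZZ.map(mod), 2, ZZ))  # expected: [1]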
| 23.245902 | 111 | 0.504937 | 261 | 1,418 | 2.666667 | 0.295019 | 0.022989 | 0.022989 | 0.011494 | 0.12931 | 0.022989 | 0 | 0 | 0 | 0 | 0 | 0.112169 | 0.333568 | 1,418 | 60 | 112 | 23.633333 | 0.624339 | 0.245416 | 0 | 0.088889 | 0 | 0 | 0.007992 | 0 | 0 | 0 | 0.010989 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.044444 | 0 | 0.266667 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68f4f86882cc9b12d44b29372c5ed00acbd058e5 | 2,469 | py | Python | pic2formula/polyline.py | s-col/pic2formula | 0e254f98fca8dd22b5136d018cd33413baba85d5 | [
"MIT"
] | null | null | null | pic2formula/polyline.py | s-col/pic2formula | 0e254f98fca8dd22b5136d018cd33413baba85d5 | [
"MIT"
] | null | null | null | pic2formula/polyline.py | s-col/pic2formula | 0e254f98fca8dd22b5136d018cd33413baba85d5 | [
"MIT"
] | null | null | null | import math
import numpy as np
from scipy import interpolate


class Polyline(list):
    @staticmethod
    def _2Dcheck(value):
        if len(value) != 2:
            raise ValueError("Value must be 2-D.")

    def __init__(self):
        super().__init__()

    def __setitem__(self, key, value):
        self._2Dcheck(value)
        value = np.array(value, np.float64)
        super().__setitem__(key, value)

    def __str__(self):
        s = ""
        for value in self:
            s += "["
            for v in value:
                s += str(v) + ", "
            s = s[:-2] + "], "
        s = "[{}]".format(s[:-2])
        return s

    def append(self, value):
        self._2Dcheck(value)
        value = np.array(value, np.float64)
        super().append(value)

    def extend(self, sequence):
        for value in sequence:
            self.append(value)

    def length(self):
        res = 0
        n = len(self)
        for i in range(n - 1):
            e = self[i + 1] - self[i]
            d = math.sqrt(np.dot(e, e))
            res += d
        return res

    def get_x_arr(self):
        return np.array([p[0] for p in self])

    def get_y_arr(self):
        return np.array([p[1] for p in self])

    def bspline(self, k=2):
        c = np.array(self)
        n = c.shape[0]
        if n <= k:
            msg = "The number of points must be more than {}."
            raise ValueError(msg.format(k))
        t = np.zeros(n + k + 1, dtype=np.float64)
        t[n + 1:] = 1
        t[k:n + 1] = np.linspace(0, 1, n - k + 1)
        return interpolate.BSpline(t, c, k, axis=0)

    def closed_bspline(self, epsilon=2, k=2):
        pl = self._close_polyline(epsilon=epsilon)
        c = np.array(pl)
        if np.any(c[0, :] != c[-1, :]):
            c = np.vstack((c, c[0, :]))
        c = np.vstack((c, c[1:k, :]))
        n = c.shape[0]
        dt = 1 / (n - k)
        t0 = - k * dt
        tm = 1 + k * dt
        t = np.linspace(t0, tm, n + k + 1)
        return interpolate.BSpline(t, c, k, axis=0)

    def _close_polyline(self, epsilon):
        """
        Close the polyline (join the last point back up with the first).
        """
        res = self.copy()
        r = self[-1] - self[0]
        delta = math.sqrt(r[0] ** 2 + r[1] ** 2)
        if delta < epsilon:
            res.append(self[0])
        else:
            tmp = res[:-1]
            tmp.reverse()
            res.extend(tmp)
        return res
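

if __name__ == "__main__":
    # Illustrative usage sketch: the square below is an arbitrary example polyline.
    pl = Polyline()
    pl.extend([(0, 0), (1, 0), (1, 1), (0, 1)])
    print("polyline:", pl)
    print("length:", pl.length())
    spline = pl.closed_bspline(k=2)
    print(spline(np.linspace(0, 1, 5)))  # sample five points along the closed curve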
| 26.548387 | 63 | 0.45565 | 334 | 2,469 | 3.272455 | 0.260479 | 0.038426 | 0.008234 | 0.038426 | 0.247027 | 0.203111 | 0.164684 | 0.164684 | 0.164684 | 0.164684 | 0 | 0.032952 | 0.397732 | 2,469 | 92 | 64 | 26.836957 | 0.702085 | 0.002835 | 0 | 0.133333 | 0 | 0 | 0.029838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.04 | 0.026667 | 0.306667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68fe5357f2994a0a75698eb8a8e7824b9a9f7c44 | 2,592 | py | Python | season_crawler.py | charmoky/f1_crawler | c4edec5d0a19283690347fc9ed21c454e5db2d4b | [
"MIT"
] | null | null | null | season_crawler.py | charmoky/f1_crawler | c4edec5d0a19283690347fc9ed21c454e5db2d4b | [
"MIT"
] | null | null | null | season_crawler.py | charmoky/f1_crawler | c4edec5d0a19283690347fc9ed21c454e5db2d4b | [
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
import urllib.parse as urllib
class season_crawler():
def __init__(self, url):
self.url = url
self.hostname = urllib.urlparse(url).hostname
self.source = requests.get(url).text
self.soup = BeautifulSoup(self.source, 'html.parser')
def get_calendar(self):
calendar = {}
# Find out where the calendar is
h2s = self.soup.select('h2')
for h2 in h2s:
if h2.text == "Calendar":
div = h2.parent
# Find out where the event and date are
table = div.find('table')
table_head = table.find('thead')
date_idx = 0
gp_idx = 0
idx = 0
rows = table_head.find_all('tr')
for row in rows:
cols = row.find_all('th')
for col in cols:
if col.text == "Date":
date_idx = idx
if col.text == "Event":
gp_idx = idx
idx = idx + 1
tables_body = table.find_all('tbody')
for table in tables_body:
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
links = cols[gp_idx].select('a')
for link in links:
link = link.get("href")
break
link = link.replace("/classification", "/session-facts")
calendar[cols[gp_idx].text] = ["http://" + self.hostname + link, cols[date_idx].text]
return calendar
def get_drivers(self):
drivers = []
# Find out where the Drivers are
h2s = self.soup.select('h2')
for h2 in h2s:
if h2.text == "Entry List":
div = h2.parent
# Find out where the drivers are
table = div.find('table')
table_head = table.find('thead')
driver_idx = 0
idx = 0
rows = table_head.find_all('tr')
for row in rows:
cols = row.find_all('th')
for col in cols:
if col.text == "Drivers":
driver_idx = idx
idx = idx + 1
tables_body = table.find_all('tbody')
for table in tables_body:
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
drivers.append(cols[driver_idx].text)
return drivers
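
if __name__ == "__main__":
    # Illustrative usage sketch; the URL is a placeholder, and the target page must
    # contain "Calendar" and "Entry List" sections for these calls to succeed.
    crawler = season_crawler("http://example.com/some-season-page")
    print(crawler.get_drivers())
    print(crawler.get_calendar())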
| 28.483516 | 101 | 0.486497 | 305 | 2,592 | 4.019672 | 0.255738 | 0.057096 | 0.039152 | 0.04894 | 0.500816 | 0.500816 | 0.474715 | 0.4323 | 0.4323 | 0.4323 | 0 | 0.013201 | 0.415509 | 2,592 | 90 | 102 | 28.8 | 0.79604 | 0.050154 | 0 | 0.470588 | 0 | 0 | 0.055397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.088235 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68ffd02431b608770d9cb6427c85ad56bdc7189e | 1,921 | py | Python | cogs/clock/cog.py | DenverCoder1/weasley-chess-bot | 70d6250cf2eea4faceacf3a809f38d8cc6f19059 | [
"MIT"
] | 2 | 2021-05-05T16:06:38.000Z | 2021-05-05T19:21:30.000Z | cogs/clock/cog.py | DenverCoder1/weasley-chess-bot | 70d6250cf2eea4faceacf3a809f38d8cc6f19059 | [
"MIT"
] | 7 | 2021-06-22T21:36:29.000Z | 2022-01-21T19:15:58.000Z | cogs/clock/cog.py | DenverCoder1/weasley-chess-bot | 70d6250cf2eea4faceacf3a809f38d8cc6f19059 | [
"MIT"
] | null | null | null | import config
import discord
from discord.errors import HTTPException
from discord.ext import commands
from discord.ext.tasks import loop
from .clock import (
clock_embed,
get_or_create_message,
new_channel_name,
get_embed_title,
)
class Clock(commands.Cog, name="🕒 Clock"):
def __init__(self, bot: commands.Bot):
self.__bot = bot
@commands.Cog.listener()
async def on_ready(self):
"""When discord is connected"""
# Start clock
self.clock.start()
print("Starting clock...")
@loop(seconds=1)
async def clock(self):
"""Loop to check and update clock"""
# update the clock message
try:
embed = clock_embed()
# update only if the time is different
if embed.title != get_embed_title(self.__message):
# edit the message
await self.__message.edit(embed=embed)
except HTTPException:
# if message doesn't exist, create a new one
self.__message = await get_or_create_message(self.__bot, self.__channel)
# update channel name if it has changed
channel_name = new_channel_name()
if self.__channel.name != channel_name:
await self.__channel.edit(name=channel_name)
@clock.before_loop
async def clock_init(self) -> None:
"""print startup info before reddit feed loop begins"""
# get clock channel object
self.__channel = self.__bot.get_channel(config.CLOCK_CHANNEL_ID)
# check that channel exists
if not isinstance(self.__channel, discord.TextChannel):
print("Couldn't find that channel.")
return self.clock.cancel()
# if channel exists, get the last message from the bot or create one
self.__message = await get_or_create_message(self.__bot, self.__channel)
def setup(bot):
bot.add_cog(Clock(bot))
| 32.559322 | 84 | 0.645497 | 247 | 1,921 | 4.777328 | 0.327935 | 0.065254 | 0.027966 | 0.045763 | 0.09322 | 0.09322 | 0.09322 | 0.09322 | 0.09322 | 0.09322 | 0 | 0.000711 | 0.26809 | 1,921 | 58 | 85 | 33.12069 | 0.837838 | 0.150442 | 0 | 0.052632 | 0 | 0 | 0.034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec006c8af6686f4d727533bc4bdf28b8f92b3527 | 6,989 | py | Python | Ellie_Lambda_Introductory_Convo.py | dodobirb/Project_2_ETF_Lex_RoboAdvisor | f8e1a5143e74c0dc6f8bef1ed6d4b4fbd6c66739 | [
"MIT"
] | null | null | null | Ellie_Lambda_Introductory_Convo.py | dodobirb/Project_2_ETF_Lex_RoboAdvisor | f8e1a5143e74c0dc6f8bef1ed6d4b4fbd6c66739 | [
"MIT"
] | null | null | null | Ellie_Lambda_Introductory_Convo.py | dodobirb/Project_2_ETF_Lex_RoboAdvisor | f8e1a5143e74c0dc6f8bef1ed6d4b4fbd6c66739 | [
"MIT"
] | null | null | null | ### Required Libraries ###
from datetime import datetime
from dateutil.relativedelta import relativedelta
### Functionality Helper Functions ###
def parse_float(n):
"""
Securely converts a non-numeric value to float.
"""
try:
return float(n)
except ValueError:
return float("nan")
def build_validation_result(is_valid, violated_slot, message_content):
"""
Defines an internal validation message structured as a python dictionary.
"""
if message_content is None:
return {"isValid": is_valid, "violatedSlot": violated_slot}
return {
"isValid": is_valid,
"violatedSlot": violated_slot,
"message": {"contentType": "PlainText", "content": message_content},
}
def validate_data(birthdate, term, intent_request):
"""
Validates the data provided by the user.
"""
# Validate that the user is over 18 years old
if birthdate is not None:
birth_date = datetime.strptime(birthdate, "%Y-%m-%d")
age = relativedelta(datetime.now(), birth_date).years
if age < 18:
return build_validation_result(
False,
"birthdate",
"You should be at least 18 years old to use this service, "
"please provide a different date of birth.",
)
#Validate term length (short or long)
if term is not None:
if term.lower() not in {"short", "long"}:
return build_validation_result(
False,
"term",
"I don't understand. Please enter short or long."
)
return build_validation_result(True, True, None)
### Dialog Actions Helper Functions ###
def get_slots(intent_request):
"""
Fetch all the slots and their values from the current intent.
"""
return intent_request["currentIntent"]["slots"]
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
"""
Defines an elicit slot type response.
"""
return {
"sessionAttributes": session_attributes,
"dialogAction": {
"type": "ElicitSlot",
"intentName": intent_name,
"slots": slots,
"slotToElicit": slot_to_elicit,
"message": message,
},
}
def delegate(session_attributes, slots):
"""
Defines a delegate slot type response.
"""
return {
"sessionAttributes": session_attributes,
"dialogAction": {"type": "Delegate", "slots": slots},
}
def close(session_attributes, fulfillment_state, message):
"""
Defines a close slot type response.
"""
response = {
"sessionAttributes": session_attributes,
"dialogAction": {
"type": "Close",
"fulfillmentState": fulfillment_state,
"message": message,
},
}
return response
### Intents Handlers ###
def ellie_conversation(intent_request):
"""
Performs dialog management and fulfillment for Ellie's introductory portfolio recommendation.
"""
# Gets slots' values
birthdate = get_slots(intent_request)["birthdate"]
term_length = get_slots(intent_request)["term"]
risk_level = get_slots(intent_request)["risk"]
term_len_num = 0
risk_lev_num = 0  # default so the allocation computation below never hits an unbound name
# Gets the invocation source, for Lex dialogs "DialogCodeHook" is expected.
source = intent_request["invocationSource"]
if source == "DialogCodeHook":
# This code performs basic validation on the supplied input slots.
# Gets all the slots
slots = get_slots(intent_request)
# Validates user's input using the validate_data function
validation_result = validate_data(birthdate, term_length, intent_request)
# If the data provided by the user is not valid,
# the elicitSlot dialog action is used to re-prompt for the first violation detected.
if not validation_result["isValid"]:
slots[validation_result["violatedSlot"]] = None # Cleans invalid slot
# Returns an elicitSlot dialog to request new data for the invalid slot
return elicit_slot(
intent_request["sessionAttributes"],
intent_request["currentIntent"]["name"],
slots,
validation_result["violatedSlot"],
validation_result["message"],
)
# Fetch current session attributes
output_session_attributes = intent_request["sessionAttributes"]
# Once all slots are valid, a delegate dialog is returned to Lex to choose the next course of action.
return delegate(output_session_attributes, get_slots(intent_request))
if term_length is not None:
if term_length.lower() == "short":
term_len_num = 10
elif term_length.lower() == "long":
term_len_num = 20
if risk_level is not None:
if risk_level.lower() == "none":
risk_lev_num = 1
elif risk_level.lower() == "low":
risk_lev_num = 2
elif risk_level.lower() == "medium":
risk_lev_num = 3
elif risk_level.lower() =="high":
risk_lev_num = 4
allocation = term_len_num + risk_lev_num
if allocation in {11, 12, 21, 22}:
return close(
intent_request["sessionAttributes"],
"Fulfilled",
{
"contentType": "PlainText",
"content" : "Your ideal portfolio should be 40% stocks and 60% bonds. Which would you like to explore first?"
})
elif allocation in {13, 14, 23, 24}:
return close(
intent_request["sessionAttributes"],
"Fulfilled",
{
"contentType": "PlainText",
"content": "Your ideal portfolio should be 80% stocks and 20% bonds. Which would you like to explore first?"
})
### Intents Dispatcher ###
def dispatch(intent_request):
"""
Called when the user specifies an intent for this bot.
"""
# Get the name of the current intent
intent_name = intent_request["currentIntent"]["name"]
# Dispatch to bot's intent handlers
if intent_name == "EllieIntro":
return ellie_conversation(intent_request)
raise Exception("Intent with name " + intent_name + " not supported")
### Main Handler ###
def lambda_handler(event, context):
"""
Route the incoming request based on intent.
The JSON body of the request is provided in the event slot.
"""
return dispatch(event)
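### Local Test Harness ###
if __name__ == "__main__":
    # Illustrative sketch of a Lex fulfillment event for local testing; the field
    # values are assumptions for demonstration, not a captured Lex payload.
    sample_event = {
        "currentIntent": {
            "name": "EllieIntro",
            "slots": {"birthdate": "1990-01-01", "term": "long", "risk": "high"},
        },
        "sessionAttributes": {},
        "invocationSource": "FulfillmentCodeHook",
    }
    print(lambda_handler(sample_event, None))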
| 31.624434 | 126 | 0.593075 | 740 | 6,989 | 5.45 | 0.312162 | 0.067692 | 0.027771 | 0.041656 | 0.216464 | 0.159683 | 0.147781 | 0.104637 | 0.086784 | 0.051079 | 0 | 0.008166 | 0.31664 | 6,989 | 220 | 127 | 31.768182 | 0.836265 | 0.201889 | 0 | 0.219512 | 0 | 0.01626 | 0.191939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081301 | false | 0 | 0.02439 | 0 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec01276c37a7b31604c880dcb9890eab8f123340 | 876 | py | Python | Algoritma/radixSort.py | SyamsulAlterra/Alta | 13e8c185e91414e3f46e5d20f39370f8e58e7cd0 | [
"MIT"
] | null | null | null | Algoritma/radixSort.py | SyamsulAlterra/Alta | 13e8c185e91414e3f46e5d20f39370f8e58e7cd0 | [
"MIT"
] | 6 | 2021-09-02T18:50:40.000Z | 2022-02-27T11:06:31.000Z | Algoritma/radixSort.py | SyamsulAlterra/Alta | 13e8c185e91414e3f46e5d20f39370f8e58e7cd0 | [
"MIT"
] | null | null | null | arr=[123,321,487,908,123,465,987,46,762,12389]
def findMax(arr):
    maximum = None
    for num in arr:
        if maximum is None or num > maximum:
            maximum = num
    return maximum

def checkDigit(num):
    i = 0
    remainder = 0
    while (remainder != num):
        i += 1
        remainder = num % (10**i)
    return i

def digit(i, num):
    return int((num % (10**i) - num % (10**(i - 1))) / (10**(i - 1)))

def radix(arr):
    digitMax = checkDigit(findMax(arr))
    currentDigit = 0
    while (currentDigit <= digitMax):
        currentDigit += 1
        lst = [[], [], [], [], [], [], [], [], [], []]
        for num in arr:
            numberDigit = digit(currentDigit, num)
            if checkDigit(num) < currentDigit:
                lst[0].append(num)
            else:
                lst[numberDigit].append(num)
        arr = []
        for l in lst:
            arr += l
        print(lst)
    return arr

print(radix(arr))
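# Quick illustrative checks of the digit helper and the sort itself:
assert digit(1, 487) == 7 and digit(2, 487) == 8 and digit(3, 487) == 4
assert radix([3, 1, 2]) == [1, 2, 3]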
| 21.365854 | 57 | 0.515982 | 114 | 876 | 3.964912 | 0.333333 | 0.026549 | 0.039823 | 0.048673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078073 | 0.312785 | 876 | 40 | 58 | 21.9 | 0.672757 | 0 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0.029412 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec029f2e7581e96d907379e0c7b77a55feafa39c | 577 | py | Python | users/urls.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | 2 | 2021-12-31T22:14:31.000Z | 2021-12-31T22:14:34.000Z | users/urls.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | null | null | null | users/urls.py | pauloaugusto-dmf/blog_django | 7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
UserLoginView,
UserLogoutView,
UserSignupView,
UserUpdateView,
UserDeleteView,
UserProfileView,
)
app_name = "user"
urlpatterns = [
path("signup", UserSignupView.as_view(), name="signup"),
path("update", UserUpdateView.as_view(), name="update"),
path("delete", UserDeleteView.as_view(), name="delete"),
path("login", UserLoginView.as_view(), name="login"),
path("logout", UserLogoutView.as_view(), name="logout"),
path("profile", UserProfileView.as_view(), name="profile"),
]
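# These routes are namespaced via app_name; a project-level URLConf would typically
# mount them with something like the following (illustrative):
#     path("users/", include("users.urls"))
# after which individual routes reverse as "user:login", "user:profile", etc.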
| 26.227273 | 63 | 0.679376 | 60 | 577 | 6.416667 | 0.4 | 0.093506 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157712 | 577 | 21 | 64 | 27.47619 | 0.792181 | 0 | 0 | 0 | 0 | 0 | 0.131716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec03beeeb8e83dd9401f7dcd95618bf9eb96ca5c | 7,232 | py | Python | utils/image_processing.py | ebritsyn/obormot | de05707d0a9b7a8de813ee8e1104dea02caeb236 | [
"MIT"
] | 3 | 2017-12-09T16:16:18.000Z | 2020-05-05T12:01:53.000Z | utils/image_processing.py | ebritsyn/obormot | de05707d0a9b7a8de813ee8e1104dea02caeb236 | [
"MIT"
] | null | null | null | utils/image_processing.py | ebritsyn/obormot | de05707d0a9b7a8de813ee8e1104dea02caeb236 | [
"MIT"
] | null | null | null | import io
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from keras.models import model_from_json
import dlib
class Model:
"""This class represents the base model of the whole project. The model
predicts if the face on the picture is smiling or not. The model and
its weights are being loaded from data/model/. The main method of the
class is predict_labels(), which takes an input image, finds faces in
this image, draws their boundary boxes and labels these faces by adding
corresponding emojis on the picture.
"""
def __init__(self):
self.smiley = cv2.imread('data/pics/smiling.png')
self.neutral = cv2.imread('data/pics/neutral.png')
self.smiley = cv2.cvtColor(self.smiley, cv2.COLOR_BGR2RGB)
self.neutral = cv2.cvtColor(self.neutral, cv2.COLOR_BGR2RGB)
self.model = model_from_json(open('data/model/model.json').read())
self.model.load_weights('data/model/weights.h5')
self.graph = tf.get_default_graph()
@staticmethod
def convert2rgb(img):
"""Convert BGR image into RGB
Parameters: img: ndarray
:rtype: ndarray
"""
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
@staticmethod
def rect_to_bb(rect):
"""Returns rectangle parameters from rectangle object
Parameters: rect: object
:rtype: list
"""
rect_x = rect.left()
rect_y = rect.top()
rect_w = rect.right() - rect_x
rect_h = rect.bottom() - rect_y
return rect_x, rect_y, rect_w, rect_h
def get_faces(self, img):
"""Find faces in the picture and return list of boundary boxes
of found faces
Parameters: img: ndarray
:rtype: list
"""
image = img
detector = dlib.get_frontal_face_detector()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
faces = []
for rect in rects:
faces.append(list(self.rect_to_bb(rect)))
return faces
def get_smile_label(self, img_face):
"""Predict smile on a face and return label
(0 for neutral and 1 for smiling face)
Parameters: img_face: ndarray
:rtype: int
"""
gray_cr_res = cv2.cvtColor(cv2.resize(img_face, (32, 32)),
cv2.COLOR_BGR2GRAY)
gray_cr_res = np.reshape(gray_cr_res, (32, 32, 1)) / 255
with self.graph.as_default():
score = self.model.predict(np.array([gray_cr_res]))[0][1]
threshold = 0.12
if score > threshold:
label = 1
else:
label = 0
return label
@staticmethod
def get_sticker_backgr(backgr, sticker):
"""Merge a sticker and its background and return merged image
Parameters: backgr: ndarray
sticker: ndarray
:rtype: ndarray
"""
sticker_gray = cv2.cvtColor(sticker, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(sticker_gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
backgr_bg = cv2.bitwise_and(backgr, backgr, mask=mask_inv)
sticker_fg = cv2.bitwise_and(sticker, sticker, mask=mask)
merged = cv2.add(backgr_bg, sticker_fg)
return merged
@staticmethod
def crop_face(bound_box, img):
"""Crop face from the input image according to its boundary box
Parameters: bound_box: list
img: ndarray
:rtype: ndarray
"""
f_x, f_y, f_w, f_h = bound_box
top, bottom, left, right = 0, 0, 0, 0
if f_y < 0:
top = - f_y
f_y = 0
if f_x < 0:
left = - f_x
f_x = 0
if f_x + f_w > img.shape[1]:
right = f_x + f_w - img.shape[1]
f_w -= right
if f_y + f_h > img.shape[0]:
bottom = f_y + f_h - img.shape[0]
f_h -= bottom
img_cropped = img[f_y:f_y + f_h, f_x:f_x + f_w]
img_cropped = cv2.copyMakeBorder(img_cropped,
top, bottom, left, right,
cv2.BORDER_REPLICATE)
return img_cropped
def add_stickers(self, img, faces, labels):
"""Add emoji sticker in the input picture according to predicted
smile labels
Parameters: img: ndarray of input image
faces: list of boundary boxes
labels: list of smile labels
:rtype: ndarray
"""
image = np.array(img)
for i, label in enumerate(labels):
if faces[i][0] < 0:
faces[i][2] += faces[i][0]
faces[i][0] = 0
if faces[i][1] < 0:
faces[i][3] += faces[i][1]
faces[i][1] = 0
if faces[i][0] + faces[i][2] > image.shape[1]:
faces[i][2] = image.shape[1] - faces[i][0]
if faces[i][1] + faces[i][3] > image.shape[0]:
faces[i][3] = image.shape[0] - faces[i][1]
st_size = int(min(faces[i][2], faces[i][3]) // 2.2)
smiley = cv2.resize(self.smiley, (st_size, st_size))
neutral = cv2.resize(self.neutral, (st_size, st_size))
y_1 = faces[i][1] + faces[i][3] - st_size
y_2 = faces[i][1] + faces[i][3]
x_1 = faces[i][0] + faces[i][2] - st_size
x_2 = faces[i][0] + faces[i][2]
if label == 1:
image[y_1:y_2, x_1:x_2] = \
self.get_sticker_backgr(image[y_1:y_2, x_1:x_2], smiley)
else:
image[y_1:y_2, x_1:x_2] = \
self.get_sticker_backgr(image[y_1:y_2, x_1:x_2], neutral)
return image
def predict_labels(self, img):
"""Predict if there smiles on picture and label faces with corresponding emoji
Parameters: img: ndarray of input image
:rtype: ndarray
"""
image = self.bytes2ndarray(img)
faces = self.get_faces(image)
num_faces = len(faces)
labels = []
if num_faces == 0:
return num_faces, image
for bound_box in faces:
img_cropped = self.crop_face(bound_box, image)
label = self.get_smile_label(img_cropped)
labels.append(label)
color = (0, 255, 0)
for (f_x, f_y, f_w, f_h) in faces:
cv2.rectangle(image, (f_x, f_y), (f_x + f_w, f_y + f_h), color, 2)
image = self.add_stickers(image, faces, labels)
return num_faces, self.ndarray2bytes(image)
@staticmethod
def ndarray2bytes(array):
"""Convert image into bytes
Parameters: array: ndarray
:rtype: io.BytesIO
"""
buf = io.BytesIO()
Image.fromarray(array).save(buf, format="jpeg")
return buf
@staticmethod
def bytes2ndarray(buf):
"""Convert image into bytes
Parameters: buf: io.BytesIO
:rtype: ndarray
"""
image = np.array(Image.open(buf))
return image
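

if __name__ == "__main__":
    # Illustrative usage sketch; "example.jpg" and "labeled.jpg" are placeholder paths.
    model = Model()
    with open("example.jpg", "rb") as f:
        buf = io.BytesIO(f.read())
    num_faces, result = model.predict_labels(buf)
    print("faces found:", num_faces)
    if num_faces:  # with at least one face, predict_labels returns a BytesIO of the JPEG
        with open("labeled.jpg", "wb") as out:
            out.write(result.getvalue())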
| 33.953052 | 86 | 0.552683 | 971 | 7,232 | 3.955716 | 0.183316 | 0.040614 | 0.007029 | 0.004166 | 0.145795 | 0.102317 | 0.063525 | 0.049987 | 0.023952 | 0.023952 | 0 | 0.029914 | 0.343612 | 7,232 | 212 | 87 | 34.113208 | 0.779229 | 0.215431 | 0 | 0.093023 | 0 | 0 | 0.016849 | 0.016083 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085271 | false | 0 | 0.054264 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec05ae5f172a76ae27b1aab78fa3fe888df7828e | 30,898 | py | Python | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | null | null | null | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | null | null | null | antipetros_discordbot/cogs/general_cogs/image_manipulation_cog.py | official-antistasi-community/Antipetros_Discord_Bot | 1b5c8b61c09e61cdff671e259f0478d343a50c8d | [
"MIT"
] | 1 | 2021-02-12T01:10:51.000Z | 2021-02-12T01:10:51.000Z |
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import asyncio
from io import BytesIO
from pathlib import Path
from datetime import datetime
from tempfile import TemporaryDirectory
# * Third Party Imports --------------------------------------------------------------------------------->
import discord
from PIL import Image, ImageEnhance, ImageDraw, ImageFont, ImageFilter
from pytz import timezone
from discord.ext import commands, flags
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# * Local Imports --------------------------------------------------------------------------------------->
from antipetros_discordbot.utility.misc import alt_seconds_to_pretty
from antipetros_discordbot.utility.checks import allowed_channel_and_allowed_role, has_attachments, log_invoker, owner_or_admin
from antipetros_discordbot.utility.embed_helpers import make_basic_embed
from antipetros_discordbot.utility.gidtools_functions import pathmaker
from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper
from antipetros_discordbot.utility.exceptions import ParameterError
from antipetros_discordbot.utility.image_manipulation import find_min_fontsize, make_perfect_fontsize
from typing import TYPE_CHECKING
from antipetros_discordbot.utility.enums import CogMetaStatus, UpdateTypus, WatermarkPosition
from antipetros_discordbot.engine.replacements import AntiPetrosBaseCog, AntiPetrosBaseGroup, AntiPetrosFlagCommand, CommandCategory, auto_meta_info_command, auto_meta_info_group
if TYPE_CHECKING:
from antipetros_discordbot.engine.antipetros_bot import AntiPetrosBot
# endregion[Imports]
# region [TODO]
# TODO: create regions for this file
# TODO: Document and Docstrings
# endregion [TODO]
# region [Logging]
log = glog.aux_logger(__name__)
glog.import_notification(log, __name__)
# endregion[Logging]
# region [Constants]
APPDATA = ParaStorageKeeper.get_appdata()
BASE_CONFIG = ParaStorageKeeper.get_config('base_config')
COGS_CONFIG = ParaStorageKeeper.get_config('cogs_config')
THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) # location of this file, does not work if app gets compiled to exe with pyinstaller
# endregion [Constants]
class ImageManipulationCog(AntiPetrosBaseCog, command_attrs={'hidden': False, "categories": CommandCategory.GENERAL}):
"""
Commands that manipulate or generate images.
"""
# region [ClassAttributes]
public = True
meta_status = CogMetaStatus.WORKING | CogMetaStatus.OPEN_TODOS | CogMetaStatus.FEATURE_MISSING | CogMetaStatus.NEEDS_REFRACTORING
long_description = ""
extra_info = ""
required_config_data = {'base_config': {},
'cogs_config': {"avatar_stamp": "ASLOGO1",
"avatar_stamp_fraction": "0.2",
"stamps_margin": "5",
"stamp_fraction": "0.3"}}
required_folder = []
required_files = []
allowed_stamp_formats = {".jpg", ".jpeg", ".png", ".tga", ".tiff", ".ico", ".icns", ".gif"}
stamp_positions = {'top': WatermarkPosition.Top, 'bottom': WatermarkPosition.Bottom, 'left': WatermarkPosition.Left, 'right': WatermarkPosition.Right, 'center': WatermarkPosition.Center}
# endregion[ClassAttributes]
# region [Init]
def __init__(self, bot: "AntiPetrosBot"):
super().__init__(bot)
self.stamp_location = APPDATA['stamps']
self.stamps = {}
self.nato_symbol_parts_location = APPDATA['nato_symbol_parts']
self.nato_symbol_parts_images = {}
self.stamp_pos_functions = {WatermarkPosition.Right | WatermarkPosition.Bottom: self._to_bottom_right,
WatermarkPosition.Right | WatermarkPosition.Top: self._to_top_right,
WatermarkPosition.Right | WatermarkPosition.Center: self._to_center_right,
WatermarkPosition.Left | WatermarkPosition.Bottom: self._to_bottom_left,
WatermarkPosition.Left | WatermarkPosition.Top: self._to_top_left,
WatermarkPosition.Left | WatermarkPosition.Center: self._to_center_left,
WatermarkPosition.Center | WatermarkPosition.Center: self._to_center_center,
WatermarkPosition.Center | WatermarkPosition.Bottom: self._to_bottom_center,
WatermarkPosition.Center | WatermarkPosition.Top: self._to_top_center}
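# The string keys of the following mapping mirror a numeric keypad:
# 7 8 9 = top row, 4 5 6 = center row, 1 2 3 = bottom row (left/middle/right).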
self.stamp_pos_functions_by_num = {'3': self._to_bottom_right,
'9': self._to_top_right,
'6': self._to_center_right,
'1': self._to_bottom_left,
'7': self._to_top_left,
'4': self._to_center_left,
'5': self._to_center_center,
'2': self._to_bottom_center,
'8': self._to_top_center}
self.position_normalization_table = {'top': ['upper', 'above', 'up', 't', 'u'],
'bottom': ['down', 'lower', 'b', 'base'],
'center': ['middle', 'c', 'm'],
'left': ['l'],
'right': ['r']}
# self.base_map_image = Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v3_2000_w_outposts.png")
# self.outpost_overlay = {'city': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_city_marker.png"),
# 'volcano': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_volcano_marker.png"),
# 'airport': Image.open(r"D:\Dropbox\hobby\Modding\Ressources\Arma_Ressources\maps\tanoa_v2_2000_airport_marker.png")}
self.old_map_message = None
self.color = "blue"
# endregion[Init]
# region [Setup]
async def on_ready_setup(self):
await super().on_ready_setup()
self._get_stamps()
self._get_nato_symbol_parts()
self.ready = True
log.debug('setup for cog "%s" finished', str(self))
async def update(self, typus: UpdateTypus):
await super().update(typus=typus)
self._get_stamps()
log.debug('cog "%s" was updated', str(self))
# endregion[Setup]
# region [Properties]
@property
def target_stamp_fraction(self):
return COGS_CONFIG.retrieve(self.config_name, 'stamp_fraction', typus=float, direct_fallback=0.2)
@property
def stamp_margin(self):
return COGS_CONFIG.retrieve(self.config_name, 'stamps_margin', typus=int, direct_fallback=5)
@property
def avatar_stamp_fraction(self):
return COGS_CONFIG.retrieve(self.config_name, 'avatar_stamp_fraction', typus=float, direct_fallback=0.3)
@property
def avatar_stamp(self):
stamp_name = COGS_CONFIG.retrieve(self.config_name, 'avatar_stamp', typus=str, direct_fallback='ASLOGO1').upper()
return self._get_stamp_image(stamp_name, 1)
@property
def fonts(self):
fonts = {}
for file in os.scandir(APPDATA['fonts']):
if file.is_file() and file.name.endswith('ttf'):
fonts[file.name.split('.')[0].casefold()] = pathmaker(file.path)
return fonts
# endregion[Properties]
# region [Listener]
# endregion[Listener]
# region [Commands]
@flags.add_flag("--stamp-image", "-si", type=str, default='ASLOGO')
@flags.add_flag("--first-pos", '-fp', type=str, default="bottom")
@flags.add_flag("--second-pos", '-sp', type=str, default="right")
@flags.add_flag("--stamp-opacity", '-so', type=float, default=1.0)
@flags.add_flag('--factor', '-f', type=float, default=None)
@auto_meta_info_command(cls=AntiPetrosFlagCommand)
@allowed_channel_and_allowed_role(in_dm_allowed=False)
@commands.max_concurrency(1, per=commands.BucketType.guild, wait=True)
async def stamp_image(self, ctx, **flags):
"""
Stamps an image with a small image from the available stamps.
Needs to have the image to stamp as an attachment on the invoking message.
Useful for watermarking images.
Get all available stamps with '@AntiPetros available_stamps'
Example:
@AntiPetros stamp_image -si ASLOGO -fp bottom -sp right -so 0.5 -f 0.25
"""
async with ctx.channel.typing():
if len(ctx.message.attachments) == 0:
# TODO: make as embed
await ctx.send('! **there is NO image to antistasify** !')
return
if flags.get('stamp_image') not in self.stamps:
# TODO: make as embed
await ctx.send("! **There is NO stamp with that name** !")
return
first_pos = self.stamp_positions.get(flags.get("first_pos").casefold(), None)
second_pos = self.stamp_positions.get(flags.get("second_pos").casefold(), None)
if any(_pos is None for _pos in [first_pos, second_pos]) or first_pos | second_pos not in self.stamp_pos_functions:
# TODO: make as embed
await ctx.send("! **Those are NOT valid position combinations** !")
return
for _file in ctx.message.attachments:
# TODO: maybe make extra attribute for input format, check what is possible and working. else make a generic format list
if any(_file.filename.endswith(allowed_ext) for allowed_ext in self.allowed_stamp_formats):
_stamp = self._get_stamp_image(flags.get('stamp_image'), flags.get('stamp_opacity'))
_stamp = _stamp.copy()
with TemporaryDirectory(prefix='temp') as temp_dir:
temp_file = Path(pathmaker(temp_dir, 'temp_file.png'))
log.debug("Tempfile '%s' created", temp_file)
await _file.save(temp_file)
in_image = await asyncio.to_thread(Image.open, temp_file)
in_image = await asyncio.to_thread(in_image.copy)
factor = self.target_stamp_fraction if flags.get('factor') is None else flags.get('factor')
pos_function = self.stamp_pos_functions.get(first_pos | second_pos)
in_image = await asyncio.to_thread(pos_function, in_image, _stamp, factor)
name = 'antistasified_' + os.path.splitext(_file.filename)[0]
await ctx.message.delete()
# TODO: make as embed
await self._send_image(ctx, in_image, name, f"__**{name}**__")
@auto_meta_info_command()
@allowed_channel_and_allowed_role(in_dm_allowed=False)
@commands.cooldown(1, 120, commands.BucketType.channel)
async def available_stamps(self, ctx):
"""
Posts all available stamps.
Removes them after 2min to keep channel clean.
Example:
@AntiPetros available_stamps
"""
await ctx.message.delete()
await ctx.send(embed=await make_basic_embed(title="__**Currently available Stamps are:**__", footer="These messages will be deleted in 120 seconds", symbol='photo'), delete_after=120)
for name, image_path in self.stamps.items():
thumb_image = Image.open(image_path)
thumb_image.thumbnail((128, 128))
with BytesIO() as image_binary:
await asyncio.sleep(0)
thumb_image.save(image_binary, 'PNG', optimize=True)
image_binary.seek(0)
_file = discord.File(image_binary, filename=name + '.png')
embed = discord.Embed(title="Available Stamp")
embed.add_field(name='Stamp Name:', value=name)
embed.set_image(url=f"attachment://{name}.png")
await ctx.send(embed=embed, file=_file, delete_after=120)
@auto_meta_info_group(case_insensitive=True, cls=AntiPetrosBaseGroup)
async def member_avatar(self, ctx):
"""
Stamps the avatar of a Member with the Antistasi Crest.
Returns the new stamped avatar as a .PNG image that the Member can save and replace their original avatar with.
Example:
@AntiPetros member_avatar
"""
@member_avatar.command()
@allowed_channel_and_allowed_role()
async def for_discord(self, ctx):
modified_avatar = await self._member_avatar_helper(ctx.author, self._to_center_center, 0.66)
name = f"{ctx.author.name}_Member_avatar"
await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300) # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
await ctx.message.delete()
@member_avatar.command()
async def for_github(self, ctx):
modified_avatar = await self._member_avatar_helper(ctx.author, self._to_bottom_center, 1)
name = f"{ctx.author.name}_Member_avatar"
await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300) # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
await ctx.message.delete()
@member_avatar.command()
async def by_num(self, ctx, numberpad: str):
if len(numberpad) > 1:
await ctx.send('Please enter only a single digit for the numberpad position and retry!')
return
if numberpad == '0':
await ctx.send('0 is not a valid position, please try again!')
return
func = self.stamp_pos_functions_by_num.get(numberpad)
modified_avatar = await self._member_avatar_helper(ctx.author, func, 1)
name = f"{ctx.author.name}_Member_avatar"
await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300) # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
await ctx.message.delete()
@member_avatar.command()
async def by_place(self, ctx, first_pos: str, second_pos: str):
first_pos = await self._normalize_pos(first_pos)
second_pos = await self._normalize_pos(second_pos)
func = self.stamp_pos_functions.get(self.stamp_positions.get(first_pos) | self.stamp_positions.get(second_pos))
modified_avatar = await self._member_avatar_helper(ctx.author, func, 1)
name = f"{ctx.author.name}_Member_avatar"
await self._send_image(ctx, modified_avatar, name, "**Your New Avatar**", delete_after=300) # change completion line to "Pledge your allegiance to the Antistasi Rebellion!"?
await ctx.message.delete()
@auto_meta_info_command(aliases=["get_image"])
@allowed_channel_and_allowed_role()
async def get_stamp_image(self, ctx: commands.Context, image_name: str):
image_name = image_name.split('.')[0].upper()
if image_name not in self.stamps:
await ctx.send(f"Don't have an image named `{image_name}` saved!", delete_after=120)
return
image = self.stamps.get(image_name)
embed_data = await self.bot.make_generic_embed(title=image_name, description="Your requested image", thumbnail=None, image=image)
await ctx.reply(**embed_data, allowed_mentions=discord.AllowedMentions.none())
@auto_meta_info_command(aliases=["add_image"])
@allowed_channel_and_allowed_role()
@has_attachments(1)
@log_invoker(log, "critical")
async def add_stamp(self, ctx: commands.Context):
"""
Adds a new stamp image to the available stamps.
This command needs to have the image as an attachment.
Example:
@AntiPetros add_stamp
"""
attachment = ctx.message.attachments[0]
file_name = attachment.filename
if file_name.casefold() in {file.casefold() for file in os.listdir(self.stamp_location)}:
await ctx.reply(f"A Stamp file with the name `{file_name}` already exists, aborting!")
return
path = pathmaker(self.stamp_location, file_name)
await attachment.save(path)
stamp_name = file_name.split('.')[0].replace(' ', '_').strip().upper()
await ctx.reply(f"successfully, saved new stamp. The stamp name to use is `{stamp_name}`")
await self.bot.creator.send(f"New stamp was added by `{ctx.author.name}`", file=await attachment.to_file())
self._get_stamps()
@auto_meta_info_command()
@allowed_channel_and_allowed_role(in_dm_allowed=False)
@has_attachments(1)
async def text_to_image(self, ctx: commands.Context, font: str, *, text: str):
mod_font_name = font.split('.')[0].casefold()
if mod_font_name not in self.fonts:
embed_data = await self.bot.make_generic_embed(title='Unkown Font', description=f"No font available with the name `{font}`.\nYou may have to add it via `@AntiPetros add_font`",
thumbnail="cancelled")
await ctx.send(**embed_data, delete_after=120)
return
image_attachment = ctx.message.attachments[0]
if image_attachment.filename.split('.')[-1].casefold() not in ['jpeg', 'png', 'jpg', 'tga']:
embed_data = await self.bot.make_generic_embed(title="Wrong Image Format", description=f"Image need to be either `jpeg`, `png` or `tga` and not `{image_attachment.filename.split('.')[-1]}`",
thumbnail="cancelled")
await ctx.send(**embed_data, delete_after=120)
return
with TemporaryDirectory() as tempdir:
imagefilepath = pathmaker(tempdir, image_attachment.filename)
await image_attachment.save(imagefilepath)
base_image = Image.open(imagefilepath)
base_image.load()
width, height = base_image.size
image_font = await asyncio.to_thread(find_min_fontsize, self.fonts.get(mod_font_name), [line for line in text.splitlines() if line != ''], width, height)
top_space = 0
for line in text.splitlines():
if line == '':
top_space += ((height // 20) * 2)
else:
base_image, top_space = await asyncio.to_thread(self.draw_text_line, base_image, line, top_space, image_font)
await self._send_image(ctx, base_image, image_attachment.filename.split('.')[0] + '_with_text.png', "Modified Image", message_text="Here is your image with pasted Text", image_format='png')
@auto_meta_info_command()
@owner_or_admin()
@has_attachments(1)
async def add_font(self, ctx: commands.Context):
font_attachment = ctx.message.attachments[0]
if font_attachment.filename.split('.')[-1] != 'ttf':
embed_data = await self.bot.make_generic_embed(title='Wrong input filetype', description=f"Attachment has to be a Truetype Font (extension: `.ttf`) and not `.{font_attachment.filename.split('.')[-1]}`",
thumbnail="not_possible")
await ctx.send(**embed_data, delete_after=120)
return
new_path = pathmaker(APPDATA['fonts'], font_attachment.filename)
await font_attachment.save(new_path)
embed_data = await self.bot.make_generic_embed(title="Added new Font", description=f"Font `{font_attachment.filename}` was successfully saved!",
thumbnail="save")
await ctx.send(**embed_data, delete_after=300)
async def _make_font_preview(self, font_name, font_path):
b_image = Image.new('RGBA', (512, 512), color=(256, 256, 256, 0))
image_font = await asyncio.to_thread(make_perfect_fontsize, font_path, font_name, 512, 512)
preview_image = await asyncio.to_thread(self.draw_text_center, b_image, font_name, image_font)
return preview_image
@auto_meta_info_command()
@allowed_channel_and_allowed_role()
async def list_fonts(self, ctx: commands.Context):
embed = discord.Embed(title="Available Fonts")
await ctx.send(embed=embed, delete_after=60)
for font_name, font_path in self.fonts.items():
embed_data = await self.bot.make_generic_embed(title=font_name, image=await self._make_font_preview(font_name, font_path), thumbnail=None)
await ctx.send(**embed_data, delete_after=60)
await asyncio.sleep(60)
await ctx.message.delete()
# endregion[Commands]
# region [HelperMethods]
def _get_nato_symbol_parts(self):
self.nato_symbol_parts_images = {}
for file in os.scandir(self.nato_symbol_parts_location):
if os.path.isfile(file.path) is True:
name = file.name.split('.')[0].replace(' ', '_').strip().upper()
self.nato_symbol_parts_images[name] = file.path
def _get_stamps(self):
self.stamps = {}
for file in os.scandir(self.stamp_location):
if os.path.isfile(file.path) is True:
name = file.name.split('.')[0].replace(' ', '_').strip().upper()
self.stamps[name] = file.path
def _get_stamp_image(self, stamp_name, stamp_opacity):
stamp_name = stamp_name.upper()
image = Image.open(self.stamps.get(stamp_name))
alpha = image.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(stamp_opacity)
image.putalpha(alpha)
return image.copy()
    @staticmethod
    def _stamp_resize(input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        input_image_width_fractioned = input_image_width * factor
        input_image_height_fractioned = input_image_height * factor
        transform_factor_width = input_image_width_fractioned / stamp_image.size[0]
        transform_factor_height = input_image_height_fractioned / stamp_image.size[1]
        transform_factor = (transform_factor_width + transform_factor_height) / 2
        return stamp_image.resize((round(stamp_image.size[0] * transform_factor), round(stamp_image.size[1] * transform_factor)), resample=Image.LANCZOS)
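    # Worked example (illustrative numbers, not from the source): for a
    # 1000x800 input image, factor=0.3 and a 200x100 stamp, the target box is
    # 300x240, so transform_factor = ((300 / 200) + (240 / 100)) / 2 = 1.95 and
    # the stamp is resized to (round(200 * 1.95), round(100 * 1.95)) = (390, 195).
    # Both dimensions share one factor, so the stamp keeps its aspect ratio.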
    def _to_bottom_right(self, input_image, stamp_image, factor):
        log.debug('pasting image to bottom_right')
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_top_right(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_center_right(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (input_image_width - _resized_stamp.size[0] - self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_bottom_left(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_top_left(self, input_image, stamp_image, factor):
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_center_left(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (0 + self.stamp_margin, round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_center_center(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), round((input_image_height / 2) - (_resized_stamp.size[1] / 2))),
                          _resized_stamp)
        return input_image

    def _to_top_center(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), 0 + self.stamp_margin),
                          _resized_stamp)
        return input_image

    def _to_bottom_center(self, input_image, stamp_image, factor):
        input_image_width, input_image_height = input_image.size
        _resized_stamp = self._stamp_resize(input_image, stamp_image, factor)
        input_image.paste(_resized_stamp,
                          (round((input_image_width / 2) - (_resized_stamp.size[0] / 2)), input_image_height - _resized_stamp.size[1] - self.stamp_margin),
                          _resized_stamp)
        return input_image
    async def _send_image(self, ctx, image, name, message_title, message_text=None, image_format=None, delete_after=None):
        image_format = 'png' if image_format is None else image_format
        with BytesIO() as image_binary:
            image.save(image_binary, image_format.upper(), optimize=True)
            image_binary.seek(0)
            file = discord.File(fp=image_binary, filename=name.replace('_', '') + '.' + image_format)
            embed = discord.Embed(title=message_title, description=message_text, color=self.support.cyan.discord_color,
                                  timestamp=datetime.now(tz=timezone("Europe/Berlin")), type='image')
            embed.set_author(name='AntiPetros', icon_url="https://www.der-buntspecht-shop.de/wp-content/uploads/Baumwollstoff-Camouflage-olivegruen-2.jpg")
            embed.set_image(url=f"attachment://{name.replace('_','')}.{image_format}")
            if delete_after is not None:
                embed.add_field(name='This Message will self destruct', value=f"in {alt_seconds_to_pretty(delete_after)}")
            await ctx.send(embed=embed, file=file, delete_after=delete_after)
    async def _member_avatar_helper(self, user: discord.Member, placement: callable, opacity: float):
        avatar_image = await self.get_avatar_from_user(user)
        stamp = self._get_stamp_image('ASLOGO', opacity)
        modified_avatar = await asyncio.to_thread(placement, avatar_image, stamp, self.avatar_stamp_fraction)
        return modified_avatar

    async def _normalize_pos(self, pos: str):
        pos = pos.casefold()
        if pos not in self.position_normalization_table:
            for key, value in self.position_normalization_table.items():
                if pos in value:
                    return key
            raise ParameterError('image_position', pos)
        return pos  # assumed fix: the extracted source ended without returning already-canonical names

    async def get_avatar_from_user(self, user):
        avatar = user.avatar_url
        temp_dir = TemporaryDirectory()
        temp_file = pathmaker(temp_dir.name, 'user_avatar.png')
        log.debug("Tempfile '%s' created", temp_file)
        await avatar.save(temp_file)
        avatar_image = Image.open(temp_file)
        avatar_image = avatar_image.copy()
        avatar_image = avatar_image.convert('RGB')
        temp_dir.cleanup()
        return avatar_image
    def map_image_handling(self, base_image, marker_name, color, bytes_out):
        log.debug("creating changed map, changed_location: '%s', changed_color: '%s'", marker_name, color)
        marker_image = self.outpost_overlay.get(marker_name)
        marker_alpha = marker_image.getchannel('A')
        marker_image = Image.new('RGBA', marker_image.size, color=color)
        marker_image.putalpha(marker_alpha)
        base_image.paste(marker_image, mask=marker_alpha)
        base_image.save(bytes_out, 'PNG', optimize=True)
        bytes_out.seek(0)
        return base_image, bytes_out

    def draw_text_line(self, image: Image, text_line: str, top_space: int, in_font: ImageFont.FreeTypeFont):
        width, height = image.size
        pfont = in_font
        draw = ImageDraw.Draw(image)
        w, h = draw.textsize(text_line, font=pfont)
        draw.text(((width - w) / 2, h + top_space), text_line, fill=(0, 0, 0), stroke_width=width // 150, stroke_fill=(50, 200, 25), font=pfont)
        return image, top_space + h + (height // 20)

    def draw_text_center(self, image: Image, text: str, in_font: ImageFont.FreeTypeFont):
        width, height = image.size
        pfont = in_font
        draw = ImageDraw.Draw(image)
        w, h = draw.textsize(text, font=pfont)
        draw.text(((width - w) / 2, (height - h) / 2), text, fill=(0, 0, 0), stroke_width=width // 150, stroke_fill=(204, 255, 204), font=pfont)
        return image
    # endregion[HelperMethods]

    # region [SpecialMethods]

    def __repr__(self):
        return f"{self.__class__.__name__}({self.bot.__class__.__name__})"

    def __str__(self):
        return self.qualified_name

    # def cog_unload(self):
    #     log.debug("Cog '%s' UNLOADED!", str(self))

    # endregion[SpecialMethods]


def setup(bot):
    """
    Mandatory function to add the Cog to the bot.
    """
    bot.add_cog(ImageManipulationCog(bot))
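# Usage sketch (assumption: standard discord.py extension loading; the module
# path below is hypothetical):
#
#     bot.load_extension('cogs.image_manipulation_cog')
#
# discord.py then invokes setup(bot) above, which registers the cog.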
| 49.122417 | 214 | 0.643472 | 3,746 | 30,898 | 5.022424 | 0.139349 | 0.043585 | 0.015148 | 0.020198 | 0.422505 | 0.347348 | 0.323057 | 0.288668 | 0.274158 | 0.246784 | 0 | 0.009874 | 0.242831 | 30,898 | 628 | 215 | 49.200637 | 0.794315 | 0.073144 | 0 | 0.280822 | 0 | 0.006849 | 0.092456 | 0.01625 | 0 | 0 | 0 | 0.003185 | 0 | 1 | 0.057078 | false | 0 | 0.052511 | 0.011416 | 0.212329 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec085bbbaef39f0640fc7171919973e59bcfe3fd | 885 | py | Python | exe84.py | henrique-alvaro/EXERCICIOS | dd2509a682009ee4a01f0ca346d960ab09cc284b | [
"MIT"
] | null | null | null | exe84.py | henrique-alvaro/EXERCICIOS | dd2509a682009ee4a01f0ca346d960ab09cc284b | [
"MIT"
] | null | null | null | exe84.py | henrique-alvaro/EXERCICIOS | dd2509a682009ee4a01f0ca346d960ab09cc284b | [
"MIT"
] | null | null | null | reserva = list()
principal = list()
maior = menor = 0
while True:
    reserva.append(str(input('Nome: ')))
    reserva.append(float(input('Peso: ')))
    if len(principal) == 0:
        maior = menor = reserva[1]
    else:
        if reserva[1] > maior:
            maior = reserva[1]
        if reserva[1] < menor:
            menor = reserva[1]
    principal.append(reserva[:])
    reserva.clear()
    d = ' '
    while d not in 'SN':
        d = str(input('Deseja Continuar [S/N]: ')).strip().upper()[0]
    if d == 'N':
        break
print('-=' * 20)
print(f'Ao todo {len(principal)} pessoas foram inscritas')  # "In total {n} people were registered"
print(f'O maior peso foi {maior}. Peso de ', end='')  # "The highest weight was {maior}. Weight of ..."
for c in principal:
    if c[1] == maior:
        print(f'{c[0]} ', end='')  # trailing space keeps multiple names apart
print()
print(f'O menor peso foi {menor}. Peso de ', end='')  # "The lowest weight was {menor}. Weight of ..."
for c in principal:
    if c[1] == menor:
        print(f'{c[0]} ', end='')
print()
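# A more idiomatic variant (illustration only, not part of the original
# exercise): keep each person as a (name, weight) tuple and let max()/min()
# pick the extremes by weight.
def extremos(pessoas):
    mais_pesado = max(pessoas, key=lambda p: p[1])
    mais_leve = min(pessoas, key=lambda p: p[1])
    return mais_pesado, mais_leve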
| 26.818182 | 69 | 0.542373 | 126 | 885 | 3.809524 | 0.349206 | 0.083333 | 0.054167 | 0.05 | 0.183333 | 0.183333 | 0.116667 | 0.116667 | 0.116667 | 0.116667 | 0 | 0.021572 | 0.266667 | 885 | 32 | 70 | 27.65625 | 0.718028 | 0 | 0 | 0.1875 | 0 | 0 | 0.19096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec09685680a3107d366ed078bdbf9404ece34bc4 | 1,560 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/algo/discovery/alpha/utils/endpoints.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/algo/discovery/alpha/utils/endpoints.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/algo/discovery/alpha/utils/endpoints.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
def derive_end_activities_from_log(log, activity_key):
    """
    Derive end activities from log

    Parameters
    -----------
    log
        Log object
    activity_key
        Activity key

    Returns
    -----------
    e
        End activities
    """
    e = set()
    for t in log:
        if len(t) > 0:
            if activity_key in t[len(t) - 1]:
                e.add(t[len(t) - 1][activity_key])
    return e


def derive_start_activities_from_log(log, activity_key):
    """
    Derive start activities from log

    Parameters
    -----------
    log
        Log object
    activity_key
        Activity key

    Returns
    -----------
    s
        Start activities
    """
    s = set()
    for t in log:
        if len(t) > 0:
            if activity_key in t[0]:
                s.add(t[0][activity_key])
    return s
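if __name__ == "__main__":
    # Minimal usage sketch (assumption: a log behaves like a list of traces,
    # where each trace is a list of event mappings keyed by the activity key).
    demo_log = [
        [{"concept:name": "register"}, {"concept:name": "pay"}],
        [{"concept:name": "register"}, {"concept:name": "cancel"}],
    ]
    print(derive_start_activities_from_log(demo_log, "concept:name"))  # {'register'}
    print(derive_end_activities_from_log(demo_log, "concept:name"))    # {'pay', 'cancel'}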
| 24.761905 | 76 | 0.604487 | 218 | 1,560 | 4.252294 | 0.422018 | 0.118662 | 0.073355 | 0.061489 | 0.420712 | 0.36246 | 0.30205 | 0.222222 | 0.222222 | 0.222222 | 0 | 0.011029 | 0.302564 | 1,560 | 62 | 77 | 25.16129 | 0.840993 | 0.616026 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec0b058f553343a897e71eea532d993fcfa42a8b | 926 | py | Python | ground_truth_labeling_jobs/bring_your_own_model_for_sagemaker_labeling_workflows_with_active_learning/src/tests/test_add_record_id.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 2,610 | 2020-10-01T14:14:53.000Z | 2022-03-31T18:02:31.000Z | ground_truth_labeling_jobs/bring_your_own_model_for_sagemaker_labeling_workflows_with_active_learning/src/tests/test_add_record_id.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 1,959 | 2020-09-30T20:22:42.000Z | 2022-03-31T23:58:37.000Z | ground_truth_labeling_jobs/bring_your_own_model_for_sagemaker_labeling_workflows_with_active_learning/src/tests/test_add_record_id.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
] | 2,052 | 2020-09-30T22:11:46.000Z | 2022-03-31T23:02:51.000Z | import boto3
from Bootstrap.add_record_id import lambda_handler
from moto import mock_s3


@mock_s3
def test_add_record_id():
    manifest_content = b'{"source":"Fed revises guidelines sending stocks up."}\n{"source": "Review Guardians of the Galaxy"}'
    s3r = boto3.resource("s3", region_name="us-east-1")
    s3r.create_bucket(Bucket="source_bucket")
    s3r.Object("source_bucket", "input.manifest").put(Body=manifest_content)

    event = {
        "ManifestS3Uri": "s3://source_bucket/input.manifest",
    }

    output = lambda_handler(event, {})

    manifest_content_with_id = b'{"source": "Fed revises guidelines sending stocks up.", "id": 0}\n{"source": "Review Guardians of the Galaxy", "id": 1}\n'
    updated_body = s3r.Object("source_bucket", "input.manifest").get()["Body"].read()
    assert updated_body == manifest_content_with_id
    assert output["ManifestS3Uri"] == "s3://source_bucket/input.manifest"
| 37.04 | 155 | 0.708423 | 125 | 926 | 5.04 | 0.416 | 0.095238 | 0.107937 | 0.15873 | 0.473016 | 0.473016 | 0.238095 | 0.133333 | 0 | 0 | 0 | 0.020151 | 0.142549 | 926 | 24 | 156 | 38.583333 | 0.7733 | 0 | 0 | 0 | 0 | 0.117647 | 0.426566 | 0.071274 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec0f9ff9cd997919f338a2ecbd614e9c94929507 | 828 | py | Python | pages/homepage.py | kriss-u/selenium-python-framework | d4f6416de25c955861f6d8a099a780c702693b63 | [
"MIT"
] | null | null | null | pages/homepage.py | kriss-u/selenium-python-framework | d4f6416de25c955861f6d8a099a780c702693b63 | [
"MIT"
] | 6 | 2021-04-26T16:12:48.000Z | 2021-04-29T17:05:33.000Z | pages/homepage.py | kriss-u/selenium-python-framework | d4f6416de25c955861f6d8a099a780c702693b63 | [
"MIT"
] | null | null | null | from selenium.webdriver.common.by import By
from utilities import find_all_contains_text
from utilities import find_one_present
from utilities import wait
class Homepage:
    def __init__(self, driver, base_url, timeout=10):
        self.driver = driver
        self.base_url = base_url
        self.wait = wait(driver, timeout)

    def open(self):
        self.driver.get(f"{self.base_url}")

    def type_search(self, term):
        driver = self.driver
        search_input = find_one_present(driver, "//input[@name='q']", "xpath")
        search_input.send_keys(term)
        return find_all_contains_text(term, driver, "//ul[@role='listbox']//li", "xpath")

    def clear_search(self):
        driver = self.driver
        search_input = driver.find_element(By.XPATH, "//input[@name='q']")
        search_input.clear()
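# Usage sketch (assumptions: a Chrome driver is on PATH and the local
# `utilities` module provides wait/find_one_present/find_all_contains_text):
#
#     from selenium import webdriver
#
#     driver = webdriver.Chrome()
#     page = Homepage(driver, "https://duckduckgo.com")
#     page.open()
#     suggestions = page.type_search("selenium")
#     page.clear_search()
#     driver.quit()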
| 30.666667 | 89 | 0.669082 | 112 | 828 | 4.714286 | 0.383929 | 0.113636 | 0.107955 | 0.087121 | 0.102273 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003053 | 0.208937 | 828 | 26 | 90 | 31.846154 | 0.803053 | 0 | 0 | 0.1 | 0 | 0 | 0.103865 | 0.030193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec12e98cc644839d7e6aaadb7ca7b24595f6ffa0 | 8,897 | py | Python | main/page/foreign_transfer.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | main/page/foreign_transfer.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | main/page/foreign_transfer.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | #_*_ coding: UTF-8 _*_
from flask import request, redirect, render_template
from application import app
import wtforms
import db
import data_models
import mailer
import renderers
import properties
import views
from role_types import RoleType
import urls
from . import grants
from . import purchases
STATE_REQUESTED = 1
STATE_TRANSFERRED = 2
state_labels = ['Closed', 'Requested', 'Transferred']
class TransferModel(data_models.Model):
    def __init__(self, entity, grant_list, payment_list):
        super(TransferModel, self).__init__(entity, None)
        self.grant_list = grant_list
        self.payment_list = payment_list

    def perform_transferred(self, action_name):
        form = self.get_form(action_name)
        if not form.validate():
            return False
        transfer = self.entity
        form.populate_obj(transfer)
        transfer.state_index = STATE_TRANSFERRED
        transfer.put()
        parent_audit = self.audit(action_name, 'Transfer performed')
        for payment in self.payment_list:
            payment.paid = True
            payment.put()
            purchase = data_models.get_parent(payment)
            if payment.payment_type == 'invoice':
                purchase.state_index = data_models.STATE_CLOSED
                purchase.put()
            self.audit(action_name, 'Payment transferred', purchase, parent_audit.key)
            data_models.email_entity_creator(purchase, self.user, 'Payment transferred')
        for grant in self.grant_list:
            data_models.email_entity_creator(grant, self.user, 'Transfer performed')
        self.send_supplier_email()
        return True

    def send_supplier_email(self):
        transfer = self.entity
        supplier = data_models.get_parent(transfer)
        column = views.view_entity_single_column(transfer, email_properties)
        purchase_payments = render_purchase_payments_list(self.payment_list)
        grant_payments = render_grants_due_list(self.grant_list, selectable=False, no_links=True)
        content = renderers.render_div(column, purchase_payments, grant_payments)
        mailer.send_email('PONT Transfer %s' % transfer.ref_id, content, supplier.contact_emails)

    def perform_ack(self, action_name):
        parent_audit = self.perform_close(action_name)
        transfer = self.entity
        for grant in self.grant_list:
            project = grant.project.get()
            if project.partner is None:
                grant.state_index = data_models.STATE_CLOSED
                grant.put()
                self.audit(action_name, 'Transfer acknowledged', grant, parent_audit.key)
                data_models.email_entity_creator(grant, self.user, 'Transfer acknowledged')
        return True
ACTION_TRANSFERRED = views.StateAction('transferred', 'Transferred', RoleType.PAYMENT_ADMIN,
                                       TransferModel.perform_transferred, [STATE_REQUESTED])
ACTION_ACKNOWLEDGED = views.StateAction('ack', 'Received', RoleType.PAYMENT_ADMIN,
                                        TransferModel.perform_ack, [STATE_TRANSFERRED])

action_list = [ACTION_TRANSFERRED, ACTION_ACKNOWLEDGED]
def show_totals(transfer):
    sterling, shillings = transfer.totals
    return u"£{:,} + {:,} Ush".format(sterling, shillings)


def show_shillings(transfer):
    if not transfer.exchange_rate:
        return ""
    sterling, shillings = transfer.totals
    total_shillings = int(sterling * transfer.exchange_rate) + shillings
    return u"{:,} Ush".format(total_shillings)
ref_field = properties.StringProperty('ref_id')
state_field = properties.SelectProperty('state_index', 'State', enumerate(state_labels))
creator_field = properties.KeyProperty('creator')
creation_date_field = properties.DateProperty('creation_date', format='%Y-%m-%d')
rate_field = properties.StringProperty('exchange_rate')
request_totals_field = properties.StringProperty(show_totals, 'Request Totals')
shillings_total_field = properties.StringProperty(show_shillings, 'Total Amount')
def get_partner(grant):
    project = grant.project.get()
    if project.partner:
        return project.partner.get().name
    return ""
grant_field_list = [
    grants.state_field, grants.creator_field, grants.project_field, grants.amount_field,
    grants.transferred_amount_field,
    properties.StringProperty(get_partner, 'Implementing Partner'),
    grants.source_field,
    properties.StringProperty(lambda e: e.project.get().fund.get().name, 'Destination Fund')
]
po_number_field = properties.StringProperty(lambda e: e.key.parent().get().po_number, 'PO Number')
requestor_field = properties.KeyProperty(lambda e: e.key.parent().get().creator, 'Requestor')
source_field = properties.StringProperty(lambda e: e.key.parent().parent().get().code, 'Source Fund')
payment_field_list = [purchases.payment_type_field, po_number_field, requestor_field, source_field,
                      purchases.payment_amount_field]


class ExchangeRateForm(wtforms.Form):
    exchange_rate = wtforms.IntegerField('Exchange Rate', validators=[wtforms.validators.InputRequired()])
@app.route('/foreigntransfer_list/<db_id>')
def view_foreigntransfer_list(db_id):
    supplier = data_models.lookup_entity(db_id)
    dummy_transfer = db.ForeignTransfer(parent=supplier.key)
    model = data_models.Model(dummy_transfer, None)
    breadcrumbs = views.view_breadcrumbs(supplier)
    transfer_query = db.ForeignTransfer.query(ancestor=supplier.key).order(-db.ForeignTransfer.state_index,
                                                                           db.ForeignTransfer.ref_id)
    transfer_fields = [state_field, ref_field, creation_date_field, rate_field]
    model.show_closed = 'show_closed' in request.args  # dict.has_key() no longer exists in Python 3
    db_filter = db.ForeignTransfer.state_index == 0 if model.show_closed else db.ForeignTransfer.state_index > 0
    transfer_query = transfer_query.filter(db_filter)
    entity_table = views.view_entity_list(transfer_query.fetch(), transfer_fields)
    buttons = views.view_actions([views.ACTION_FILTER], model)
    user_controls = views.view_user_controls(model)
    return render_template('layout.html', title='Foreign Transfer List', breadcrumbs=breadcrumbs,
                           user=user_controls, buttons=buttons, content=entity_table)
def render_grants_due_list(grant_list, selectable=True, no_links=True):
    sub_heading = renderers.sub_heading('Grant Payments')
    table = views.view_entity_list(grant_list, grant_field_list, selectable, no_links)
    return (sub_heading, table)


def render_purchase_payments_list(payment_list):
    column_headers = properties.get_labels(payment_field_list)
    payment_grid = properties.display_entity_list(payment_list, payment_field_list, no_links=True)
    purchase_list = [data_models.get_parent(e) for e in payment_list]
    payment_url_list = map(urls.url_for_entity, purchase_list)
    sub_heading = renderers.sub_heading('Purchase Payments')
    table = renderers.render_table(column_headers, payment_grid,
                                   payment_url_list)
    return (sub_heading, table)
def calculate_totals(payments):
    total_sterling = 0
    total_shillings = 0
    for p in payments:
        if p.amount.currency == 'sterling':
            total_sterling += p.amount.value
        else:
            total_shillings += p.amount.value
    return (total_sterling, total_shillings)
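# Worked example (hypothetical payment objects): two sterling payments of 100
# and 250 plus one shilling payment of 5000 yield (350, 5000), which
# show_totals above renders as "£350 + 5,000 Ush".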
@app.route('/foreigntransfer/<db_id>', methods=['GET', 'POST'])
def view_foreigntransfer(db_id):
    transfer = data_models.lookup_entity(db_id)
    grant_list = db.Grant.query(db.Grant.transfer == transfer.key).fetch()
    payment_list = db.PurchasePayment.query(db.PurchasePayment.transfer == transfer.key).fetch()
    transfer.totals = calculate_totals(grant_list + payment_list)
    form = ExchangeRateForm(request.form)
    model = TransferModel(transfer, grant_list, payment_list)
    model.add_form(ACTION_TRANSFERRED.name, form)
    if request.method == 'POST' and views.handle_post(model, action_list):
        return redirect(request.base_url)
    transfer_fields = (creation_date_field, ref_field, state_field, rate_field, request_totals_field,
                       shillings_total_field, creator_field)
    breadcrumbs = views.view_breadcrumbs_list(transfer)
    grid = views.view_entity(transfer, transfer_fields)
    grant_payments = render_grants_due_list(grant_list)
    purchase_payments = render_purchase_payments_list(payment_list)
    history = views.view_entity_history(transfer.key)
    content = renderers.render_div(grid, purchase_payments, grant_payments, history)
    buttons = views.view_actions(action_list, model)
    user_controls = views.view_user_controls(model)
    return render_template('layout.html', title='Foreign Transfer', breadcrumbs=breadcrumbs, user=user_controls,
                           buttons=buttons, content=content)
email_properties = (ref_field, shillings_total_field)
| 46.098446 | 112 | 0.725188 | 1,063 | 8,897 | 5.780809 | 0.171214 | 0.021155 | 0.037754 | 0.009764 | 0.237592 | 0.177543 | 0.109845 | 0.089504 | 0.046216 | 0.030269 | 0 | 0.000961 | 0.18096 | 8,897 | 192 | 113 | 46.338542 | 0.842185 | 0.00236 | 0 | 0.10303 | 0 | 0 | 0.063331 | 0.005973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072727 | false | 0 | 0.078788 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec1403cf6f7b91861d5877a772a1b93da4045f15 | 2,332 | py | Python | finite_element_networks/lightning/data/common.py | martenlienen/finite-element-networks | 5e8f6ecc473d1e93ccf366fcc45a47b08492ffde | [
"MIT"
] | 5 | 2022-03-21T12:39:01.000Z | 2022-03-31T06:02:01.000Z | finite_element_networks/lightning/data/common.py | martenlienen/finite-element-networks | 5e8f6ecc473d1e93ccf366fcc45a47b08492ffde | [
"MIT"
] | null | null | null | finite_element_networks/lightning/data/common.py | martenlienen/finite-element-networks | 5e8f6ecc473d1e93ccf366fcc45a47b08492ffde | [
"MIT"
] | 1 | 2022-03-26T02:58:58.000Z | 2022-03-26T02:58:58.000Z | from dataclasses import dataclass
from typing import Callable, Optional
import numpy as np
from scipy.spatial import Delaunay
from ...data import TimeEncoder
from ...domain import (
    BoundaryAnglePredicate,
    CellPredicate,
    Domain,
    select_boundary_mesh_cells,
)


@dataclass(frozen=True)
class MeshConfig:
    """Configuration for the generation of a sparse mesh from a larger set of points.

    Attributes
    ----------
    k
        Number of nodes to select
    epsilon
        Maximum angle of boundary cells to filter out in degrees
    seed
        Random seed for reproducibility
    """

    k: int
    epsilon: float
    seed: int

    def random_state(self):
        return np.random.RandomState(int(self.seed) % 2**32)

    def epsilon_radians(self):
        return self.epsilon * np.pi / 180

    def angle_predicate(self, tri: Delaunay):
        return BoundaryAnglePredicate(tri.points, self.epsilon_radians())


def sample_mesh(
    config: MeshConfig,
    points: np.ndarray,
    predicate_factory: Optional[Callable[[Delaunay], CellPredicate]] = None,
) -> tuple[np.ndarray, Domain]:
    """Create a domain from a subset of points, optionally filtering out some cells.

    Returns
    -------
    Indices of the points that were selected as mesh nodes and the domain
    """
    import skfem
    from sklearn_extra.cluster import KMedoids

    # Select k sparse observation points uniformly-ish
    km = KMedoids(
        n_clusters=config.k, init="k-medoids++", random_state=config.random_state()
    )
    km.fit(points)
    node_indices = km.medoid_indices_

    # Mesh the points with Delaunay triangulation
    tri = Delaunay(points[node_indices])

    # Filter out mesh boundary cells that are too acute or contain mostly land
    if predicate_factory is not None:
        predicate = predicate_factory(tri)
        filter = select_boundary_mesh_cells(tri, predicate)
        tri.simplices = tri.simplices[~filter]

    # Ensure that every node is in at least one mesh cell
    cell_counts = np.zeros(config.k, dtype=int)
    np.add.at(cell_counts, tri.simplices, 1)
    assert all(cell_counts >= 1)

    mesh = skfem.MeshTri(
        np.ascontiguousarray(tri.points.T), np.ascontiguousarray(tri.simplices.T)
    )
    domain = Domain(tri.points, mesh=mesh)

    return node_indices, domain
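# Usage sketch (illustration only; this module cannot be run standalone
# because of the package-relative imports above):
#
#     rng = np.random.RandomState(0)
#     pts = rng.uniform(size=(500, 2))
#     cfg = MeshConfig(k=50, epsilon=15.0, seed=0)
#     idx, domain = sample_mesh(cfg, pts, predicate_factory=cfg.angle_predicate)
#     # idx indexes the 50 selected points; domain wraps the filtered triangulation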
| 27.435294 | 85 | 0.690823 | 300 | 2,332 | 5.283333 | 0.423333 | 0.030284 | 0.022713 | 0.029022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004425 | 0.2247 | 2,332 | 84 | 86 | 27.761905 | 0.872235 | 0.268868 | 0 | 0 | 0 | 0 | 0.00672 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.085106 | false | 0 | 0.170213 | 0.06383 | 0.425532 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec15071df1bf9ea9d459a13a9ce32f102c847633 | 1,245 | py | Python | DataProcess/community_detection.py | teamclouday/Mooner | ed7deed101e92b1d8f5ec47091cdbdadb2c1159c | [
"MIT"
] | null | null | null | DataProcess/community_detection.py | teamclouday/Mooner | ed7deed101e92b1d8f5ec47091cdbdadb2c1159c | [
"MIT"
] | null | null | null | DataProcess/community_detection.py | teamclouday/Mooner | ed7deed101e92b1d8f5ec47091cdbdadb2c1159c | [
"MIT"
] | null | null | null | import os
import community
import pandas as pd
import networkx as nx
graphdf = pd.read_csv(os.path.join("..", "NetworkData", "fetchcontent.csv"))
G = nx.from_pandas_edgelist(graphdf)
partition = community.best_partition(G)
# following code comes from https://medium.com/@adityagandhi.7/network-analysis-and-community-structure-for-market-surveillance-using-python-networkx-65413e7b7fee
values = [partition.get(node) for node in G.nodes()]
list_com = partition.values()
# Creating a dictionary like {community_number:list_of_participants}
dict_nodes = {}
# Populating the dictionary with items
for each_item in partition.items():
    community_num = each_item[1]
    community_node = str(each_item[0])
    if community_num in dict_nodes.keys():
        value = dict_nodes.get(community_num) + ' | ' + community_node
        dict_nodes.update({community_num: value})
    else:
        dict_nodes.update({community_num: community_node})
# Creating a dataframe from the dict, and writing the output to a CSV file
community_df = pd.DataFrame.from_dict(dict_nodes, orient='index', columns=['Members'])
community_df.index.rename('Community Num', inplace=True)
community_df.to_csv('community.csv')
print("Number of communities: {}".format(len(community_df.index))) | 36.617647 | 162 | 0.767871 | 176 | 1,245 | 5.255682 | 0.488636 | 0.058378 | 0.045405 | 0.054054 | 0.058378 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009042 | 0.111647 | 1,245 | 34 | 163 | 36.617647 | 0.827306 | 0.268273 | 0 | 0 | 0 | 0 | 0.104741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec18386bf1e4f992e71d8f27e8056dac7297cb93 | 1,404 | py | Python | getFileList.py | DoJing/hd3 | 124b1b169422898830d2cd602cf6e074f1238b6f | [
"BSD-3-Clause"
] | null | null | null | getFileList.py | DoJing/hd3 | 124b1b169422898830d2cd602cf6e074f1238b6f | [
"BSD-3-Clause"
] | null | null | null | getFileList.py | DoJing/hd3 | 124b1b169422898830d2cd602cf6e074f1238b6f | [
"BSD-3-Clause"
] | null | null | null | import os
import cv2
def ListFilesToTxt(dir, file, wildcard, recursion):
    file_list = []
    exts = wildcard.split(" ")
    files = os.listdir(dir)
    for name in files:
        fullname = os.path.join(dir, name)
        if (os.path.isdir(fullname) & recursion):
            ListFilesToTxt(fullname, file, wildcard, recursion)
        else:
            for ext in exts:
                if (name.endswith(ext)):
                    file_list.append(name)
                    break
    file_list.sort()
    resize = False
    factor = 0.5
    if (resize):
        for i in range(0, len(file_list)):
            fullname = os.path.join(dir, file_list[i])
            img = cv2.imread(fullname)
            height, width = img.shape[:2]
            size = (int(width * factor), int(height * factor))
            img = cv2.resize(img, size)
            fullname = os.path.join("/media/doing/C8BA5288BA5272C4/LINUX/pot", file_list[i])
            cv2.imwrite(fullname, img)
    for i in range(0, len(file_list) - 1):
        file.write(file_list[i] + " ")
        file.write(file_list[i + 1] + "\n")


def getFileList():
    dir = "/media/doing/Samsung USB/flowerpot"  # directory to scan (was: 文件路径, "file path")
    outfile = "flowerpot.txt"  # name of the txt file to write (was: 写入的txt文件名)
    wildcard = ".JPG"  # file extension(s) to read (was: 要读取的文件类型)
    file = open(outfile, "w")
    if not file:
        print("cannot open the file %s for writing" % outfile)
    ListFilesToTxt(dir, file, wildcard, 1)
    file.close()


getFileList()
| 29.87234 | 92 | 0.581909 | 178 | 1,404 | 4.539326 | 0.404494 | 0.089109 | 0.044554 | 0.066832 | 0.153465 | 0.056931 | 0.056931 | 0.056931 | 0 | 0 | 0 | 0.021912 | 0.2849 | 1,404 | 46 | 93 | 30.521739 | 0.782869 | 0.01567 | 0 | 0 | 0 | 0 | 0.094271 | 0.028281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.1 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec18cd1695ba62deebb499631731dcc890f6543b | 7,647 | py | Python | Lib/site-packages/ginga/canvas/transform.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/ginga/canvas/transform.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/ginga/canvas/transform.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | #
# transform.py -- coordinate transforms for Ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from ginga import trcalc
__all__ = ['TransformError', 'BaseTransform', 'ComposedTransform',
           'CanvasWindowTransform', 'CartesianWindowTransform',
           'RotationTransform', 'ScaleTransform',
           'DataCartesianTransform', 'OffsetDataTransform',
           'WCSDataTransform',
           ]
class TransformError(Exception):
    pass


class BaseTransform(object):

    def __init__(self):
        super(BaseTransform, self).__init__()

    def to_(self, x, y):
        raise TransformError("subclass should override this method")

    def from_(self, tx, ty):
        raise TransformError("subclass should override this method")

    def __add__(self, trans):
        return ComposedTransform(self, trans)


class ComposedTransform(BaseTransform):
    """
    A transform that composes two other transforms to make a new one.
    """

    def __init__(self, tform1, tform2):
        super(ComposedTransform, self).__init__()
        self.tform1 = tform1
        self.tform2 = tform2

    def to_(self, x, y, **kwargs):
        return self.tform2.to_(*self.tform1.to_(x, y, **kwargs))

    def from_(self, tx, ty, **kwargs):
        return self.tform1.from_(*self.tform2.from_(tx, ty), **kwargs)
class CanvasWindowTransform(BaseTransform):
    """
    A transform from a possibly Y-flipped pixel space to a typical
    window pixel coordinate space with the lower left at (0, 0).
    """

    def __init__(self, viewer):
        super(CanvasWindowTransform, self).__init__()
        self.viewer = viewer

    def to_(self, cvs_x, cvs_y):
        if self.viewer._originUpper:
            return (cvs_x, cvs_y)

        # invert Y coord for backends that have the origin in the lower left
        win_wd, win_ht = self.viewer.get_window_size()
        win_x, win_y = cvs_x, win_ht - cvs_y
        return (win_x, win_y)

    def from_(self, win_x, win_y):
        return self.to_(win_x, win_y)
class CartesianWindowTransform(BaseTransform):
    """
    A transform from cartesian coordinates to the window pixel coordinates
    of a viewer.
    """

    def __init__(self, viewer, as_int=True):
        super(CartesianWindowTransform, self).__init__()
        self.viewer = viewer
        self.as_int = as_int

    def to_(self, off_x, off_y):
        # add center pixel to convert from X/Y coordinate space to
        # canvas graphics space
        ctr_x, ctr_y = self.viewer.get_center()
        win_x = off_x + ctr_x
        if self.viewer._originUpper:
            win_y = ctr_y - off_y
        else:
            win_y = off_y + ctr_y

        # round to pixel units, if asked
        if self.as_int:
            win_x = np.rint(win_x).astype(int)  # np.int was removed in NumPy 1.24; builtin int is equivalent here
            win_y = np.rint(win_y).astype(int)
        return (win_x, win_y)

    def from_(self, win_x, win_y):
        """Reverse of :meth:`to_`."""
        # make relative to center pixel to convert from canvas
        # graphics space to standard X/Y coordinate space
        ctr_x, ctr_y = self.viewer.get_center()
        off_x = win_x - ctr_x
        if self.viewer._originUpper:
            off_y = ctr_y - win_y
        else:
            off_y = win_y - ctr_y
        return (off_x, off_y)
class RotationTransform(BaseTransform):
    """
    A transform in cartesian coordinates based on the flip/swap setting and
    rotation setting of a viewer.
    """

    def __init__(self, viewer):
        super(RotationTransform, self).__init__()
        self.viewer = viewer

    def to_(self, off_x, off_y):
        t_ = self.viewer.t_
        if t_['flip_x']:
            off_x = - off_x
        if t_['flip_y']:
            off_y = - off_y
        if t_['swap_xy']:
            off_x, off_y = off_y, off_x
        if t_['rot_deg'] != 0:
            off_x, off_y = trcalc.rotate_pt(off_x, off_y, t_['rot_deg'])
        return (off_x, off_y)

    def from_(self, off_x, off_y):
        """Reverse of :meth:`to_`."""
        t_ = self.viewer.t_
        if t_['rot_deg'] != 0:
            off_x, off_y = trcalc.rotate_pt(off_x, off_y, -t_['rot_deg'])
        if t_['swap_xy']:
            off_x, off_y = off_y, off_x
        if t_['flip_y']:
            off_y = - off_y
        if t_['flip_x']:
            off_x = - off_x
        return (off_x, off_y)


class ScaleTransform(BaseTransform):
    """
    A transform in cartesian coordinates based on the scale of a viewer.
    """

    def __init__(self, viewer):
        super(ScaleTransform, self).__init__()
        self.viewer = viewer

    def to_(self, off_x, off_y):
        """Reverse of :meth:`from_`."""
        # scale according to current settings
        off_x *= self.viewer._org_scale_x
        off_y *= self.viewer._org_scale_y
        return (off_x, off_y)

    def from_(self, off_x, off_y):
        # Reverse scaling
        off_x = off_x * (1.0 / self.viewer._org_scale_x)
        off_y = off_y * (1.0 / self.viewer._org_scale_y)
        return (off_x, off_y)
class DataCartesianTransform(BaseTransform):
    """
    A transform from data coordinates to cartesian coordinates based on
    a viewer's pan position.
    """

    def __init__(self, viewer, use_center=True):
        super(DataCartesianTransform, self).__init__()
        self.viewer = viewer
        # If use_center is True, then the coordinates are mapped such that the
        # pixel is centered on the square when the image is zoomed in past
        # 1X. This is the specification of the FITS image standard,
        # that the pixel is centered on the integer row/column.
        self.use_center = use_center

    def to_(self, data_x, data_y):
        """Reverse of :meth:`from_`."""
        if self.use_center:
            data_x -= self.viewer.data_off
            data_y -= self.viewer.data_off

        # subtract data indexes at center reference pixel
        off_x = data_x - self.viewer._org_x
        off_y = data_y - self.viewer._org_y
        return (off_x, off_y)

    def from_(self, off_x, off_y):
        # Add data index at center to offset
        data_x = self.viewer._org_x + off_x
        data_y = self.viewer._org_y + off_y

        if self.use_center:
            data_x += self.viewer.data_off
            data_y += self.viewer.data_off
        return (data_x, data_y)


class OffsetDataTransform(BaseTransform):
    """
    A transform whose coordinate space is offsets from a point in
    data space.
    """

    def __init__(self, pt):
        super(OffsetDataTransform, self).__init__()
        self.pt = pt

    def to_(self, delta_x, delta_y):
        ref_x, ref_y = self.pt[:2]
        return (ref_x + delta_x, ref_y + delta_y)

    def from_(self, data_x, data_y):
        ref_x, ref_y = self.pt[:2]
        return (data_x - ref_x, data_y - ref_y)
class WCSDataTransform(BaseTransform):
    """
    A transform whose coordinate space is based on the WCS of the primary
    image loaded in a viewer.
    """

    def __init__(self, viewer):
        super(WCSDataTransform, self).__init__()
        self.viewer = viewer

    def to_(self, lon, lat):
        image = self.viewer.get_image()
        if image is None:
            raise TransformError("No image, no WCS")
        data_x, data_y = image.radectopix(lon, lat)
        return (data_x, data_y)

    def from_(self, data_x, data_y):
        image = self.viewer.get_image()
        if image is None:
            raise TransformError("No image, no WCS")
        lon, lat = image.pixtoradec(data_x, data_y)
        return (lon, lat)
#END
| 28.427509 | 78 | 0.613574 | 1,058 | 7,647 | 4.135161 | 0.169187 | 0.077714 | 0.0336 | 0.032914 | 0.465371 | 0.403886 | 0.383314 | 0.294857 | 0.221029 | 0.1712 | 0 | 0.003849 | 0.286518 | 7,647 | 268 | 79 | 28.533582 | 0.798021 | 0.212109 | 0 | 0.442177 | 0 | 0 | 0.059418 | 0.011473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0.006803 | 0.013605 | 0.027211 | 0.394558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec1e30101ec04ff986f4f441cf6da3dbede160cc | 13,178 | py | Python | src/mvnfeed_modules/mvnfeed-cli-transfer/mvnfeed/cli/transfer/transfer.py | Bhaskers-Blu-Org2/mvnfeed-cli | cf4b43e300edee8f5bc64de9bcf1faf924fa2737 | [
"MIT"
] | 8 | 2019-08-05T20:28:45.000Z | 2021-09-02T09:20:59.000Z | src/mvnfeed_modules/mvnfeed-cli-transfer/mvnfeed/cli/transfer/transfer.py | Bhaskers-Blu-Org2/mvnfeed-cli | cf4b43e300edee8f5bc64de9bcf1faf924fa2737 | [
"MIT"
] | 3 | 2019-07-31T10:12:52.000Z | 2021-09-13T12:01:51.000Z | src/mvnfeed_modules/mvnfeed-cli-transfer/mvnfeed/cli/transfer/transfer.py | easterapps/mvnfeed-cli | cf4b43e300edee8f5bc64de9bcf1faf924fa2737 | [
"MIT"
] | 11 | 2019-07-31T12:58:14.000Z | 2021-09-13T12:50:21.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import logging
import os.path
import requests
import shutil
import xml.etree.ElementTree as ET
try:
    # Python 3
    from urllib.request import Request, urlopen
except ImportError:
    # Python 2
    from urllib2 import Request, urlopen
from .configuration import get_repository, get_stagedir, get_repository_shortname
from mvnfeed.cli.common.config import AUTHORIZATION, URL, load_config
def transfer_artifact(name, from_repo, to_repo, transfer_deps=False):
    """
    Transfers a single artifact.

    :param name: name of the artifact to download, following the group_id:artifact_id:version format
    :param from_repo: name of the source repository
    :param to_repo: name of the destination repository
    :param transfer_deps: True if the dependencies must be transferred
    """
    logging.info('transferring %s', name)
    config = load_config()
    from_repository = get_repository(config, from_repo)
    to_repository = get_repository(config, to_repo)
    stage_dir = get_stagedir(config)

    _transfer_single_artifact(name, from_repository, to_repository, stage_dir, transfer_deps)
def transfer_bulk(filename, from_repo, to_repo, transfer_deps=False):
    """
    Transfers artifacts from a file, one artifact per line.

    :param filename: name of the file containing the mvnfeed to upload
    :param from_repo: name of the source repository
    :param to_repo: name of the destination repository
    :param transfer_deps: True if the dependencies must be transferred
    """
    logging.info('transferring from file %s', filename)
    config = load_config()
    from_repository = get_repository(config, from_repo)
    to_repository = get_repository(config, to_repo)
    stage_dir = get_stagedir(config)

    with open(filename, 'r') as file:
        lines = file.readlines()
        for line in lines:
            line = line.strip().rstrip()
            if line:
                _transfer_single_artifact(line, from_repository, to_repository, stage_dir, transfer_deps)
def _transfer_single_artifact(name, from_repository, to_repository, stage_dir, transfer_deps):
    logging.debug('download url: %s', from_repository[URL])
    logging.debug('upload url: %s', to_repository[URL])
    logging.debug('stage directory: %s', stage_dir)

    if not os.path.exists(stage_dir):
        raise ValueError('Output directory doesn\'t exist: ' + stage_dir)

    values = name.split(':')
    if len(values) == 3:
        group_id = values[0]
        artifact_name = values[1]
        # try to guess if we have a bom file
        if '-bom' in artifact_name:
            artifact_type = 'pom'
        else:
            artifact_type = 'jar'
        version = values[2]
        artifact_fullname = artifact_name + '-' + version
    elif len(values) == 4:
        group_id = values[0]
        artifact_name = values[1]
        artifact_type = values[2]
        version = values[3]
        artifact_fullname = artifact_name + '-' + version
    elif len(values) == 5:
        group_id = values[0]
        artifact_name = values[1]
        artifact_type = values[2]
        version = values[4]
        artifact_fullname = artifact_name + '-' + version + '-' + values[3]
    else:
        logging.warning('Artifact doesn\'t have correct format. Skipping ' + name)
        return

    artifact_path = group_id.replace('.', '/') + '/' + artifact_name + '/' + version
    if artifact_type in ['jar', 'war']:
        files2transfer = _java_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps)
    else:
        files2transfer = _untyped_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps)

    for file2transfer in files2transfer:
        artifact_relativepath = file2transfer['path'] + '/' + file2transfer['name']
        already_uploaded = _already_uploaded(to_repository, artifact_relativepath)
        if already_uploaded and not file2transfer['name'].endswith('.pom'):
            logging.info('%s already uploaded. Skipping', file2transfer['name'])
            continue

        # let's always download POM files in case we need to process the parent POM
        # once again or upload the children dependencies.
        outfile = os.path.join(stage_dir, file2transfer['name'])
        _download_file(from_repository, artifact_relativepath, outfile)
        if not os.path.exists(outfile):
            logging.info('%s was not downloaded. Skipping', outfile)
            if file2transfer['target']:
                logging.warning('%s was not found in the repository', file2transfer['name'])
            continue

        if not already_uploaded:
            _upload_file(to_repository, artifact_relativepath, outfile)

        if file2transfer['name'].endswith('.pom'):
            # a library will not be installed if its parent pom.xml file
            # is not present in the repository, so let's transfer the
            # parent POM file but without transferring its dependencies.
            tree = ET.parse(outfile)
            parentNode = tree.getroot().find('{http://maven.apache.org/POM/4.0.0}parent')
            if parentNode is not None:
                parent_group_id = _findNodeValue(parentNode, 'groupId')
                parent_artifact_id = _findNodeValue(parentNode, 'artifactId')
                parent_version = _findNodeValue(parentNode, 'version')
                parent_path = parent_group_id.replace('.', '/') + '/' + parent_artifact_id + '/' + parent_version
                files2transfer.append(_pom_artifact(parent_artifact_id + '-' + parent_version, parent_path))

            if 'transfer_deps' not in file2transfer or not file2transfer['transfer_deps']:
                logging.info('not transferring dependencies from %s', file2transfer['name'])
                continue

            # try to download the dependencies
            dependenciesNode = tree.getroot().find('{http://maven.apache.org/POM/4.0.0}dependencies')
            if dependenciesNode is None:
                continue

            logging.debug("Downloading children")
            for dependencyNode in dependenciesNode.getchildren():
                dep_group_id = _findNodeValue(dependencyNode, 'groupId')
                dep_artifact_id = _findNodeValue(dependencyNode, 'artifactId')
                dep_version = _findNodeValue(dependencyNode, 'version')

                # we're only downloading `compile` versions. The user can
                # easily download other dependencies if needed.
                dep_scope = _findNodeValue(dependencyNode, 'scope')
                if dep_scope is not None and dep_scope != 'compile':
                    logging.info('not downloading %s:%s with scope %s', dep_group_id, dep_artifact_id, dep_scope)
                    continue

                # if no version has been defined, than it's getting potentially
                # tricky so let's just give up and let the user deal with it
                if dep_version is None:
                    logging.error('missing explicit version for %s:%s in %s. Skipping',
                                  dep_group_id, dep_artifact_id, file2transfer['name'])
                    continue

                # let's download the dependency
                artifact_fullname = dep_artifact_id + '-' + dep_version
                artifact_path = dep_group_id.replace('.', '/') + '/' + dep_artifact_id + '/' + dep_version
                files2transfer.extend(_java_artifacts(artifact_fullname, 'jar', artifact_path, transfer_deps))
# Definitions of the artifacts to download:
# - name: name of the artifact
# - path: full path of the artifact, will be prepended to the urls
# - transfer_deps: true if the dependencies defined in the pom file must be transferred
# - target: true if definition was created for an artifact that was
#           explicitly requested. Used for logging purpose.
def _pom_artifact(artifact_fullname, artifact_path):
    return {
        'name': artifact_fullname + '.pom',
        'path': artifact_path,
        'transfer_deps': False,
        'target': False
    }


def _java_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps):
    return [
        {
            'name': artifact_fullname + '.' + artifact_type,
            'path': artifact_path,
            'target': True
        },
        {
            'name': artifact_fullname + '.pom',
            'path': artifact_path,
            'transfer_deps': transfer_deps,
            'target': False
        },
        {
            'name': artifact_fullname + '-tests.jar',
            'path': artifact_path,
            'target': False
        },
        {
            'name': artifact_fullname + '-sources.jar',
            'path': artifact_path,
            'target': False
        },
        {
            'name': artifact_fullname + '-javadoc.jar',
            'path': artifact_path,
            'target': False
        }
    ]


def _untyped_artifacts(artifact_fullname, artifact_type, artifact_path, transfer_deps):
    return [
        {
            'name': artifact_fullname + '.' + artifact_type,
            'path': artifact_path,
            'transfer_deps': transfer_deps,
            'target': True
        },
        {
            'name': artifact_fullname + '.pom',
            'path': artifact_path,
            'transfer_deps': transfer_deps,
            'target': False
        }
    ]
def _findNodeValue(node, name):
    foundNode = node.find('{http://maven.apache.org/POM/4.0.0}' + name)
    if foundNode is None:
        return None
    return foundNode.text
def _download_file(from_repository, path, filename, length=16*1024):
    """
    Stores the path into the given filename.
    """
    if os.path.exists(filename):
        logging.debug('%s already downloaded', filename)

    if URL not in from_repository or not from_repository[URL]:
        raise ValueError('Repository missing url: ' + get_repository_shortname(from_repository))

    url = _append_url(from_repository[URL], path)
    logging.debug('downloading from %s', url)
    try:
        request = Request(url)
        if AUTHORIZATION in from_repository and from_repository[AUTHORIZATION]:
            logging.debug('authorization header added')
            request.add_header('Authorization', from_repository[AUTHORIZATION])
        else:
            logging.debug('no authorization configured')
        response = urlopen(request)
        with open(filename, 'wb') as file:
            shutil.copyfileobj(response, file, length)
    except Exception as ex:
        logging.debug('exception while downloading (expected): %s', ex)
def _already_uploaded(to_repository, path):
    """
    Return True if the file was already uploaded.
    """
    if URL not in to_repository or not to_repository[URL]:
        raise ValueError('Repository missing upload url: ' + get_repository_shortname(to_repository))

    url = _append_url(to_repository[URL], path)
    if AUTHORIZATION in to_repository and to_repository[AUTHORIZATION]:
        logging.debug('authorization header added')
        headers = {'Authorization': to_repository[AUTHORIZATION]}
    else:
        logging.debug('no authorization configured')
        headers = {}
    try:
        response = requests.head(url, headers=headers)
        return response.ok
    except Exception as ex:
        logging.debug('exception while checking existence %s', ex)
        return False
def _upload_file(to_repository, path, filename):
    """
    Returns True if the file was uploaded
    """
    if not os.path.exists(filename):
        # we try to upload a file that was not downloaded (for example an artifact without
        # sources.) This is expected to happen and is not an error.
        logging.debug('missing file to upload, skipping %s', filename)
        return False

    if URL not in to_repository or not to_repository[URL]:
        raise ValueError('Repository missing upload url: ' + get_repository_shortname(to_repository))

    url = _append_url(to_repository[URL], path)
    logging.debug('uploading to ' + url)
    if AUTHORIZATION in to_repository and to_repository[AUTHORIZATION]:
        logging.debug('authorization header added')
        headers = {'Authorization': to_repository[AUTHORIZATION]}
    else:
        logging.debug('no authorization configured')
        headers = {}
    try:
        with open(filename, 'rb') as file:
            response = requests.put(url, files={filename: file}, headers=headers)
            if not response.ok:
                logging.error('error while uploading of %s: %s', path, response.text)
            return True
    except Exception as ex:
        logging.warning('exception while uploading %s', ex)  # logging.warn is a deprecated alias
        return False


def _append_url(base_url, fragment):
    return base_url + fragment if base_url.endswith('/') else base_url + '/' + fragment
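# Usage sketch (assumption: 'central' and 'internal' repositories plus a stage
# directory were configured beforehand in the mvnfeed configuration):
#
#     transfer_artifact('org.slf4j:slf4j-api:1.7.36',
#                       from_repo='central', to_repo='internal',
#                       transfer_deps=True)
#
# With transfer_deps=True the downloaded POM is parsed and its
# `compile`-scoped dependencies are queued for transfer as well.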
| 39.573574 | 113 | 0.635074 | 1,493 | 13,178 | 5.427328 | 0.179504 | 0.035542 | 0.029619 | 0.026657 | 0.429224 | 0.365173 | 0.346168 | 0.338887 | 0.291127 | 0.269036 | 0 | 0.00522 | 0.258613 | 13,178 | 332 | 114 | 39.692771 | 0.824156 | 0.164744 | 0 | 0.366812 | 0 | 0 | 0.127828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048035 | false | 0 | 0.043668 | 0.017467 | 0.144105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec1ee81ce412cde25b50543e502ef0e9f4204892 | 16,523 | py | Python | portal/models/intervention.py | uwcirg/true_nth_usa_portal | e2434731aed86f1c43f15d428dde8ffc28ac7e5f | [
"BSD-3-Clause"
] | 3 | 2017-01-15T10:11:57.000Z | 2018-10-02T23:46:44.000Z | portal/models/intervention.py | uwcirg/true_nth_usa_portal | e2434731aed86f1c43f15d428dde8ffc28ac7e5f | [
"BSD-3-Clause"
] | 876 | 2016-04-04T20:45:11.000Z | 2019-02-28T00:10:36.000Z | portal/models/intervention.py | uwcirg/truenth-portal | 459a0d157982f010175c50b9cccd860a61790370 | [
"BSD-3-Clause"
] | 9 | 2016-04-13T01:18:55.000Z | 2018-09-19T20:44:23.000Z | """Intervention Module"""
from flask import current_app
from sqlalchemy import and_
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import BadRequest
from ..database import db
from ..dict_tools import strip_empties
from .lazy import query_by_name
from .role import ROLE
LOGOUT_EVENT = 0b001
USER_DOC_EVENT = 0b010
class DisplayDetails(object):
    """Simple abstraction to communicate display details to front end

    To provide a custom experience, intervention access can be set at
    several levels.  For a user, access is either available or not, and when
    available, the link controls may be intentionally disabled for a reason the
    intervention should note in the status_text field.

    Attributes::
        access: {True, False}
        card_html: Text to display on the card
        link_label: Text used to label the button or hyperlink
        link_url: URL for the button or link - link to be disabled when null
        status_text: Text to inform user of status, or why it's disabled

    """

    def __init__(self, access, intervention, user_intervention):
        """Build best set available, preferring values in user_intervention"""
        ui = user_intervention
        self.access = access
        self.card_html = ui and ui.card_html or intervention.card_html
        self.link_label = ui and ui.link_label or intervention.link_label
        self.link_url = ui and ui.link_url or intervention.link_url
        self.status_text = ui and ui.status_text or intervention.status_text
class Intervention(db.Model):
    __tablename__ = 'interventions'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    description = db.Column(db.Text, nullable=False)
    # nullable as interventions may not have a valid client
    client_id = db.Column(db.ForeignKey('clients.client_id'))
    card_html = db.Column(db.Text)
    link_label = db.Column(db.Text)
    link_url = db.Column(db.Text)
    status_text = db.Column(db.Text)
    public_access = db.Column(db.Boolean, default=True)
    display_rank = db.Column(db.Integer)
    subscribed_events = db.Column(db.Integer, nullable=False, default=0)

    client = db.relationship(
        'Client',
        primaryjoin="Client.client_id==Intervention.client_id",
        uselist=False, backref='Client')
    access_strategies = db.relationship(
        'AccessStrategy', order_by="AccessStrategy.rank")

    @hybrid_property
    def subscribed_to_logout_event(self):
        return self.subscribed_events & LOGOUT_EVENT

    @subscribed_to_logout_event.setter
    def subscribed_to_logout_event(self, value):
        if value:
            self.subscribed_events = self.subscribed_events | LOGOUT_EVENT
        else:
            self.subscribed_events = self.subscribed_events & ~LOGOUT_EVENT

    @hybrid_property
    def subscribed_to_user_doc_event(self):
        return self.subscribed_events & USER_DOC_EVENT

    @subscribed_to_user_doc_event.setter
    def subscribed_to_user_doc_event(self, value):
        if value:
            self.subscribed_events = self.subscribed_events | USER_DOC_EVENT
        else:
            self.subscribed_events = self.subscribed_events & ~USER_DOC_EVENT
    def as_json(self):
        """Returns the 'safe to export' portions of an intervention

        The client_id and link_url are non-portable between systems.
        The id is also independent - return the rest of the not null
        fields as a simple json dict.

        NB for staging exclusions to function, link_url and client_id
        are now included.  Take care to remove it from persistence files
        where it is NOT portable, for example, when generating persistence
        files programmatically.

        """
        d = {'resourceType': 'Intervention'}
        for attr in ('name', 'description', 'card_html', 'link_label',
                     'status_text', 'public_access', 'display_rank',
                     'subscribed_events', 'link_url', 'client_id'):
            if getattr(self, attr, None) is not None:
                d[attr] = getattr(self, attr)
        return d

    @staticmethod
    def rct_ids():
        """returns list of RCT (randomized control trial) intervention ids"""
        names = current_app.config.get('RCT_INTERVENTIONS')
        if not names:
            return None
        ids = [i.id for i in Intervention.query.filter(
            Intervention.name.in_(names))]
        if len(ids) != len(names):
            raise ValueError(
                "can't locate all interventions named in config "
                "'RCT_INTERVENTIONS': {}".format(names))
        return ids
    @classmethod
    def from_json(cls, data):
        intervention = cls()
        return intervention.update_from_json(data)

    def update_from_json(self, data):
        if 'name' not in data:
            raise ValueError("required 'name' field not found")
        for attr in ('name', 'description', 'card_html', 'link_label',
                     'status_text', 'public_access', 'display_rank',
                     'subscribed_events'):
            if attr in data:
                setattr(self, attr, data.get(attr))

        # link_url and client_id are special - generally we don't pull
        # from persisted format as each instance is configured to
        # communicate with distinct interventions.  As it is used
        # for prod -> staging, warn if seen on any other system
        if 'link_url' in data and self.link_url != data['link_url']:
            if current_app.config.get("SYSTEMT_TYPE", '').lower() != 'staging':
                current_app.logger.warning(
                    "IMPORTING non-portable intervention({}) link_url: '{}'"
                    "".format(self.name, data['link_url']))
            self.link_url = data['link_url']
        if 'client_id' in data and self.client_id != data['client_id']:
            if current_app.config.get("SYSTEMT_TYPE", '').lower() != 'staging':
                current_app.logger.warning(
                    "IMPORTING non-portable intervention({}) client_id: '{}'"
                    "".format(self.name, data['client_id']))
            self.client_id = data['client_id']
        return self
def fetch_strategies(self):
"""Generator to return each registered strategy
Strategies need to be brought to life from their persisted
state. This generator does so, and returns them in a call
ready fashion, ordered by the strategy's rank.
"""
for strat in self.access_strategies:
func = strat.instantiate()
yield func
def display_for_user(self, user):
"""Return the intervention display details for the given user
Somewhat complicated method, depending on intervention configuration.
The following ordered steps are used to determine if a user
should have access to an intervention. The first 'true' found
provides access, otherwise the intervention will not be displayed.
1. call each strategy_function in intervention.access_strategies.
Note, on rare occasions, a strategy may alter the UserIntervention
attributes given the circumstances.
2. check for a UserIntervention row defining access for the given
user on this intervention.
3. check if the intervention has `public_access` set
@return DisplayDetails object defining 'access' and other details
for how to render the intervention.
"""
access = False
# 1. check strategies for access
for func in self.fetch_strategies():
if func(intervention=self, user=user):
access = True
break
# 2. check user_intervention for access
ui = UserIntervention.query.filter_by(
user_id=user.id, intervention_id=self.id).first()
if ui and ui.access == 'granted':
access = True
# 3. check intervention scope for access
# (NB - tempting to shortcut by testing this first, but we
# need to allow all the strategies to run in case they alter settings)
if self.public_access:
access = True
return DisplayDetails(
access=access, intervention=self, user_intervention=ui)
def quick_access_check(self, user):
"""Return boolean representing given user's access to intervention
Somewhat complicated method, depending on intervention configuration.
The following ordered steps are used to determine if a user
should have access to an intervention. The first 'true' found
is returned (as to make the check as quick as possible).
1. check if the intervention has `public_access` set
2. check for a UserIntervention row defining access for the given
user on this intervention.
3. call each strategy_function in intervention.access_strategies.
@return boolean representing 'access'.
"""
# 1. check intervention scope for access
if self.public_access:
return True
# 2. check user_intervention for access
ui = UserIntervention.query.filter_by(
user_id=user.id, intervention_id=self.id).first()
if ui and ui.access == 'granted':
return True
# 3. check strategies for access
for func in self.fetch_strategies():
            # strategies named 'update_user_card_html' are treated as
            # granting access without being invoked here
            if func.__name__ == 'update_user_card_html':
                return True
if func(intervention=self, user=user):
return True
return False
def __str__(self):
"""print details needed in audit logs"""
if self.name == INTERVENTION.DEFAULT.name:
return ""
return ("Intervention: {0.description}, "
"public_access: {0.public_access}, "
"card_html: {0.card_html}, "
"link_label: {0.link_label}, "
"link_url: {0.link_url}, "
"status_text: {0.status_text},"
"subscribed_events: {0.subscribed_events}".format(self))
access_types = ('forbidden', 'granted', 'subscribed')
access_types_enum = ENUM(*access_types, name='access', create_type=False)
class UserIntervention(db.Model):
__tablename__ = 'user_interventions'
id = db.Column(db.Integer, primary_key=True)
access = db.Column('access', access_types_enum, default='forbidden')
card_html = db.Column(db.Text)
staff_html = db.Column(db.Text)
link_label = db.Column(db.Text)
link_url = db.Column(db.Text)
status_text = db.Column(db.Text)
user_id = db.Column(db.ForeignKey('users.id'), nullable=False)
intervention_id = db.Column(
db.ForeignKey('interventions.id'), nullable=False)
def as_json(self, include_empties=True):
d = {'user_id': self.user_id}
for field in ('access', 'card_html', 'staff_html',
'link_label', 'link_url', 'status_text'):
d[field] = getattr(self, field)
if not include_empties:
return strip_empties(d)
return d
def update_from_json(self, data):
for attr in data:
setattr(self, attr, data[attr])
@classmethod
def user_access_granted(cls, intervention_id, user_id):
"""Shortcut to query for specific (intervention, user) access"""
q = cls.query.filter(and_(
cls.user_id == user_id,
cls.intervention_id == intervention_id,
cls.access == 'granted'))
return q.count() > 0
def intervention_restrictions(user):
"""returns tuple of lists for interventions: (disallow, require)
Users may not have access to some interventions (such as randomized
control trials). In such a case, the first of the tuple items
will name intervention ids which should not be included.
Other users get access to all patients with one or more
interventions. In this case, a list of interventions for which
the user should be granted access is in the second position.
    :returns: tuple (disallow, require) where:
        disallow: list of intervention IDs whose associated patients should
            be excluded, such as the randomized control trial interventions.
        require: list of intervention IDs for which patients must also have
            the respective UserIntervention association.
"""
if user.has_role(ROLE.ADMIN.value):
return None, None # no restrictions
disallowed, required = None, None
if user.has_role(ROLE.STAFF.value):
if user.has_role(ROLE.INTERVENTION_STAFF.value):
raise BadRequest(
"Patients list for staff and intervention-staff are "
"mutually exclusive - user shouldn't have both roles")
# staff users aren't to see patients from RCT interventions
disallowed = Intervention.rct_ids()
if user.has_role(ROLE.INTERVENTION_STAFF.value):
# Look up associated interventions
uis = UserIntervention.query.filter(
UserIntervention.user_id == user.id)
# check if the user is associated with any intervention at all
if uis.count() == 0:
raise BadRequest("User is not associated with any intervention.")
required = [ui.intervention_id for ui in uis]
return disallowed, required
STATIC_INTERVENTIONS = {
'analytics': 'Analytics',
'assessment_engine': 'Assessment Engine',
'care_plan': 'Care Plan',
'community_of_wellness': 'Community of Wellness',
'decision_support_p3p': 'Decision Support P3P',
'decision_support_wisercare': 'Decision Support WiserCare',
'music': 'MUSIC Integration',
'psa_tracker': 'PSA Tracker',
'self_management': 'Self Management',
'sexual_recovery': 'Sexual Recovery',
'social_support': 'Social Support Network',
'default': 'OTHER: not yet officially supported',
}
def add_static_interventions():
"""Seed database with default static interventions
Idempotent - run anytime to push any new interventions into existing dbs
"""
for name, description in STATIC_INTERVENTIONS.items():
if not Intervention.query.filter_by(name=name).first():
intervention = Intervention(
name=name, description=description, card_html=description,
subscribed_events=LOGOUT_EVENT)
db.session.add(intervention)
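
# Usage sketch (illustrative, not from this file): typically called from a
# seed/bootstrap step; committing the session is assumed to happen in the
# caller, e.g.:
#
#     add_static_interventions()
#     db.session.commit()
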
class _NamedInterventions(object):
"""Bunch pattern class to house references to interventions
    Don't use this class directly - refer instead to its singleton
    instance, INTERVENTION.
    Specialized to handle only Interventions. Attributes
    (all without a leading '_') are assumed to be interventions and may
    be referenced in upper or lower case.
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, query_by_name(Intervention, k))
def __getattribute__(self, attr):
if attr.startswith('_'):
return object.__getattribute__(self, attr)
# Catch KeyError in case it's a dynamically added intervention
# (i.e. not from static list)
try:
value = self.__dict__[attr.lower()].__call__(self)
except NoResultFound:
raise AttributeError("Intervention {} not found".format(attr))
except KeyError:
query = Intervention.query.filter_by(name=attr)
if not query.count():
raise AttributeError(
"Intervention {} not found".format(attr))
value = query.one()
return value
def __iter__(self):
for attr in dir(self):
if attr.startswith('_'):
continue
try:
yield getattr(self, attr)
except AttributeError:
# Intervention from static list not found in db, skip
continue
def __contains__(self, item):
try:
self.__getattribute__(item)
return True
except AttributeError:
return False
"""INTERVENTION behaves like a static accessor for all interventions.
Obtain intervention of choice by name in upper or lower case or by string:
sr = INTERVENTION.SEXUAL_RECOVERY
sr = INTERVENTION.sexual_recovery
sr = getattr(INTERVENTION, 'sexual_recovery')
"""
INTERVENTION = _NamedInterventions(**STATIC_INTERVENTIONS)
| 38.159353 | 79 | 0.651516 | 2,025 | 16,523 | 5.163457 | 0.204938 | 0.013389 | 0.018171 | 0.014728 | 0.294855 | 0.252295 | 0.215092 | 0.190704 | 0.158569 | 0.14365 | 0 | 0.00264 | 0.266417 | 16,523 | 432 | 80 | 38.247685 | 0.859995 | 0.29468 | 0 | 0.256098 | 0 | 0 | 0.152304 | 0.011864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089431 | false | 0 | 0.04878 | 0.00813 | 0.349594 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec1f0874784570485a2cb6d70a6752e769ab9710 | 3,156 | py | Python | utils/extract_features.py | ArjitJ/tbd-nets | 8e93ecad54489706ec3249c9ca5d345d6866e1ba | [
"MIT"
] | 371 | 2018-03-15T00:26:23.000Z | 2022-03-30T14:32:48.000Z | utils/extract_features.py | ArjitJ/tbd-nets | 8e93ecad54489706ec3249c9ca5d345d6866e1ba | [
"MIT"
] | 14 | 2018-03-23T08:03:02.000Z | 2022-02-06T18:39:05.000Z | utils/extract_features.py | ArjitJ/tbd-nets | 8e93ecad54489706ec3249c9ca5d345d6866e1ba | [
"MIT"
] | 81 | 2018-03-15T00:54:46.000Z | 2021-12-07T16:09:58.000Z | # DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
#
# This material is based upon work supported by the Assistant Secretary of Defense for Research and
# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,
# findings, conclusions or recommendations expressed in this material are those of the author(s) and
# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and
# Engineering.
#
# © 2017 Massachusetts Institute of Technology.
#
# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or
# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are
# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than
# as specifically authorized by the U.S. Government may violate any copyrights that exist in this
# work.
import torch
import numpy as np
from scipy.misc import imread, imresize
from torchvision.models import resnet101
def load_feature_extractor(model_stage=2):
""" Load the appropriate parts of ResNet-101 for feature extraction.
Parameters
----------
model_stage : Integral
The stage of ResNet-101 from which to extract features.
For 28x28 feature maps, this should be 2. For 14x14 feature maps, 3.
Returns
-------
torch.nn.Sequential
The feature extractor (ResNet-101 at `model_stage`)
Notes
-----
This function will download ResNet-101 if it is not already present through torchvision.
"""
model = resnet101(pretrained=True)
layers = [model.conv1, model.bn1, model.relu, model.maxpool]
layers += [getattr(model, 'layer{}'.format(i+1)) for i in range(model_stage)]
model = torch.nn.Sequential(*layers)
if torch.cuda.is_available():
model.cuda()
return model.eval()
def extract_image_feats(img_path, model):
""" Extract image features from the image at `img_path` using `model`.
Parameters
----------
img_path : Union[pathlib.Path, str]
The path to the image file.
model : torch.nn.Module
The feature extractor to use.
Returns
-------
Tuple[numpy.ndarray, torch.Tensor]
The image and image features extracted from `model`
"""
# read in the image and transform it to shape (1, 3, 224, 224)
path = str(img_path) # to handle pathlib
img = imread(path, mode='RGB')
img = imresize(img, (224, 224), interp='bicubic')
img = img.transpose(2, 0, 1)[None]
# use ImageNet statistics to transform the data
mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
img_tensor = torch.FloatTensor((img / 255 - mean) / std)
# push to the GPU if possible
if torch.cuda.is_available():
img_tensor = img_tensor.cuda()
return (img.squeeze().transpose(1, 2, 0), model(img_tensor))
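
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the image path
    # below is illustrative only.
    extractor = load_feature_extractor(model_stage=2)  # 28x28 feature maps
    image, feats = extract_image_feats('./example.jpg', extractor)
    print(image.shape, feats.shape)  # e.g. (224, 224, 3) and (1, 512, 28, 28)
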
| 36.697674 | 100 | 0.695501 | 468 | 3,156 | 4.653846 | 0.455128 | 0.018365 | 0.016529 | 0.02112 | 0.080808 | 0.050505 | 0.050505 | 0.050505 | 0.050505 | 0 | 0 | 0.063669 | 0.203739 | 3,156 | 85 | 101 | 37.129412 | 0.802626 | 0.630228 | 0 | 0.086957 | 0 | 0 | 0.016441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec1feb9807faf84631629db4a3a466c150882506 | 14,080 | py | Python | ai_solutions/maze_solver.py | zerobits01/AI-Project | 68d5e7a8b2c826644683a08e916f961f878d218f | [
"MIT"
] | null | null | null | ai_solutions/maze_solver.py | zerobits01/AI-Project | 68d5e7a8b2c826644683a08e916f961f878d218f | [
"MIT"
] | null | null | null | ai_solutions/maze_solver.py | zerobits01/AI-Project | 68d5e7a8b2c826644683a08e916f961f878d218f | [
"MIT"
] | null | null | null | import sys
import collections
from queue import PriorityQueue
from itertools import count
from ai_solutions.graph_node import GraphNode
class MazeSolver:
'''
this class is for solving the maze question
using three algorithms:
- BFS
- IDS
- A*
'''
def __init__(self, source, destination, black_cells, size=20):
'''
        Initialize the source and destination cells
        @param source: source cell, two elements (row, col)
        @type source: tuple
        @param destination: dst, two elements (row, col)
        @type destination: tuple
        @param black_cells: list of black cells in the maze like [(row, col), (row, col), etc]
        @type black_cells: list
        @param size: the maze dimension size; this will create a size*size maze
        @type size: int
'''
# variables
self.FIRST = (0, 0)
self.LAST = (size - 1, size - 1)
self.BLACKED = set([(x[0], x[1]) for x in black_cells])
self.SRC = GraphNode(None, (source[0], source[1]))
self.DST = GraphNode(None, (destination[0], destination[1]))
self.size = size
print(20*'#' + '\n' + "MazeSolver creation" + '\n' + 20*'#' + '\n')
def create_path(self, node: GraphNode):
"""creates the solution path based on the parents till it visits SRC
@param node: dst node wchich we wanna find path to it from source
@type node: GraphNode
@returns: list of the coordinates to go in correct order
@rtype: list
"""
path = []
cost = 0
while node:
path.insert(0, node.coordinate)
node = node.parent
cost = cost + 1
print(20*'#' + '\n' + "path creation" + '\n' + 20*'#' + '\n')
return path, cost
def is_child_valid(self, node: GraphNode):
'''
checks if the created node is valid or not
@param node: input node to check
@type node: GraphNode
@returns: boolean value which shows if the created node is valid or not
@rtype: bool
'''
if node.coordinate[0] < 0 or node.coordinate[1] < 0 or \
node.coordinate[0] >= self.size or node.coordinate[1] >= self.size or \
node.coordinate == node.parent.coordinate or node.coordinate in self.BLACKED :
return False
return True
def get_children(self, node: GraphNode):
'''
this will create the child nodes then return them as a list
@param node: the node we wanna search for the children
@type node: GraphNode
@returns: list of children
@rtype: list
'''
children = []
try:
b_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] - 1))
t_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] + 1))
l_child = GraphNode(
node, (node.coordinate[0] - 1, node.coordinate[1]))
r_child = GraphNode(
node, (node.coordinate[0] + 1, node.coordinate[1]))
if self.is_child_valid(b_child):
children.append(b_child)
if self.is_child_valid(t_child):
children.append(t_child)
if self.is_child_valid(r_child):
children.append(r_child)
if self.is_child_valid(l_child):
children.append(l_child)
print(20*'#' + '\n' + "get children" + '\n' + 20*'#' + '\n')
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return children
def get_specific_child(self, node: GraphNode, which_child):
'''
this will create specific child of the given node
@param node: the node we wanna search for the children
@type node: GraphNode
@param which_child: L-eft, R-ight, B-ottom, T-op
@type which_child: char
@returns: list of children
@rtype: list
'''
try:
if which_child == 'B':
b_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] - 1))
if self.is_child_valid(b_child):
return b_child
if which_child == 'T':
t_child = GraphNode(
node, (node.coordinate[0], node.coordinate[1] + 1))
if self.is_child_valid(t_child):
return t_child
if which_child == 'L':
l_child = GraphNode(
node, (node.coordinate[0] - 1, node.coordinate[1]))
if self.is_child_valid(l_child):
return l_child
if which_child == 'R':
r_child = GraphNode(
node, (node.coordinate[0] + 1, node.coordinate[1]))
if self.is_child_valid(r_child):
return r_child
return None
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return None
def bfs_graph_search(self):
'''
solve the maze by bfs graph search
@returns : solution path, cost, count of explored set
'''
try:
queue = collections.deque([self.SRC])
explored_set = set()
while queue:
curr = queue.popleft()
if curr == self.DST:
# returning path, cost and explored_set count
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
explored_set.add(curr.coordinate)
# add current cell's child to queue to visit
children = self.get_children(curr)
for child in children:
if child.coordinate not in explored_set:
queue.append(child)
return [], 'Inf', list(explored_set) # no answer found
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
def dls_graph_search(self, cut_off):
        '''Graph-search implementation of the DLS (depth-limited search)
        algorithm, implemented specifically for solving the maze
@returns : solution path, cost, count of explored set or False
'''
if cut_off < 1:
return False
set_limit = 0
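        # upper bound on nodes explored: a full 4-ary tree of depth `cut_off`
        # contains sum(4**i for i in 0..cut_off) nodes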
for i in range(0, cut_off+1):
set_limit = set_limit + 4 ** i
explored_set = set()
level = 0
try:
level = level+1
curr = self.SRC
explored_set.add(curr.coordinate)
if curr.coordinate == self.DST.coordinate:
path, cost = self.create_path(self.SRC)
return path, cost-1, list(explored_set)
while True:
print(len(explored_set), "\n", set_limit, "\n")
if(len(explored_set) == set_limit):
break
if level == cut_off:
level = level - 1
curr = curr.parent
continue
tmp = self.get_specific_child(curr, 'T')
if tmp and tmp.coordinate not in explored_set:
curr = tmp
print(f"TOP:\t{curr.coordinate}")
level = level + 1
explored_set.add(curr.coordinate)
if curr == self.DST:
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
continue
tmp = self.get_specific_child(curr, 'R')
if tmp and tmp.coordinate not in explored_set:
curr = tmp
print(f"TOP:\t{curr.coordinate}")
level = level + 1
explored_set.add(curr.coordinate)
if curr == self.DST:
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
continue
tmp = self.get_specific_child(curr, 'B')
if tmp and tmp.coordinate not in explored_set:
curr = tmp
print(f"TOP:\t{curr.coordinate}")
level = level + 1
explored_set.add(curr.coordinate)
if curr == self.DST:
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
continue
tmp = self.get_specific_child(curr, 'L')
if tmp and tmp.coordinate not in explored_set:
curr = tmp
print(f"TOP:\t{curr.coordinate}")
level = level + 1
explored_set.add(curr.coordinate)
if curr == self.DST:
path, cost = self.create_path(curr)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
continue
level = level - 1
curr = curr.parent
return False, list(explored_set)
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
def ids_graph_search(self):
'''
solve the maze by ids graph search
@returns : solution path, cost, count of explored set or False
'''
try:
explored_set_tmp = []
for cut_off in count(start=1):
print(20*"!@#$")
print(f"new round\t{cut_off}")
print(20*"!@#$")
result = self.dls_graph_search(cut_off)
if result != False and isinstance(result[1], int):
return result
if result == False:
continue
if result[0] == False:
return [], 'Inf', result[1] # no answer found
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
def aStar_graph_search(self):
'''
solve the maze by a* graph search
@returns : solution path, cost, count of explored set
'''
try:
to_check = []
explored_set = []
to_check.append(self.SRC)
while to_check:
current_cell = to_check.pop(0)
if current_cell.coordinate not in explored_set:
explored_set.append(current_cell.coordinate)
if current_cell.coordinate == self.DST.coordinate:
path, cost = self.create_path(current_cell)
print(20*'#' + '\n' + "solved" + '\n' + 20*'#' + '\n')
return path, cost-1, list(explored_set)
children = self.get_children(current_cell)
for child in children:
                    child.paid = abs(child.coordinate[0] - self.SRC.coordinate[0]) + abs(
                        child.coordinate[1] - self.SRC.coordinate[1])
                    child.heuristic = abs(child.coordinate[0] - self.DST.coordinate[0]) + abs(
                        child.coordinate[1] - self.DST.coordinate[1])
                    child.total = child.paid + child.heuristic
flag = True
for tmp in to_check:
if (child == tmp and child.total >= tmp.total):
flag = False
break
if flag:
if child.coordinate not in explored_set:
to_check.append(child)
to_check = sorted(to_check, key=lambda GraphNode_ob: GraphNode_ob.total)
return [], 'Inf', list(explored_set)
except Exception as e:
print(20*'$')
print(sys.exc_info()[-1].tb_lineno, e)
print(20*'$')
return False
return False
"""
points to pay attention to:
- a set is not JSON serializable, so convert it to a list at the last step
- look into more advanced debugging in python
"""
# source: https://towardsdatascience.com/a-star-a-search-algorithm-eb495fb156bb
# source: https://www.annytab.com/a-star-search-algorithm-in-python/
# we have to check something important
'''
what if they set the start at (1, 1) with walls all around it?
'''
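
if __name__ == '__main__':
    # Minimal usage sketch (illustrative; coordinates and walls are arbitrary
    # examples, not from the original module).
    solver = MazeSolver(source=(0, 0), destination=(19, 19),
                        black_cells=[(5, 5), (5, 6)], size=20)
    path, cost, explored = solver.bfs_graph_search()
    print(cost, path)
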
| 34.594595 | 100 | 0.464063 | 1,488 | 14,080 | 4.283602 | 0.147177 | 0.060402 | 0.015061 | 0.027612 | 0.512708 | 0.478506 | 0.451365 | 0.387041 | 0.376373 | 0.360998 | 0 | 0.019719 | 0.438139 | 14,080 | 406 | 101 | 34.679803 | 0.785994 | 0.144815 | 0 | 0.562232 | 0 | 0 | 0.027379 | 0.008259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038627 | false | 0 | 0.021459 | 0 | 0.184549 | 0.150215 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec212b4bab89667f0bd94d6a9a7ed05d39f3098c | 4,998 | py | Python | examples/development/simulate_policy_with_state.py | zhanghc12/mopo | f6db90f55ca6becbc34988b88404b289699637da | [
"MIT"
] | 107 | 2020-09-07T01:06:37.000Z | 2022-03-31T04:16:51.000Z | examples/development/simulate_policy_with_state.py | zhanghc12/mopo | f6db90f55ca6becbc34988b88404b289699637da | [
"MIT"
] | 9 | 2020-09-09T06:49:03.000Z | 2022-03-25T18:19:57.000Z | examples/development/simulate_policy_with_state.py | zhanghc12/mopo | f6db90f55ca6becbc34988b88404b289699637da | [
"MIT"
] | 29 | 2020-09-10T16:26:33.000Z | 2022-03-16T08:15:41.000Z | import argparse
from distutils.util import strtobool
import json
import os
import pickle
import numpy as np
import tensorflow as tf
import pdb
from softlearning.environments.utils import get_environment_from_params
from softlearning.policies.utils import get_policy_from_variant
# from softlearning.samplers import rollouts
from softlearning import replay_pools
from softlearning.samplers import (
dummy_sampler,
extra_policy_info_sampler,
remote_sampler,
base_sampler,
simple_sampler)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-mode', '-r',
type=str,
default=None,
choices=('human', 'rgb_array', None),
help="Mode to render the rollouts in.")
parser.add_argument('--deterministic', '-d',
type=lambda x: bool(strtobool(x)),
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
args = parser.parse_args()
return args
def rollout(env,
policy,
path_length,
callback=None,
render_mode=None,
break_on_terminal=True):
observation_space = env.observation_space
action_space = env.action_space
pool = replay_pools.SimpleReplayPool(
observation_space, action_space, max_size=path_length)
sampler = simple_sampler.SimpleSampler(
max_path_length=path_length,
min_pool_size=None,
batch_size=None)
sampler.initialize(env, policy, pool)
images = []
infos = []
state_vectors = []
t = 0
for t in range(path_length):
observation, reward, terminal, info = sampler.sample()
state_vector = sampler.env.unwrapped.state_vector()
infos.append(info)
state_vectors.append(state_vector)
if callback is not None:
callback(observation)
if render_mode is not None:
if render_mode == 'rgb_array':
image = env.render(mode=render_mode)
images.append(image)
else:
env.render()
if terminal:
policy.reset()
if break_on_terminal: break
assert pool._size == t + 1
path = pool.batch_by_indices(
np.arange(pool._size),
observation_keys=getattr(env, 'observation_keys', None))
path['infos'] = infos
    # pair each transition with the simulator state *before* the step taken
    path['state_vectors'] = np.array([sampler._reset_state_vector] + state_vectors[:-1])
if render_mode == 'rgb_array':
path['images'] = np.stack(images, axis=0)
return path
def rollouts(n_paths, *args, **kwargs):
paths = [rollout(*args, **kwargs) for i in range(n_paths)]
return paths
def simulate_policy(args):
session = tf.keras.backend.get_session()
checkpoint_path = args.checkpoint_path.rstrip('/')
experiment_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(experiment_path, 'params.json')
with open(variant_path, 'r') as f:
variant = json.load(f)
with session.as_default():
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
with open(pickle_path, 'rb') as f:
picklable = pickle.load(f)
environment_params = (
variant['environment_params']['evaluation']
if 'evaluation' in variant['environment_params']
else variant['environment_params']['training'])
evaluation_environment = get_environment_from_params(environment_params)
policy = (
get_policy_from_variant(variant, evaluation_environment, Qs=[None]))
policy.set_weights(picklable['policy_weights'])
with policy.set_deterministic(args.deterministic):
paths = rollouts(args.num_rollouts,
evaluation_environment,
policy,
path_length=args.max_path_length,
render_mode=args.render_mode)
#### print rewards
rewards = [path['rewards'].sum() for path in paths]
print('Rewards: {}'.format(rewards))
print('Mean: {}'.format(np.mean(rewards)))
####
        if args.render_mode != 'human':
            # intentional breakpoint for interactive inspection of `paths`
            from pprint import pprint; import pdb; pdb.set_trace()
            pass
return paths
if __name__ == '__main__':
args = parse_args()
paths = simulate_policy(args)
keys = paths[0].keys()
paths = {key: np.concatenate([path[key] for path in paths]) for key in keys}
print(paths.keys())
print(paths['observations'].shape, paths['state_vectors'].shape)
# pickle.dump(paths, open('data/hopper_state_vectors.pkl', 'wb'))
| 31.2375 | 88 | 0.622649 | 571 | 4,998 | 5.238179 | 0.285464 | 0.033434 | 0.028419 | 0.016048 | 0.013373 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003012 | 0.269308 | 4,998 | 159 | 89 | 31.433962 | 0.815991 | 0.02401 | 0 | 0.065041 | 0 | 0 | 0.088798 | 0 | 0 | 0 | 0 | 0 | 0.00813 | 1 | 0.03252 | false | 0.00813 | 0.105691 | 0 | 0.170732 | 0.04065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec21a2f2b80af73693b16e92ab450451f7f0d1b4 | 2,566 | py | Python | test_mask_rcnn_bdd.py | sxhxliang/challenagerAI-gluon-cv | a0018adad887e9045d3d356b8eb16372323888bf | [
"Apache-2.0"
] | 1 | 2019-12-17T14:18:00.000Z | 2019-12-17T14:18:00.000Z | test_mask_rcnn_bdd.py | sxhxliang/challenagerAI-gluon-cv | a0018adad887e9045d3d356b8eb16372323888bf | [
"Apache-2.0"
] | 1 | 2019-04-29T04:05:51.000Z | 2019-04-29T04:05:51.000Z | test_mask_rcnn_bdd.py | AaronLeong/challenagerAI-gluon-cv | a0018adad887e9045d3d356b8eb16372323888bf | [
"Apache-2.0"
] | 1 | 2019-04-28T11:53:40.000Z | 2019-04-28T11:53:40.000Z | from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils
import mxnet as mx
import numpy as np
from PIL import Image
import json
from tqdm import tqdm
# epoch = 1
save_path = '/data1/datasets/bdd100k/testB_result/'
test_path = '/data1/datasets/bdd100k/images/100k/test2018/'
test_json = []
CLASSES = ['traffic light', 'traffic sign', 'person', 'rider', 'bike', 'bus', 'car', 'motor', 'train', 'truck']
_score_thresh = 0.5
ctx = mx.gpu(3)
resize_map = mx.image.ForceResizeAug((1280,720), interp=2)
net = model_zoo.get_model('mask_rcnn_resnet50_v1b_bdd', pretrained=False, pretrained_base=False)
net.load_parameters('bddv4_continuemask_rcnn_resnet50_v1b_bdd_0024.params')
net.collect_params().reset_ctx(ctx)
def save_drivable_map(pred_map, file_id):
drivable_name = file_id + '_drivable_id' + '.png'
mask = mx.nd.softmax(pred_map, axis=2)
mask = mask>0.5
color = np.array([0,1,2])
mask = mask.asnumpy() * color
mask = np.sum(mask, axis=2).astype('uint8')
# print(mask.shape)
img = Image.fromarray(mask)
# img.save()
img.save(save_path + 'seg/' + drivable_name, 'png')
# ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
with open('ai_challenger_adp2018_testb_20180917_t4.txt','r') as f:
for file_id in tqdm(f.readlines()):
file_id = file_id.replace('\n','')
filename = file_id+'.jpg'
x, orig_img = data.transforms.presets.rcnn.load_test(test_path+filename, max_size=1280)
ids, scores, bboxes, drivable_maps = net(x.as_in_context(ctx))
det_id, det_score, det_bbox = [xx[0].asnumpy() for xx in [ids, scores, bboxes]]
mask = drivable_maps[0].transpose((1,2,0)).as_in_context(mx.cpu())
mask = resize_map(mask)
        # save the drivable-area map image
save_drivable_map(mask, file_id)
# ids, scores, bboxes
valid = np.where(((det_id >= 0) & (det_score >= _score_thresh)))[0]
det_id = det_id[valid]
det_score = det_score[valid]
det_bbox = det_bbox[valid]
# print(det_score.shape)
for cid, score, bbox in zip(det_id, det_score, det_bbox):
# print(cid)
test_json.append({
"name": filename,
"timestamp": 1000,
"category": CLASSES[int(cid[0])],
"bbox": bbox.tolist(),
"score": float(score[0])
})
print(save_path + 'det4.json')
with open( save_path +'det4.json', 'w') as jsonf:
json.dump(test_json, jsonf)
| 33.763158 | 111 | 0.622759 | 368 | 2,566 | 4.127717 | 0.415761 | 0.02765 | 0.029625 | 0.0316 | 0.026333 | 0.026333 | 0 | 0 | 0 | 0 | 0 | 0.039634 | 0.233048 | 2,566 | 75 | 112 | 34.213333 | 0.732215 | 0.064302 | 0 | 0 | 0 | 0 | 0.145668 | 0.084973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.137255 | 0 | 0.156863 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec21accf9670823d9e86649f1391298f34bb5e19 | 300 | py | Python | boj/1110.py | byeonggukgong/algorithm | d9283fdbcfe4966cd4de3e394d9fd3aa33d533a8 | [
"MIT"
] | 3 | 2018-03-11T14:10:59.000Z | 2019-01-23T12:34:27.000Z | boj/1110.py | byeonggukgong/algorithm | d9283fdbcfe4966cd4de3e394d9fd3aa33d533a8 | [
"MIT"
] | null | null | null | boj/1110.py | byeonggukgong/algorithm | d9283fdbcfe4966cd4de3e394d9fd3aa33d533a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == '__main__':
    n = int(input())
    output = 0
    temp = n
    while True:
        # next term: units digit of temp becomes the tens digit; the new
        # units digit is the last digit of the sum of temp's two digits
        temp = temp % 10 * 10 + (temp // 10 + temp % 10) % 10
        output += 1
        if n == temp:
            break
    print(output)
| 18.75 | 65 | 0.43 | 33 | 300 | 3.666667 | 0.515152 | 0.14876 | 0.132231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076471 | 0.433333 | 300 | 15 | 66 | 20 | 0.635294 | 0.07 | 0 | 0 | 0 | 0 | 0.028881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec21d79eb0f508eab1e4d4b450e489d5d2153f80 | 1,222 | py | Python | data_load.py | IdanAzuri/tensorflow-generative-model-collections | 321f45f6dc0e7d4321b35a32e2eb7e864c9e0546 | [
"Apache-2.0"
] | null | null | null | data_load.py | IdanAzuri/tensorflow-generative-model-collections | 321f45f6dc0e7d4321b35a32e2eb7e864c9e0546 | [
"Apache-2.0"
] | null | null | null | data_load.py | IdanAzuri/tensorflow-generative-model-collections | 321f45f6dc0e7d4321b35a32e2eb7e864c9e0546 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import scipy.misc
import glob
import scipy
import utils
import tensorflow as tf
""" param """
epoch = 50
batch_size = 64
lr = 0.0002
z_dim = 100
n_critic = 5
gpu_id = 3
''' data '''
# you should prepare your own data in ./data/img_align_celeba
# celeba original size is [218, 178, 3]
def preprocess_fn(img):
crop_size = 108
re_size = 64
img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2, (178 - crop_size) // 2, crop_size, crop_size)
img = tf.to_float(tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
return img
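
# Worked numbers for preprocess_fn above: (218 - 108) // 2 = 55 and
# (178 - 108) // 2 = 35, i.e. a central 108x108 crop of the 218x178 CelebA
# image, resized to 64x64 with pixel values scaled to [-1, 1].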
sess = utils.session()
# iteration counter
it_cnt, update_cnt = utils.counter()
sess.run(tf.global_variables_initializer())
sess.run(it_cnt)
sess.run(update_cnt)
img_paths = glob.glob('/Users/idan.a/data/celeba/*.jpg')
data_pool = utils.DiskImageData(img_paths, batch_size, shape=[218, 178, 3], preprocess_fn=preprocess_fn)
batch_epoch = len(data_pool) // (batch_size * n_critic)
real_ipt = data_pool.batch()
sess.run(it_cnt)
it_epoch = 1
# save_dir="tmp/"
scipy.misc.imsave('sss.png', utils.immerge(real_ipt, 10, 10))
| 22.62963 | 117 | 0.739771 | 203 | 1,222 | 4.17734 | 0.472906 | 0.04717 | 0.056604 | 0.028302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048113 | 0.13257 | 1,222 | 53 | 118 | 23.056604 | 0.751887 | 0.107201 | 0 | 0.0625 | 0 | 0 | 0.035849 | 0.029245 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.25 | 0 | 0.3125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec2b8f23f26b48259c53767971ede949f67de8b4 | 3,221 | py | Python | xcube/core/gen2/local/writer.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
] | 97 | 2018-06-26T13:02:55.000Z | 2022-03-26T21:03:13.000Z | xcube/core/gen2/local/writer.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
] | 524 | 2018-11-09T12:00:08.000Z | 2022-03-31T17:00:13.000Z | xcube/core/gen2/local/writer.py | bcdev/xcube | 9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3 | [
"MIT"
] | 15 | 2019-07-09T08:46:03.000Z | 2022-02-07T18:47:34.000Z | # The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Tuple
import xarray as xr
from xcube.core.gridmapping import GridMapping
from xcube.core.normalize import encode_cube
from xcube.core.store import DataStorePool
from xcube.core.store import get_data_store_instance
from xcube.core.store import new_data_writer
from xcube.util.progress import observe_dask_progress
from ..config import OutputConfig
class CubeWriter:
def __init__(self,
output_config: OutputConfig,
store_pool: DataStorePool = None):
self._output_config = output_config
self._store_pool = store_pool
def write_cube(self,
cube: xr.Dataset,
gm: GridMapping) -> Tuple[str, xr.Dataset]:
output_config = self._output_config
dataset = encode_cube(cube, grid_mapping=gm)
with observe_dask_progress('writing cube', 100):
write_params = output_config.write_params or {}
store_params = output_config.store_params or {}
if output_config.store_id:
store_instance = get_data_store_instance(
output_config.store_id,
store_params=store_params,
store_pool=self._store_pool
)
writer = store_instance.store
write_params.update(
writer_id=output_config.writer_id,
**write_params
)
else:
writer = new_data_writer(output_config.writer_id)
write_params.update(**store_params, **write_params)
if not dataset.attrs.get('title'):
# Set fallback title, so we can distinguish
# datasets from stores in xcube-viewer
dataset = dataset.assign_attrs(title=output_config.data_id)
data_id = writer.write_data(
dataset,
data_id=output_config.data_id,
replace=output_config.replace or False,
**write_params
)
return data_id, dataset
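
# Usage sketch (illustrative; constructing OutputConfig is elided since its
# signature is defined elsewhere):
#
#     writer = CubeWriter(output_config, store_pool=store_pool)
#     data_id, dataset = writer.write_cube(cube, grid_mapping)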
| 43.527027 | 81 | 0.671531 | 408 | 3,221 | 5.127451 | 0.387255 | 0.080306 | 0.031071 | 0.025813 | 0.086998 | 0.029637 | 0 | 0 | 0 | 0 | 0 | 0.002983 | 0.271344 | 3,221 | 73 | 82 | 44.123288 | 0.888368 | 0.368519 | 0 | 0.043478 | 0 | 0 | 0.008454 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.195652 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec2f222af58da2bc64e5b28bba59ebdceae5e124 | 4,148 | py | Python | tests/test_subspaces.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 33 | 2019-12-05T15:20:26.000Z | 2022-03-27T17:53:57.000Z | tests/test_subspaces.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 12 | 2020-03-23T08:54:32.000Z | 2021-11-07T14:33:04.000Z | tests/test_subspaces.py | ndem0/ATHENA | 87825ad95de539ac5e816a19922e9d615fabd5b8 | [
"MIT"
] | 16 | 2019-12-05T14:10:57.000Z | 2021-07-30T14:12:10.000Z | from unittest import TestCase
import numpy as np
from athena.subspaces import Subspaces
class TestUtils(TestCase):
def test_init_W1(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.W1)
def test_init_W2(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.W2)
def test_init_evals(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.evals)
def test_init_evects(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.evects)
def test_init_evals_br(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.evals_br)
def test_init_subs_br(self):
ss = Subspaces(dim=1)
self.assertIsNone(ss.subs_br)
def test_init_dim(self):
ss = Subspaces(dim=1)
self.assertEqual(ss.dim, 1)
def test_fit(self):
ss = Subspaces(dim=1)
with self.assertRaises(NotImplementedError):
ss.fit()
def test_transform(self):
ss = Subspaces(dim=1)
with self.assertRaises(NotImplementedError):
ss.transform(42)
def test_inverse_transform(self):
ss = Subspaces(dim=1)
with self.assertRaises(NotImplementedError):
ss.inverse_transform(10, 10)
def test_partition_01(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
ss = Subspaces(dim=2)
ss.evects = matrix
ss._partition()
np.testing.assert_array_almost_equal(matrix[:, :2], ss.W1)
def test_partition_02(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
ss = Subspaces(dim=2)
ss.evects = matrix
ss._partition()
np.testing.assert_array_almost_equal(matrix[:, 2:], ss.W2)
def test_partition_03(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
ss = Subspaces(dim=2.0)
ss.evects = matrix
with self.assertRaises(TypeError):
ss._partition()
def test_partition_04(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
ss = Subspaces(dim=0)
ss.evects = matrix
with self.assertRaises(ValueError):
ss._partition()
def test_partition_05(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
ss = Subspaces(dim=4)
ss.evects = matrix
with self.assertRaises(ValueError):
ss._partition()
def test_bootstrap_replicate_01(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
weights = np.ones((3, 1)) / 3
ss = Subspaces(dim=1)
wei = ss._bootstrap_replicate(matrix, weights)[1]
np.testing.assert_array_almost_equal(weights, wei)
def test_bootstrap_replicate_02(self):
np.random.seed(42)
matrix = np.random.uniform(-1, 1, 9).reshape(3, 3)
weights = np.ones((3, 1)) / 3
ss = Subspaces(dim=1)
mat = ss._bootstrap_replicate(matrix, weights)[0]
true_matrix = np.array([[-0.88383278, 0.73235229, 0.20223002],
[0.19731697, -0.68796272, -0.68801096],
[-0.25091976, 0.90142861, 0.46398788]])
np.testing.assert_array_almost_equal(true_matrix, mat)
def test_plot_eigenvalues(self):
ss = Subspaces(dim=1)
with self.assertRaises(TypeError):
ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
def test_plot_eigenvectors(self):
ss = Subspaces(dim=1)
with self.assertRaises(TypeError):
ss.plot_eigenvectors(n_evects=2, title='Eigenvectors')
def test_plot_sufficient_summary(self):
ss = Subspaces(dim=1)
inputs = np.diag(np.ones(3))
outputs = np.ones(3).reshape(3, 1)
with self.assertRaises(TypeError):
ss.plot_sufficient_summary(inputs,
outputs,
figsize=(7, 7),
title='Sufficient_summary_plots')
| 32.40625 | 72 | 0.5892 | 527 | 4,148 | 4.493359 | 0.163188 | 0.059122 | 0.118243 | 0.095017 | 0.668497 | 0.613598 | 0.577703 | 0.547297 | 0.500422 | 0.447635 | 0 | 0.066939 | 0.290501 | 4,148 | 127 | 73 | 32.661417 | 0.737683 | 0 | 0 | 0.490566 | 0 | 0 | 0.011331 | 0.005786 | 0 | 0 | 0 | 0 | 0.188679 | 1 | 0.188679 | false | 0 | 0.028302 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec300133e36d8c7c6858a83eca5e80abaa22a5dc | 1,131 | py | Python | premium/backend/src/baserow_premium/api/license/urls.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | 1 | 2022-01-24T15:12:02.000Z | 2022-01-24T15:12:02.000Z | premium/backend/src/baserow_premium/api/license/urls.py | rasata/baserow | c6e1d7842c53f801e1c96b49f1377da2a06afaa9 | [
"MIT"
] | null | null | null | premium/backend/src/baserow_premium/api/license/urls.py | rasata/baserow | c6e1d7842c53f801e1c96b49f1377da2a06afaa9 | [
"MIT"
] | null | null | null | from django.conf.urls import re_path
from .views import (
AdminLicensesView,
AdminLicenseView,
AdminLicenseFillSeatsView,
AdminRemoveAllUsersFromLicenseView,
AdminLicenseUserView,
AdminLicenseLookupUsersView,
AdminCheckLicense,
)
app_name = "baserow_premium.api.license"
urlpatterns = [
re_path(r"^$", AdminLicensesView.as_view(), name="list"),
re_path(r"^(?P<id>[0-9]+)/$", AdminLicenseView.as_view(), name="item"),
re_path(
r"^(?P<id>[0-9]+)/lookup-users/$",
AdminLicenseLookupUsersView.as_view(),
name="lookup_users",
),
re_path(
r"^(?P<id>[0-9]+)/fill-seats/$",
AdminLicenseFillSeatsView.as_view(),
name="fill_seats",
),
re_path(
r"^(?P<id>[0-9]+)/remove-all-users/$",
AdminRemoveAllUsersFromLicenseView.as_view(),
name="remove_all_users",
),
re_path(
r"^(?P<id>[0-9]+)/check/$",
AdminCheckLicense.as_view(),
name="check",
),
re_path(
r"^(?P<id>[0-9]+)/(?P<user_id>[0-9]+)/$",
AdminLicenseUserView.as_view(),
name="user",
),
]
| 25.133333 | 75 | 0.591512 | 121 | 1,131 | 5.347107 | 0.322314 | 0.074189 | 0.075734 | 0.074189 | 0.126739 | 0.126739 | 0.126739 | 0.05255 | 0 | 0 | 0 | 0.015964 | 0.22458 | 1,131 | 44 | 76 | 25.704545 | 0.721779 | 0 | 0 | 0.25 | 0 | 0 | 0.223696 | 0.158267 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec330b8bd3eee04a448cd277aee3a50bec98e49d | 2,052 | py | Python | redirect_server/main.py | SantaSpeen/gitflic | 14c68092e238d6731863f9db29b304a9fad3e61c | [
"MIT"
] | 2 | 2022-03-16T10:18:38.000Z | 2022-03-16T13:08:51.000Z | redirect_server/main.py | SantaSpeen/gitflic | 14c68092e238d6731863f9db29b304a9fad3e61c | [
"MIT"
] | 1 | 2022-03-16T12:52:28.000Z | 2022-03-16T13:08:30.000Z | redirect_server/main.py | SantaSpeen/gitflic | 14c68092e238d6731863f9db29b304a9fad3e61c | [
"MIT"
] | 4 | 2022-03-16T09:33:05.000Z | 2022-03-30T05:46:58.000Z | """
This is the redirect server for https://oauth.gitflic.ru/oauth/authorize
Base URL: https://gitflic.santaspeen.ru/
Author: @SantaSpeen
License: MIT
"""
import json
import random
from string import ascii_letters, digits
from flask import Flask, request, redirect, abort
app = Flask("gitflic oauth redirect")
cache = {}
@app.route("/favicon.ico")
def fav():
return redirect("https://gitflic.ru/static/image/favicon/android-icon-192x192.png", 301)
@app.route("/", methods=["POST"])
def save_code():
headers = request.headers
if headers.get('Cdn-Loop') == "cloudflare":
if headers['Cf-Connecting-Ip'] == "84.47.177.90": # Gitflic server ip
jsn = json.loads(request.get_data())
cache[jsn['state']].update({"code": jsn['code']})
return "ok", 200
abort(403)
@app.route("/<user_code>", methods=["GET"])
def redirect_to_localhost(user_code):
headers = request.headers
if headers.get('Cdn-Loop') == "cloudflare":
ip = headers['Cf-Connecting-Ip']
if cache.get(user_code) is None:
return "Unknown code.", 404
if cache[user_code]['ip'] != ip:
return "Cannot access from your IP.", 403
redirect_url = cache[user_code]['redirect'] + f"?code={cache[user_code]['code']}&state={user_code}"
del cache[user_code]
return redirect(redirect_url)
abort(403)
@app.route("/getstate", methods=["GET"])
def getcode():
headers = request.headers
if headers.get('Cdn-Loop') == "cloudflare":
ip = headers['Cf-Connecting-Ip']
port = request.args.get('port') or abort(401)
if port.isdigit() and 49152 <= int(port) <= 65535:
state = ''.join([random.choice(ascii_letters + digits) for _ in range(random.randint(10, 17))])
cache.update({state: {"ip": ip, "code": None, "redirect": f"http://localhost:{port}/"}})
return {"state": state, "allow_from": ip}, 201
abort(403)
if __name__ == '__main__':
app.run("0.0.0.0", 18948, True)
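
# Client-side flow sketch (illustrative, not part of this server): a local app
# first reserves a state token, then sends the user to the GitFlic authorize
# page with this server as the redirect target.
#
#     r = requests.get('https://gitflic.santaspeen.ru/getstate?port=50000')
#     state = r.json()['state']
#     # user visits https://oauth.gitflic.ru/oauth/authorize?...&state=<state>
#     # GitFlic POSTs {'state': ..., 'code': ...} here; the browser is then
#     # redirected to http://localhost:50000/?code=...&state=<state>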
| 28.109589 | 107 | 0.615984 | 267 | 2,052 | 4.636704 | 0.393258 | 0.051696 | 0.042003 | 0.055735 | 0.164782 | 0.164782 | 0.164782 | 0.164782 | 0.164782 | 0.164782 | 0 | 0.040198 | 0.211988 | 2,052 | 72 | 108 | 28.5 | 0.725417 | 0.07846 | 0 | 0.25 | 0 | 0 | 0.230852 | 0.026781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0.022727 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec344b7aa9b7b7dc268010d8de19cfb167034b6e | 5,241 | py | Python | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/packaging/language/cpanm.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 1 | 2020-10-14T00:06:54.000Z | 2020-10-14T00:06:54.000Z | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/packaging/language/cpanm.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/packaging/language/cpanm.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 2 | 2015-08-06T07:45:48.000Z | 2017-01-04T17:47:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Franck Cuny <franck@lumberjaph.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies.
description:
- Manage Perl library dependencies.
version_added: "1.6"
options:
name:
description:
- The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
required: false
default: null
aliases: ["pkg"]
from_path:
description:
- The local directory from where to install
required: false
default: null
notest:
description:
- Do not run unit tests
required: false
default: false
locallib:
description:
- Specify the install base to install modules
required: false
default: false
mirror:
description:
- Specifies the base URL for the CPAN mirror to use
required: false
default: false
mirror_only:
description:
- Use the mirror's index file instead of the CPAN Meta DB
required: false
default: false
examples:
- code: "cpanm: name=Dancer"
description: Install I(Dancer) perl package.
- code: "cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz"
description: Install version 0.99_05 of the I(Plack) perl package.
- code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib"
description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)"
- code: "cpanm: from_path=/srv/webapps/my_app/src/"
description: Install perl dependencies from local directory.
- code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib"
description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).
- code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/"
description: Install I(Dancer) perl package from a specific mirror
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: Franck Cuny
'''
def _is_package_installed(module, name, locallib, cpanm):
cmd = ""
if locallib:
os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
cmd = "%s perl -M%s -e '1'" % (cmd, name)
res, stdout, stderr = module.run_command(cmd, check_rc=False)
if res == 0:
return True
else:
return False
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm):
# this code should use "%s" like everything else and just return early but not fixing all of it now.
# don't copy stuff like this
if from_path:
cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path)
else:
cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name)
if notest is True:
cmd = "{cmd} -n".format(cmd=cmd)
if locallib is not None:
cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib)
if mirror is not None:
cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror)
if mirror_only is True:
cmd = "{cmd} --mirror-only".format(cmd=cmd)
return cmd
def main():
arg_spec = dict(
name=dict(default=None, required=False, aliases=['pkg']),
from_path=dict(default=None, required=False),
notest=dict(default=False, type='bool'),
locallib=dict(default=None, required=False),
mirror=dict(default=None, required=False),
mirror_only=dict(default=False, type='bool'),
)
module = AnsibleModule(
argument_spec=arg_spec,
required_one_of=[['name', 'from_path']],
)
cpanm = module.get_bin_path('cpanm', True)
name = module.params['name']
from_path = module.params['from_path']
notest = module.boolean(module.params.get('notest', False))
locallib = module.params['locallib']
mirror = module.params['mirror']
mirror_only = module.params['mirror_only']
changed = False
installed = _is_package_installed(module, name, locallib, cpanm)
if not installed:
out_cpanm = err_cpanm = ''
cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm)
rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
if rc_cpanm != 0:
module.fail_json(msg=err_cpanm, cmd=cmd)
if err_cpanm and 'is up to date' not in err_cpanm:
changed = True
module.exit_json(changed=changed, binary=cpanm, name=name)
# import module snippets
from ansible.module_utils.basic import *
main()
| 33.170886 | 126 | 0.672582 | 727 | 5,241 | 4.766162 | 0.314993 | 0.037518 | 0.034632 | 0.02886 | 0.2557 | 0.180952 | 0.121212 | 0.084271 | 0.065224 | 0.065224 | 0 | 0.006836 | 0.21847 | 5,241 | 157 | 127 | 33.382166 | 0.839111 | 0.163518 | 0 | 0.185841 | 0 | 0.035398 | 0.495186 | 0.039202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026549 | false | 0 | 0.00885 | 0 | 0.061947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec34fcd27c4d0849f4617b2ff48d291c0d3e7793 | 5,877 | py | Python | python/pong.py | rajszym/Pong | ddf6e3a9853ce9d3487636f32b408677b4487210 | [
"MIT"
] | null | null | null | python/pong.py | rajszym/Pong | ddf6e3a9853ce9d3487636f32b408677b4487210 | [
"MIT"
] | null | null | null | python/pong.py | rajszym/Pong | ddf6e3a9853ce9d3487636f32b408677b4487210 | [
"MIT"
] | null | null | null | import os, sys, pygame
White = (0xFF, 0xFF, 0xFF)
Red = (0xFF, 0x00, 0x00)
Yellow = (0xFF, 0xFF, 0x00)
Green = (0x00, 0xFF, 0x00)
Blue = (0x00, 0x00, 0xFF)
Black = (0x00, 0x00, 0x00)
BKG = Black
class Player(pygame.Rect):
def __init__(self):
pygame.Rect.__init__(self, (0, 0), PlayerSize)
self.center = ScreenWidth // 2, ScreenHeight - 24
self.speed = 2
def move_left(self):
self.x -= self.speed
if self.colliderect(ball): self.left = ball.right
if self.left < 0: self.left = 0
def move_right(self):
self.x += self.speed
if self.colliderect(ball): self.right = ball.left
if self.right > ScreenWidth: self.right = ScreenWidth
def update(self):
pygame.draw.rect(screen, Yellow, self)
class Ball(pygame.Rect):
def __init__(self):
pygame.Rect.__init__(self, (0, 0), BallSize)
self.center = ScreenWidth // 2, ScreenHeight // 2
self.xspeed = self.yspeed = 1
def move(self):
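        # move along x and y separately so a collision can be attributed to
        # one axis and only that speed component is reflected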
global score
posx = self.x
pboom = xboom = yboom = False
self.x += self.xspeed
i = self.collidelist(wall.bricks)
brick = wall.bricks.pop(i) if i >= 0 else None
if brick:
xboom = True; score += brick.score
if self.xspeed > 0: self.right = brick.left
if self.xspeed < 0: self.left = brick.right
if self.colliderect(player):
xboom = True; score += 1
if self.xspeed > 0: self.right = player.left
if self.xspeed < 0: self.left = player.right
if self.left < 0:
xboom = True; self.left = 0
if self.right > ScreenWidth:
xboom = True; self.right = ScreenWidth
newx = self.x; self.x = posx
self.y += self.yspeed
i = self.collidelist(wall.bricks)
brick = wall.bricks.pop(i) if i >= 0 else None
if brick:
yboom = True; score += brick.score
if self.yspeed > 0: self.bottom = brick.top
if self.yspeed < 0: self.top = brick.bottom
if self.colliderect(player):
pboom = yboom = True; score += 1
if self.yspeed > 0: self.bottom = player.top
if self.yspeed < 0: self.top = player.bottom
if self.top < 0:
yboom = True; self.top = 0
if self.bottom > ScreenHeight:
yboom = True; self.bottom = ScreenHeight; score -= 1
self.x = newx
if not xboom and not yboom:
i = self.collidelist(wall.bricks)
brick = wall.bricks.pop(i) if i >= 0 else None
if brick:
xboom = yboom = True; score += brick.score
if self.xspeed > 0: self.right = brick.left
if self.xspeed < 0: self.left = brick.right
if self.yspeed > 0: self.bottom = brick.top
if self.yspeed < 0: self.top = brick.bottom
if self.colliderect(player):
xboom = yboom = True; score += 1
if self.xspeed > 0: self.right = player.left
if self.xspeed < 0: self.left = player.right
if self.yspeed > 0: self.bottom = player.top
if self.yspeed < 0: self.top = player.bottom
if pboom and (self.yspeed > 0):
key = pygame.key.get_pressed()
if self.xspeed > 0 and key[pygame.K_LEFT ]: xboom = True
if self.xspeed < 0 and key[pygame.K_RIGHT]: xboom = True
if xboom: self.xspeed = - self.xspeed
if yboom: self.yspeed = - self.yspeed
def update(self):
pygame.draw.rect(screen, Red, self)
class Brick(pygame.Rect):
def __init__(self, x, y):
p = 255 * y // WallHeight
self.color = (0, p, 255 - p)
self.score = WallHeight - y
x = x * BrickWidth // 2 + 1
y = (y + WallPos) * BrickHeight + 1
pygame.Rect.__init__(self, (x, y), (BrickWidth - 2, BrickHeight - 2))
def update(self):
pygame.draw.rect(screen, self.color, self)
class Wall(object):
def __init__(self):
self.bricks = []
for y in range(0, WallHeight, 2):
for x in range( 0, WallWidth * 2, 2):
self.bricks.append(Brick(x, y))
for y in range(1, WallHeight, 2):
for x in range(-1, WallWidth * 2, 2):
self.bricks.append(Brick(x, y))
def update(self):
for brick in self.bricks:
brick.update()
def showInfo():
font = pygame.font.Font(None, 24)
text = font.render(str(score), 1, White)
rect = text.get_rect(); rect = rect.move(ScreenWidth - rect.right - 4, 4)
screen.blit(text, rect)
text = font.render(str(len(wall.bricks)), 1, White)
rect = text.get_rect(); rect = rect.move(rect.left + 4, 4)
screen.blit(text, rect)
def showEnd():
font = pygame.font.SysFont('Verdana', 32)
text = font.render("GAME OVER", 1, White)
rect = text.get_rect(); rect.center = ScreenWidth // 2, ScreenHeight // 2
screen.blit(text, rect)
    pygame.display.update()
pygame.time.wait(1000)
def main():
global screen, player, ball, wall, score
screen = pygame.display.set_mode(ScreenSize)
player = Player()
ball = Ball()
wall = Wall()
score = 0
automat = False
while wall.bricks:
pygame.time.delay(3)
for e in pygame.event.get():
if e.type == pygame.QUIT: return
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE: return
if e.type == pygame.KEYDOWN and e.key == pygame.K_a: automat = True
if e.type == pygame.KEYDOWN and e.key == pygame.K_LEFT: automat = False
if e.type == pygame.KEYDOWN and e.key == pygame.K_RIGHT: automat = False
if automat:
if ball.x < player.x: player.move_left()
if ball.x + BallWidth > player.x + PlayerWidth: player.move_right()
else:
key = pygame.key.get_pressed()
if key[pygame.K_LEFT ]: player.move_left()
if key[pygame.K_RIGHT]: player.move_right()
ball.move()
screen.fill(BKG)
player.update()
ball.update()
wall.update()
showInfo()
pygame.display.update()
showEnd()
ScreenSize = ScreenWidth, ScreenHeight = 640, 480
BrickSize = BrickWidth, BrickHeight = 32, 16
WallPos = 3
WallSize = WallWidth, WallHeight = ScreenWidth // BrickWidth, 10
BallSize = BallWidth, BallHeight = 8, 8
PlayerSize = PlayerWidth, PlayerHeight = 64, 4
if __name__ == "__main__":
os.environ["SDL_VIDEO_CENTERED"] = "1"
os.environ["SDL_VIDEODRIVER"] = "windib"
pygame.init()
pygame.display.set_caption("PONG!")
main()
pygame.quit()
| 26.835616 | 75 | 0.652714 | 889 | 5,877 | 4.247469 | 0.152981 | 0.046081 | 0.03178 | 0.034428 | 0.476695 | 0.431939 | 0.392214 | 0.351695 | 0.337924 | 0.302436 | 0 | 0.030277 | 0.207589 | 5,877 | 218 | 76 | 26.958716 | 0.780545 | 0 | 0 | 0.25 | 0 | 0 | 0.011741 | 0 | 0 | 0 | 0.012251 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.005952 | 0 | 0.113095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec375bc4d8ef16784bec0fbac23a1ea72eb34608 | 13,406 | py | Python | RL-snake/main.py | Yashprime1/RL--Snake | 9eb1a6306bad83c263f2fec7752d2d772860990c | [
"MIT"
] | null | null | null | RL-snake/main.py | Yashprime1/RL--Snake | 9eb1a6306bad83c263f2fec7752d2d772860990c | [
"MIT"
] | null | null | null | RL-snake/main.py | Yashprime1/RL--Snake | 9eb1a6306bad83c263f2fec7752d2d772860990c | [
"MIT"
] | null | null | null | import pygame
import random
import itertools
import json
import os
import matplotlib.pyplot as plt
from IPython import display
plt.ion()
def plot(scores, mean_scores,save=False):
display.clear_output(wait=True)
display.display(plt.gcf())
plt.clf()
plt.title('Training...')
plt.xlabel('Number of Games')
plt.ylabel('Score')
plt.plot(scores)
plt.plot(mean_scores)
plt.ylim(ymin=0)
plt.text(len(scores)-1, scores[-1], str(scores[-1]))
plt.text(len(mean_scores)-1, mean_scores[-1], str(mean_scores[-1]))
plt.show(block=False)
plt.pause(.1)
if save==True:
plt.savefig('qlearning_main.png')
###### Initialization ######
# For screen
dimension_x=200
dimension_y=200
# For snake
snake_color=(0,0,255) #blue
head_x=int(dimension_x/2)
head_y=int(dimension_y/2)
snake_body=[(head_x,head_y)]
# For food
food_color=(255,0,0) #Red
food_x=random.randrange(0,dimension_x,10)
food_y=random.randrange(0,dimension_y,10)
# For RL Agent
states = list(itertools.product(*[(0, 1)] * 11))
actions=[0,1,2]
q_table={}
for state in states:
for action in actions:
q_table[(*state,action)]=0
try:
with open('./q_table_main.json') as json_file:
if os.stat("./q_table_main.json").st_size != 0:
q_table = json.load(json_file)
q_table=dict((tuple(map(int,k.split(","))), v) for k,v in q_table.items())
else:
print("File empty")
except OSError:
print("File not found")
epsilon=0.7
LR=0.1
discount=0.6
state_next=()
reward_next=0
action=1 # initially right
t=1
max_games=10000
# For game
black=(0,0,0)
scores=[]
mean_scores=[]
record=0
score=0
n_games=1
total_score=0
game_over= False
current_direction = pygame.K_RIGHT
try:
with open('./savedparams_main.json') as json_file:
if os.stat("./savedparams_main.json").st_size != 0:
saved_params = json.load(json_file)
epsilon=saved_params["epsilon"]
LR=saved_params["LR"]
discount=saved_params["discount"]
state_next=saved_params["state_next"]
reward_next=saved_params["reward_next"]
action=saved_params["action"]
max_games=saved_params["max_games"]
scores=saved_params["scores"]
mean_scores=saved_params["mean_scores"]
record=saved_params["record"]
score=saved_params["score"]
n_games=saved_params["n_games"]
total_score=saved_params["total_scores"]
current_direction=saved_params["current_direction"]
head_x=saved_params["head_x"]
head_y=saved_params["head_y"]
snake_body=saved_params["snake_body"]
food_x=saved_params["food_x"]
food_y=saved_params["food_y"]
else:
print("File empty")
except OSError:
print("File not found")
pygame.init()
display_board=pygame.display.set_mode((dimension_x,dimension_y))
pygame.display.update()
###### Game ######
# place food on screen
def place_food():
global dimension_x,dimension_y,display_board,food_color,food_x,food_y,snake_body
food_x=random.randrange(0,dimension_x,10)
food_y=random.randrange(0,dimension_y,10)
if((food_x,food_y) in snake_body) : place_food()
pygame.draw.rect(display_board,food_color,[food_x,food_y,10,10])
pygame.display.update()
def show_score():
global score,display_board
"""system font"""
font = pygame.font.SysFont("Segoe UI", 35)
textsurface = font.render("Score :{} ".format(score), False,(0,255,0)) # "text", antialias, color
display_board.blit(textsurface, (0, 0))
# move snake on screen
def move_snake(speed=10):
global game_over,black,head_x,head_y,snake_body,snake_color,food_x,food_y,current_direction,record,score,display_board
reward=0
# remove previous snake from screen
for (x,y) in snake_body:
pygame.draw.rect(display_board,black,[x,y,10,10])
pygame.display.update()
if current_direction == pygame.K_LEFT:
head_x -= speed
if current_direction == pygame.K_RIGHT:
head_x += speed
if current_direction == pygame.K_UP:
head_y -= speed
if current_direction == pygame.K_DOWN:
head_y += speed
if snake_collided(head_x,head_y):
reward-=10
init_game()
if head_x==food_x and head_y==food_y:
snake_body.insert(0,(head_x,head_y))
score+=1
reward+=10
display_board.fill(black)
place_food()
else:
reward+=(-0.01)
snake_body.insert(0,(head_x,head_y))
snake_body.pop()
# add new snake from screen
for (x,y) in snake_body:
pygame.draw.rect(display_board,snake_color,[x,y,10,10])
pygame.display.update()
return reward
def snake_collided(x,y):
global dimension_x,dimension_y,snake_body
if x<0 or y<0 or x>dimension_x or y>dimension_y or (x,y) in snake_body[1:]:
return True
return False
def init_game():
global head_x,head_y,snake_body,food_x,food_y,current_direction,display_board,score,mean_scores,scores,total_score,epsilon,n_games,record,max_games
if score>record:
record=score
print('Game ', n_games, 'Score', score, 'Record:', record)
scores.append((score))
total_score += score
mean_score = total_score / n_games
n_games+=1
mean_scores.append(mean_score)
plot(scores, mean_scores)
if(epsilon>(n_games/max_games)): epsilon-=(n_games/max_games)
display_board.fill(black)
head_x=int(dimension_x/2)
head_y=int(dimension_y/2)
snake_body=[(head_x,head_y)]
food_x=random.randrange(0,dimension_x,10)
food_y=random.randrange(0,dimension_y,10)
current_direction = pygame.K_RIGHT
score=0
if(epsilon>0): epsilon-=0.0001
place_food()
###### RL Agent ######
def play_agent(state):
global q_table,epsilon,actions
print("Agent : ")
# step 2: choose action e-greedy
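    # Epsilon-greedy action selection: with probability epsilon take a
    # uniformly random action (explore); otherwise take argmax_a Q(state, a)
    # (exploit), falling back to a random action when no Q-value exceeds 0.
    # epsilon itself is decayed in init_game() as training progresses.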
if random.random()<epsilon:
max_action = random.randint(0,len(actions)-1)
print("Exploration : ",max_action)
return max_action
else:
max_action=-1
max_qvalue=0
# t_exploit+=1
for action in actions:
if(q_table[(*state,action)]>max_qvalue):
max_qvalue=q_table[(*state,action)]
max_action=action
if max_action==-1:
max_action = random.randint(0,len(actions)-1)
print("Exploitation : ",max_action)
# if t_exploit>100:
# if n_games>100:
# epsilon=0.1
# else:
# epsilon=0.4
# t_exploit=1
return max_action
def update_qtable(state,action,state_next,reward_next):
global q_table,epsilon,actions,LR,discount
# step 3: update q_table
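    # Tabular Q-learning (Bellman) update:
    #   Q(s, a) <- Q(s, a) + LR * (reward + discount * max_a' Q(s', a') - Q(s, a))
    # The loop below computes max_a' Q(s', a') over the three relative actions.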
max_action=0
max_qvalue=0
for act in actions:
if(q_table[(*state_next,act)]>max_qvalue):
max_qvalue=q_table[(*state_next,act)]
max_action=act
q_next = q_table[(*state_next,max_action)]
q_table[(*state,action)] = q_table[(*state,action)] + LR * (reward_next + discount*q_next - q_table[(*state,action)] )
def take_action(action):
global current_direction
key_actions=[pygame.K_LEFT,pygame.K_UP,pygame.K_RIGHT,pygame.K_DOWN]
if(action==0): pass
elif(action==1):
current_direction=key_actions[(key_actions.index(current_direction)-1) % len(key_actions)]
elif(action==2):
current_direction=key_actions[(key_actions.index(current_direction)+1) % len(key_actions)]
def get_state(speed=10,lookahead=1):
global current_direction,head_x,head_y
points_l=[]
points_r=[]
points_u=[]
points_d=[]
for look in range(1,lookahead+1):
point_l = (head_x-look*speed,head_y)
point_r = (head_x+look*speed,head_y)
point_u = (head_x,head_y-look*speed)
point_d = (head_x,head_y+look*speed)
points_l.append(point_l)
points_r.append(point_r)
points_u.append(point_u)
points_d.append(point_d)
dir_l = current_direction == pygame.K_LEFT
dir_r = current_direction == pygame.K_RIGHT
dir_u = current_direction == pygame.K_UP
dir_d = current_direction == pygame.K_DOWN
danger_straight=False
danger_right=False
danger_left=False
# Danger Straight
for look in range(lookahead):
danger_straight = (dir_r and snake_collided(points_r[look][0],points_r[look][1])) or (dir_l and snake_collided(points_l[look][0],points_l[look][1])) or (dir_u and snake_collided(points_u[look][0],points_u[look][1])) or (dir_d and snake_collided(points_d[look][0],points_d[look][1]))
if danger_straight==True:
break
# Danger Right
for look in range(lookahead):
danger_right = (dir_r and snake_collided(points_d[look][0],points_d[look][1])) or (dir_d and snake_collided(points_l[look][0],points_l[look][1])) or (dir_l and snake_collided(points_u[look][0],points_u[look][1])) or (dir_u and snake_collided(points_r[look][0],points_r[look][1]))
if danger_right==True:
break
# Danger Left
for look in range(lookahead):
danger_left = (dir_r and snake_collided(points_u[look][0],points_u[look][1])) or (dir_u and snake_collided(points_l[look][0],points_l[look][1])) or (dir_l and snake_collided(points_d[look][0],points_d[look][1])) or (dir_d and snake_collided(points_r[look][0],points_r[look][1]))
if danger_left==True:
break
state= (
# Danger straight
danger_straight ,
# Danger right
danger_right,
# Danger left
danger_left,
# Move direction
dir_l,
dir_r,
dir_u,
dir_d,
# Food location
food_x<head_x,
food_x>head_x,
food_y<head_y,
        food_y>head_y,
)
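    # NOTE: this tuple must stay 11 elements long -- q_table keys are built
    # from itertools.product(*[(0, 1)] * 11) plus the action index.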
return state
clock = pygame.time.Clock()
place_food()
try:
while not game_over:
show_score()
for event in pygame.event.get():
print(food_x,food_y,head_x,head_y)
if event.type==pygame.QUIT:
plot(scores, mean_scores,save=True)
# the json file where the output must be stored
print(q_table,"It is getting saved\n")
q_table=dict((str(','.join(map(str, k))), v) for k,v in q_table.items())
pygame.image.save(display_board, "./snapshot_main.jpeg")
out_file = open("./q_table_main.json", "w")
json.dump(q_table, out_file)
out_file.close()
saved_params={}
saved_params["epsilon"]=epsilon
saved_params["LR"]=LR
saved_params["discount"]=discount
saved_params["state_next"]=state_next
saved_params["reward_next"]=reward_next
saved_params["action"]=action
saved_params["max_games"]=max_games
saved_params["scores"]=scores
saved_params["mean_scores"]=mean_scores
saved_params["record"]=record
saved_params["score"]=score
saved_params["n_games"]=n_games
saved_params["total_scores"]=total_score
saved_params["current_direction"]=current_direction
saved_params["head_x"]=head_x
saved_params["head_y"]=head_y
saved_params["snake_body"]=snake_body
saved_params["food_x"]=food_x
saved_params["food_y"]=food_y
out_file = open("./savedparams_main.json", "w")
                json.dump(saved_params, out_file)
out_file.close()
game_over=True
# agent playing
state=get_state()
action = play_agent(state)
take_action(action)
reward=move_snake()
next_state=get_state()
update_qtable(state,action,next_state,reward)
t+=1
clock.tick(30)
except KeyboardInterrupt:
plot(scores, mean_scores,save=True)
# the json file where the output must be stored
print(q_table,"It is getting saved\n")
q_table=dict((str(','.join(map(str, k))), v) for k,v in q_table.items())
pygame.image.save(display_board, "./snapshot_main.jpeg")
out_file = open("./q_table_main.json", "w")
json.dump(q_table, out_file)
out_file.close()
saved_params={}
saved_params["epsilon"]=epsilon
saved_params["LR"]=LR
saved_params["discount"]=discount
saved_params["state_next"]=state_next
saved_params["reward_next"]=reward_next
saved_params["action"]=action
saved_params["max_games"]=max_games
saved_params["scores"]=scores
saved_params["mean_scores"]=mean_scores
saved_params["record"]=record
saved_params["score"]=score
saved_params["n_games"]=n_games
saved_params["total_scores"]=total_score
saved_params["current_direction"]=current_direction
saved_params["head_x"]=head_x
saved_params["head_y"]=head_y
saved_params["snake_body"]=snake_body
saved_params["food_x"]=food_x
saved_params["food_y"]=food_y
out_file = open("./savedparams_main.json", "w")
json.dump(saved_params, out_file)
out_file.close()
game_over=True
pygame.quit()
quit()
| 31.84323 | 292 | 0.635313 | 1,926 | 13,406 | 4.15161 | 0.117342 | 0.083917 | 0.015758 | 0.015008 | 0.544772 | 0.458854 | 0.415083 | 0.395573 | 0.358304 | 0.347549 | 0 | 0.017829 | 0.23855 | 13,406 | 420 | 293 | 31.919048 | 0.765478 | 0.046546 | 0 | 0.365559 | 0 | 0 | 0.069256 | 0.007249 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030211 | false | 0.003021 | 0.024169 | 0 | 0.072508 | 0.033233 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec39ff5313fd7c3fd2b71acde83c9cf8579b9481 | 4,999 | py | Python | backend/boards/models.py | xelam11/TaskPlanner | c1e940d2a9babba7721c0f2d261e1c7df9c48581 | [
"BSD-3-Clause"
] | null | null | null | backend/boards/models.py | xelam11/TaskPlanner | c1e940d2a9babba7721c0f2d261e1c7df9c48581 | [
"BSD-3-Clause"
] | null | null | null | backend/boards/models.py | xelam11/TaskPlanner | c1e940d2a9babba7721c0f2d261e1c7df9c48581 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from users.models import CustomUser
class BoardManager(models.Manager):
def create_board(self, author, **kwargs):
current_user = author
board = Board.objects.create(author=current_user, **kwargs)
ParticipantInBoard.objects.create(board=board,
participant=current_user,
is_moderator=True)
for color in Tag.Color.values:
Tag.objects.create(color=color, board=board)
return board
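# Usage sketch (illustrative, not from the original code):
#   board = Board.objects.create_board(author=request.user, name="Sprint 42")
# create_board creates the Board, registers the author as a moderator
# participant, and seeds one Tag per Color value defined below.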
class Board(models.Model):
name = models.CharField(max_length=50,
verbose_name='Название',
help_text='Напишите название',
)
    description = models.TextField(verbose_name='Описание',
help_text='Напишите описание',
blank=True,
)
avatar = models.ImageField(upload_to='board_avatars',
blank=True,
verbose_name='Аватар',
help_text='Загрузите аватар'
)
author = models.ForeignKey(CustomUser,
on_delete=models.CASCADE,
related_name='boards_author',
verbose_name='Автор',
)
participants = models.ManyToManyField(CustomUser,
through='ParticipantInBoard',
related_name='boards_participants',
blank=True,
verbose_name='Участники',
)
objects = BoardManager()
class Meta:
verbose_name = 'Доска'
verbose_name_plural = 'Доски'
ordering = ['name']
def __str__(self):
return self.name
class Tag(models.Model):
class Color(models.IntegerChoices):
RED = 1
ORANGE = 2
YELLOW = 3
GREEN = 4
BLUE = 5
PURPLE = 6
color_to_hex = {
Color.RED: '#f35a5a',
Color.ORANGE: '#ff9b63',
Color.YELLOW: '#fdff97',
Color.GREEN: '#9bc665',
Color.BLUE: '#67b5fd',
Color.PURPLE: '#c173ff'
}
name = models.CharField(max_length=20,
verbose_name='Название тега',
help_text='Напишите название тега',
blank=True,
default=''
)
color = models.PositiveSmallIntegerField(choices=Color.choices
)
board = models.ForeignKey(Board,
on_delete=models.CASCADE,
related_name='tags',
verbose_name='Доска',
)
@property
def hex(self):
return Tag.color_to_hex[self.color]
class Meta:
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
def __str__(self):
return f"Color: '{self.color}', name: '{self.name}'"
class Favorite(models.Model):
user = models.ForeignKey(CustomUser,
on_delete=models.CASCADE,
related_name='favorite_subscriber',
verbose_name='Пользователь',
)
board = models.ForeignKey(Board,
on_delete=models.CASCADE,
related_name='favorite_board',
verbose_name='Доска',
)
pub_date = models.DateTimeField(auto_now_add=True,
verbose_name='Дата добавления',
)
class Meta:
verbose_name = 'Избранный'
verbose_name_plural = 'Избранные'
constraints = [models.UniqueConstraint(
fields=['user', 'board'],
name='unique_favorites_boards')]
def __str__(self):
return (f'Пользователь: {self.user}, '
f'избранные доски: {self.board.name}')
class ParticipantInBoard(models.Model):
board = models.ForeignKey(Board,
on_delete=models.CASCADE,
verbose_name='Доска',
)
participant = models.ForeignKey(CustomUser,
on_delete=models.CASCADE,
verbose_name='Участник'
)
is_moderator = models.BooleanField(default=False)
class Meta:
verbose_name = 'Участник в доске'
verbose_name_plural = 'Участники в досках'
def __str__(self):
return f'Участник: {self.participant} => {self.board}'
| 34.475862 | 77 | 0.468894 | 395 | 4,999 | 5.736709 | 0.313924 | 0.097087 | 0.03707 | 0.055605 | 0.207855 | 0.160635 | 0.150927 | 0.123124 | 0.102383 | 0.051192 | 0 | 0.010152 | 0.44829 | 4,999 | 144 | 78 | 34.715278 | 0.811458 | 0 | 0 | 0.201681 | 0 | 0 | 0.112823 | 0.004601 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05042 | false | 0 | 0.016807 | 0.042017 | 0.336134 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec3a1ba3d6ddeb0aa249439a8c2888cd61f0d04f | 1,605 | py | Python | extractcondo.py | mattyschell/geodatabase-buildings-condoetl | b9be05f749d3c1d7c7dec2f86bbc8776141a93bf | [
"CC0-1.0"
] | null | null | null | extractcondo.py | mattyschell/geodatabase-buildings-condoetl | b9be05f749d3c1d7c7dec2f86bbc8776141a93bf | [
"CC0-1.0"
] | 4 | 2021-08-31T18:27:25.000Z | 2021-09-17T20:26:43.000Z | extractcondo.py | mattyschell/geodatabase-buildings-condoetl | b9be05f749d3c1d7c7dec2f86bbc8776141a93bf | [
"CC0-1.0"
] | null | null | null | import os
import logging
import time
import pathlib
import condo
def main(sourcesdeconn
,outputdir):
sourcecondo = condo.Condo()
sourcecondo.extracttofile('DOF_TAXMAP.Condo'
,outputdir)
return sourcecondo.countcondos()
if __name__ == '__main__':
psourcesdeconn = os.environ['SDEFILE']
timestr = time.strftime("%Y%m%d-%H%M%S")
retval = 1
try:
targetlog = os.path.join(os.environ['TARGETLOGDIR']
,'extractcondo-{0}.log'.format(timestr))
except:
targetlog = os.path.join(os.getcwd()
,'extractcondo-{0}.log'.format(timestr))
logging.basicConfig(filename=targetlog
,level=logging.INFO)
datadir = os.path.join(pathlib.Path(__file__).parent
,'data')
if not os.path.isfile(psourcesdeconn):
logging.error("Condo source sde file {0} does not exist, "
"check SDEFILE environmental".format(psourcesdeconn))
exit(retval)
kount = main(psourcesdeconn
,datadir)
# at this point our csv still has two bad duplicate types
# condo_base_bbl condo_billing_bbl
# A X
# A X
# B Y
# B Z
if (kount == 0 or kount is None):
logging.error('Failed to extract any condos')
else:
logging.info('Successfully extracted {0} bbls to data directory'.format(kount))
retval = 0
    exit(retval)
| 25.887097 | 87 | 0.545794 | 167 | 1,605 | 5.143713 | 0.54491 | 0.027939 | 0.034924 | 0.044237 | 0.116414 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006783 | 0.357009 | 1,605 | 62 | 88 | 25.887097 | 0.825581 | 0.11215 | 0 | 0.108108 | 0 | 0 | 0.173362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.135135 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec3b0f1dfb39b25fc236995951ab1bc52cc30f8b | 690 | py | Python | MarketData/testMKup.py | yirencaifu/pyWindMongoDB | df1bf3d0bfe6872a5d18fa2f72beccafd7c93b73 | [
"MIT"
] | null | null | null | MarketData/testMKup.py | yirencaifu/pyWindMongoDB | df1bf3d0bfe6872a5d18fa2f72beccafd7c93b73 | [
"MIT"
] | null | null | null | MarketData/testMKup.py | yirencaifu/pyWindMongoDB | df1bf3d0bfe6872a5d18fa2f72beccafd7c93b73 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 27 08:11:48 2014
@author: space_000
"""
from scipy.io import loadmat
from WindPy import w
import pymongo as mg
from wsiTools import findDate
from mgWsi import upiter
d=loadmat('D:\FieldSHSZ')
Field=d['Field'].tolist()
stride=100
numF=range(len(Field))[::stride]
dt=loadmat('D:\dataTime')
timeD=dt['time']
times=findDate(timeD,'20140925',30)
w.start()
client=mg.MongoClient()
db=client['MKD']
col=db['minData']
for t in times:
for f in numF:
data=w.wsi(Field[f:f+stride],'open,high,low,close,volume',str(t),str(t)+'15:01:00','showblank=0',barsize=1).Data[1:]
uniField=set(data[0])
upiter(data,uniField,t,col)
| 20.294118 | 124 | 0.678261 | 116 | 690 | 4.025862 | 0.62931 | 0.034261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065327 | 0.134783 | 690 | 33 | 125 | 20.909091 | 0.716918 | 0.113043 | 0 | 0 | 0 | 0 | 0.157285 | 0.043046 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.238095 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec3ed91b0547d338c06ce42617025e071787d68a | 4,585 | py | Python | dokdo/api/beta_3d_plot.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 23 | 2020-11-01T21:55:30.000Z | 2021-12-05T14:03:05.000Z | dokdo/api/beta_3d_plot.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 25 | 2020-11-25T23:24:23.000Z | 2022-03-30T04:40:45.000Z | dokdo/api/beta_3d_plot.py | sbslee/dokdo | a528a830b3347c39e1dc415b0f3e2c6ad60b0a1d | [
"MIT"
] | 7 | 2020-11-27T06:46:47.000Z | 2021-09-25T03:26:07.000Z | from . import common
import pandas as pd
import matplotlib.pyplot as plt
from skbio.stats.ordination import OrdinationResults
from qiime2 import Artifact
def beta_3d_plot(
artifact, metadata=None, hue=None, azim=-60, elev=30, s=80, ax=None,
figsize=None, hue_order=None
):
"""
Create a 3D scatter plot from PCoA results.
+---------------------+---------------------------------------------------+
| q2-diversity plugin | Example |
+=====================+===================================================+
| QIIME 2 CLI | qiime diversity pcoa [OPTIONS] |
+---------------------+---------------------------------------------------+
| QIIME 2 API | from qiime2.plugins.diversity.methods import pcoa |
+---------------------+---------------------------------------------------+
Parameters
----------
artifact : str or qiime2.Artifact
Artifact file or object from the q2-diversity plugin with the
semantic type ``PCoAResults`` or
``PCoAResults % Properties('biplot')``.
metadata : str or qiime2.Metadata, optional
Metadata file or object.
hue : str, optional
Grouping variable that will produce points with different colors.
azim : int, default: -60
Azimuthal viewing angle.
elev : int, default: 30
Elevation viewing angle.
s : float, default: 80.0
Marker size.
ax : matplotlib.axes.Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
hue_order : list, optional
Specify the order of categorical levels of the 'hue' semantic.
Returns
-------
matplotlib.axes.Axes
Axes object with the plot drawn onto it.
See Also
--------
dokdo.api.ordinate
dokdo.api.beta_2d_plot
dokdo.api.beta_scree_plot
dokdo.api.beta_parallel_plot
dokdo.api.addbiplot
Examples
--------
Below is a simple example:
.. code:: python3
import dokdo
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
qza_file = '/Users/sbslee/Desktop/dokdo/data/moving-pictures-tutorial/unweighted_unifrac_pcoa_results.qza'
metadata_file = '/Users/sbslee/Desktop/dokdo/data/moving-pictures-tutorial/sample-metadata.tsv'
dokdo.beta_3d_plot(qza_file,
metadata_file,
'body-site',
figsize=(8, 8))
plt.tight_layout()
.. image:: images/beta_3d_plot-1.png
We can control the camera angle with ``elev`` and ``azim``:
.. code:: python3
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
ax2 = fig.add_subplot(1, 2, 2, projection='3d')
dokdo.beta_3d_plot(qza_file,
metadata_file,
ax=ax1,
hue='body-site',
elev=15)
dokdo.beta_3d_plot(qza_file,
metadata_file,
ax=ax2,
hue='body-site',
azim=70)
plt.tight_layout()
.. image:: images/beta_3d_plot-2.png
"""
if isinstance(artifact, str):
_pcoa_results = Artifact.load(artifact)
else:
_pcoa_results = artifact
ordination_results = _pcoa_results.view(OrdinationResults)
df = ordination_results.samples.iloc[:, :3]
df.columns = ['A1', 'A2', 'A3']
props = ordination_results.proportion_explained
    if metadata is not None:
        mf = common.get_mf(metadata)
        df = pd.concat([df, mf], axis=1, join='inner')
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.view_init(azim=azim, elev=elev)
if hue is None:
ax.scatter(df['A1'], df['A2'], df['A3'], s=s)
else:
if hue_order is None:
_hue_order = df[hue].unique()
else:
_hue_order = hue_order
for label in _hue_order:
a = df[df[hue] == label]
ax.scatter(a['A1'], a['A2'], a['A3'], label=label, s=s)
ax.set_xlabel(f'Axis 1 ({props[0]*100:.2f} %)')
ax.set_ylabel(f'Axis 2 ({props[1]*100:.2f} %)')
ax.set_zlabel(f'Axis 3 ({props[2]*100:.2f} %)')
ax.legend()
return ax
| 32.062937 | 114 | 0.532606 | 533 | 4,585 | 4.470919 | 0.35272 | 0.0235 | 0.025178 | 0.018884 | 0.153588 | 0.118338 | 0.118338 | 0.118338 | 0.074696 | 0 | 0 | 0.027069 | 0.299019 | 4,585 | 142 | 115 | 32.288732 | 0.714375 | 0.615921 | 0 | 0.097561 | 0 | 0 | 0.077295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.121951 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec3edfbe3bdf65ebc6cecba5e84ff7e63361940c | 9,682 | py | Python | Human-Falling-Detect-Tracks/Actionsrecognition/Models.py | mathemusician/IncidenceReporting | d1cb747ecdbad641276981133a78813f878c89f3 | [
"MIT"
] | 2 | 2021-10-02T19:56:32.000Z | 2021-10-02T21:31:26.000Z | Human-Falling-Detect-Tracks/Actionsrecognition/Models.py | mathemusician/IncidenceReporting | d1cb747ecdbad641276981133a78813f878c89f3 | [
"MIT"
] | null | null | null | Human-Falling-Detect-Tracks/Actionsrecognition/Models.py | mathemusician/IncidenceReporting | d1cb747ecdbad641276981133a78813f878c89f3 | [
"MIT"
] | 1 | 2021-10-02T21:46:08.000Z | 2021-10-02T21:46:08.000Z | ### Reference from: https://github.com/yysijie/st-gcn/tree/master/net
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Actionsrecognition.Utils import Graph
class GraphConvolution(nn.Module):
"""The basic module for applying a graph convolution.
Args:
- in_channel: (int) Number of channels in the input sequence data.
- out_channels: (int) Number of channels produced by the convolution.
- kernel_size: (int) Size of the graph convolving kernel.
- t_kernel_size: (int) Size of the temporal convolving kernel.
- t_stride: (int, optional) Stride of the temporal convolution. Default: 1
- t_padding: (int, optional) Temporal zero-padding added to both sides of
the input. Default: 0
- t_dilation: (int, optional) Spacing between temporal kernel elements. Default: 1
- bias: (bool, optional) If `True`, adds a learnable bias to the output.
Default: `True`
Shape:
- Inputs x: Graph sequence in :math:`(N, in_channels, T_{in}, V)`,
A: Graph adjacency matrix in :math:`(K, V, V)`,
- Output: Graph sequence out in :math:`(N, out_channels, T_{out}, V)`
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
t_kernel_size=1,
t_stride=1,
t_padding=0,
t_dilation=1,
bias=True,
):
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(
in_channels,
out_channels * kernel_size,
kernel_size=(t_kernel_size, 1),
padding=(t_padding, 0),
stride=(t_stride, 1),
dilation=(t_dilation, 1),
bias=bias,
)
def forward(self, x, A):
x = self.conv(x)
n, kc, t, v = x.size()
x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v)
x = torch.einsum("nkctv,kvw->nctw", (x, A))
return x.contiguous()
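# A shape sketch for GraphConvolution (illustrative toy sizes, not from the
# original repo): the conv (size 1 along V) lifts C_in -> K * C_out, the view
# splits out the K spatial-kernel dimension, and the einsum contracts it
# against the adjacency stack A:
#   gcn = GraphConvolution(in_channels=3, out_channels=64, kernel_size=2)
#   x = torch.randn(8, 3, 50, 14)   # (N, C, T, V)
#   A = torch.rand(2, 14, 14)       # (K, V, V)
#   out = gcn(x, A)                 # -> (8, 64, 50, 14)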
class st_gcn(nn.Module):
"""Applies a spatial temporal graph convolution over an input graph sequence.
Args:
- in_channels: (int) Number of channels in the input sequence data.
- out_channels: (int) Number of channels produced by the convolution.
- kernel_size: (tuple) Size of the temporal convolving kernel and
graph convolving kernel.
- stride: (int, optional) Stride of the temporal convolution. Default: 1
- dropout: (int, optional) Dropout rate of the final output. Default: 0
- residual: (bool, optional) If `True`, applies a residual mechanism.
Default: `True`
Shape:
- Inputs x: Graph sequence in :math: `(N, in_channels, T_{in}, V)`,
A: Graph Adjecency matrix in :math: `(K, V, V)`,
- Output: Graph sequence out in :math: `(N, out_channels, T_{out}, V)`
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, dropout=0, residual=True
):
super().__init__()
assert len(kernel_size) == 2
assert kernel_size[0] % 2 == 1
padding = ((kernel_size[0] - 1) // 2, 0)
self.gcn = GraphConvolution(in_channels, out_channels, kernel_size[1])
self.tcn = nn.Sequential(
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
out_channels, out_channels, (kernel_size[0], 1), (stride, 1), padding
),
nn.BatchNorm2d(out_channels),
nn.Dropout(dropout, inplace=True),
)
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=(stride, 1)),
nn.BatchNorm2d(out_channels),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, A):
res = self.residual(x)
x = self.gcn(x, A)
x = self.tcn(x) + res
return self.relu(x)
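# st_gcn data-flow note (example sizes assumed): gcn applies the spatial graph
# convolution, tcn a temporal convolution over T, and the residual branch is
# identity, zero, or a 1x1 conv depending on shape/stride. With stride=2 the
# temporal length halves:
#   unit = st_gcn(64, 128, kernel_size=(9, 2), stride=2)
#   y = unit(torch.randn(8, 64, 50, 14), torch.rand(2, 14, 14))  # -> (8, 128, 25, 14)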
class StreamSpatialTemporalGraph(nn.Module):
"""Spatial temporal graph convolutional networks.
Args:
- in_channels: (int) Number of input channels.
- graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class.
- num_class: (int) Number of class outputs. If `None` return pooling features of
the last st-gcn layer instead.
- edge_importance_weighting: (bool) If `True`, adds a learnable importance
weighting to the edges of the graph.
- **kwargs: (optional) Other parameters for graph convolution units.
Shape:
- Input: :math:`(N, in_channels, T_{in}, V_{in})`
- Output: :math:`(N, num_class)` where
:math:`N` is a batch size,
:math:`T_{in}` is a length of input sequence,
:math:`V_{in}` is the number of graph nodes,
or If num_class is `None`: `(N, out_channels)`
:math:`out_channels` is number of out_channels of the last layer.
"""
def __init__(
self,
in_channels,
graph_args,
num_class=None,
edge_importance_weighting=True,
**kwargs
):
super().__init__()
# Load graph.
graph = Graph(**graph_args)
A = torch.tensor(graph.A, dtype=torch.float32, requires_grad=False)
self.register_buffer("A", A)
# Networks.
spatial_kernel_size = A.size(0)
temporal_kernel_size = 9
kernel_size = (temporal_kernel_size, spatial_kernel_size)
kwargs0 = {k: v for k, v in kwargs.items() if k != "dropout"}
self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
self.st_gcn_networks = nn.ModuleList(
(
st_gcn(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
st_gcn(64, 64, kernel_size, 1, **kwargs),
st_gcn(64, 64, kernel_size, 1, **kwargs),
st_gcn(64, 64, kernel_size, 1, **kwargs),
st_gcn(64, 128, kernel_size, 2, **kwargs),
st_gcn(128, 128, kernel_size, 1, **kwargs),
st_gcn(128, 128, kernel_size, 1, **kwargs),
st_gcn(128, 256, kernel_size, 2, **kwargs),
st_gcn(256, 256, kernel_size, 1, **kwargs),
st_gcn(256, 256, kernel_size, 1, **kwargs),
)
)
# initialize parameters for edge importance weighting.
if edge_importance_weighting:
self.edge_importance = nn.ParameterList(
[nn.Parameter(torch.ones(A.size())) for i in self.st_gcn_networks]
)
else:
self.edge_importance = [1] * len(self.st_gcn_networks)
if num_class is not None:
self.cls = nn.Conv2d(256, num_class, kernel_size=1)
else:
self.cls = lambda x: x
def forward(self, x):
# data normalization.
N, C, T, V = x.size()
x = x.permute(0, 3, 1, 2).contiguous() # (N, V, C, T)
x = x.view(N, V * C, T)
x = self.data_bn(x)
x = x.view(N, V, C, T)
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(N, C, T, V)
# forward.
for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
x = gcn(x, self.A * importance)
x = F.avg_pool2d(x, x.size()[2:])
x = self.cls(x)
x = x.view(x.size(0), -1)
return x
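# Shape walk-through for StreamSpatialTemporalGraph.forward (toy sizes assumed):
# with input (N, C, T, V) = (8, 3, 50, 14), the two stride-2 units shrink
# T 50 -> 25 -> 13 while channels grow 3 -> 64 -> 128 -> 256; avg_pool2d over
# the remaining (T, V) grid gives (N, 256, 1, 1), which self.cls maps to
# (N, num_class), or which is returned as (N, 256) features when num_class is None.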
class TwoStreamSpatialTemporalGraph(nn.Module):
"""Two inputs spatial temporal graph convolutional networks.
Args:
- graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class.
- num_class: (int) Number of class outputs.
- edge_importance_weighting: (bool) If `True`, adds a learnable importance
weighting to the edges of the graph.
- **kwargs: (optional) Other parameters for graph convolution units.
Shape:
- Input: :tuple of math:`((N, 3, T, V), (N, 2, T, V))`
for points and motions stream where.
:math:`N` is a batch size,
:math:`in_channels` is data channels (3 is (x, y, score)), (2 is (mot_x, mot_y))
:math:`T` is a length of input sequence,
:math:`V` is the number of graph nodes,
- Output: :math:`(N, num_class)`
"""
def __init__(self, graph_args, num_class, edge_importance_weighting=True, **kwargs):
super().__init__()
self.pts_stream = StreamSpatialTemporalGraph(
3, graph_args, None, edge_importance_weighting, **kwargs
)
self.mot_stream = StreamSpatialTemporalGraph(
2, graph_args, None, edge_importance_weighting, **kwargs
)
self.fcn = nn.Linear(256 * 2, num_class)
def forward(self, inputs):
out1 = self.pts_stream(inputs[0])
out2 = self.mot_stream(inputs[1])
concat = torch.cat([out1, out2], dim=-1)
out = self.fcn(concat)
return torch.sigmoid(out)
| 37.527132 | 92 | 0.575088 | 1,272 | 9,682 | 4.224057 | 0.153302 | 0.072585 | 0.030709 | 0.022148 | 0.480179 | 0.444444 | 0.38526 | 0.359948 | 0.30616 | 0.287921 | 0 | 0.021045 | 0.307994 | 9,682 | 257 | 93 | 37.673152 | 0.780896 | 0.417682 | 0 | 0.215827 | 0 | 0 | 0.004327 | 0 | 0 | 0 | 0 | 0 | 0.014388 | 1 | 0.057554 | false | 0 | 0.100719 | 0 | 0.215827 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec3fe1d1b9136cf779530748aa16a309021ed5cb | 4,572 | py | Python | baselines_pharmaco_nontrainable.py | francescobarbara/idad | 7931daeec5ae7db0c212d0b13f3c13d4784ecfdb | [
"MIT"
] | 3 | 2021-11-03T07:30:16.000Z | 2021-12-18T16:26:14.000Z | baselines_pharmaco_nontrainable.py | francescobarbara/idad | 7931daeec5ae7db0c212d0b13f3c13d4784ecfdb | [
"MIT"
] | null | null | null | baselines_pharmaco_nontrainable.py | francescobarbara/idad | 7931daeec5ae7db0c212d0b13f3c13d4784ecfdb | [
"MIT"
] | 3 | 2022-01-31T10:21:38.000Z | 2022-03-04T23:56:58.000Z | import os
import math
import argparse
from tqdm import tqdm
import pandas as pd
import torch
import torch.nn as nn
import pyro
import mlflow
from pharmacokinetic import Pharmacokinetic
from experiment_tools.pyro_tools import auto_seed
from experiment_tools.output_utils import get_mlflow_meta
from estimators.mi import PriorContrastiveEstimation, NestedMonteCarloEstimation
from neural.aggregators import ImplicitDeepAdaptiveDesign
from neural.baselines import RandomDesignBaseline, ConstantBatchBaseline
def evaluate_nontrainable_policy_pk(
mlflow_experiment_name,
num_experiments_to_perform,
policy, # random or equal_interval
device,
n_rollout=2048 * 2,
num_inner_samples=int(5e5),
seed=-1,
):
""" T designs at equal intervals """
pyro.clear_param_store()
seed = auto_seed(seed)
mlflow.set_experiment(mlflow_experiment_name)
mlflow.log_param("seed", seed)
mlflow.log_param("baseline_type", policy)
mlflow.log_param("n_rollout", n_rollout)
mlflow.log_param("num_inner_samples", num_inner_samples)
factor = 16
n_rollout = n_rollout // factor
n = 1
design_dim = (n, 1)
EIGs = pd.DataFrame(
columns=["mean_lower", "se_lower", "mean_upper", "se_upper"],
index=num_experiments_to_perform,
)
theta_prior_loc = torch.tensor([1, 0.1, 20], device=device).log()
# covariance of the prior
theta_prior_covmat = torch.eye(3, device=device) * 0.05
uniform_sampler = torch.distributions.Uniform(
torch.tensor(-5.0, device=device), torch.tensor(5.0, device=device)
)
for T in num_experiments_to_perform:
if policy == "equal_interval":
            # ASSUMPTION: first design 5 min after administration
transformed_designs = (
torch.linspace(5.0 / 60, 23.9, T, dtype=torch.float32) / 24.0
)
equispaced_constant_policy = torch.log(
transformed_designs / (1 - transformed_designs)
).to(device)
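            # The designs live on an unconstrained (logit) scale: measurement
            # times are normalized to (0, 1) as a fraction of 24h, then mapped
            # via d = log(t / (1 - t)); presumably the Pharmacokinetic model
            # inverts this with a sigmoid internally.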
design_net = ConstantBatchBaseline(
const_designs_list=equispaced_constant_policy.unsqueeze(1),
design_dim=design_dim,
)
elif policy == "random":
design_net = RandomDesignBaseline(
design_dim=design_dim, random_designs_dist=uniform_sampler
)
# Model and losses
pk_model = Pharmacokinetic(
design_net=design_net,
T=T,
theta_loc=theta_prior_loc,
theta_covmat=theta_prior_covmat,
)
pce_loss_lower = PriorContrastiveEstimation(
pk_model.model, factor, num_inner_samples
)
pce_loss_upper = NestedMonteCarloEstimation(
pk_model.model, factor, num_inner_samples
)
auto_seed(seed)
EIG_proxy_lower = torch.tensor(
[-pce_loss_lower.loss() for _ in range(n_rollout)]
)
auto_seed(seed)
EIG_proxy_upper = torch.tensor(
[-pce_loss_upper.loss() for _ in range(n_rollout)]
)
EIGs.loc[T, "mean_lower"] = EIG_proxy_lower.mean().item()
EIGs.loc[T, "se_lower"] = EIG_proxy_lower.std().item() / math.sqrt(n_rollout)
EIGs.loc[T, "mean_upper"] = EIG_proxy_upper.mean().item()
EIGs.loc[T, "se_upper"] = EIG_proxy_upper.std().item() / math.sqrt(n_rollout)
EIGs.to_csv(f"mlflow_outputs/eval.csv")
mlflow.log_artifact(f"mlflow_outputs/eval.csv", artifact_path="evaluation")
mlflow.log_param("status", "complete")
print(EIGs)
print("Done!")
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="iDAD: Pharmacokinetic model,nontrainable baselines."
)
parser.add_argument(
"--mlflow-experiment-name", default="pharmaco_baselines_nontrainable", type=str,
)
parser.add_argument("--seed", default=-1, type=int)
parser.add_argument(
"--policy", default="random", choices=["random", "equal_interval"], type=str
)
parser.add_argument("--num-experiments-to-perform", nargs="+", default=[5, 10])
parser.add_argument("--device", default="cuda", type=str)
args = parser.parse_args()
args.num_experiments_to_perform = [
int(x) if x else x for x in args.num_experiments_to_perform
]
evaluate_nontrainable_policy_pk(
mlflow_experiment_name=args.mlflow_experiment_name,
num_experiments_to_perform=args.num_experiments_to_perform,
device=args.device,
policy=args.policy,
seed=args.seed,
)
| 33.617647 | 88 | 0.667323 | 556 | 4,572 | 5.196043 | 0.289568 | 0.024922 | 0.044306 | 0.06369 | 0.222568 | 0.149533 | 0.097612 | 0 | 0 | 0 | 0 | 0.012518 | 0.23119 | 4,572 | 135 | 89 | 33.866667 | 0.809388 | 0.032152 | 0 | 0.052632 | 0 | 0 | 0.091733 | 0.029219 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008772 | false | 0 | 0.131579 | 0 | 0.149123 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec424d0484b0678464ceb56aac0631a916e0e37d | 311 | py | Python | bot/config.py | i-dubits/tg-bot-style | 798b42b2892307a668cc676be216634e7a7cf7de | [
"BSD-3-Clause"
] | null | null | null | bot/config.py | i-dubits/tg-bot-style | 798b42b2892307a668cc676be216634e7a7cf7de | [
"BSD-3-Clause"
] | null | null | null | bot/config.py | i-dubits/tg-bot-style | 798b42b2892307a668cc676be216634e7a7cf7de | [
"BSD-3-Clause"
] | null | null | null | TOKEN = '' # your token should be here
file_path_to_download = './images/'
checkpoint_dir = './scripts/checkpoints/'
nst_state_dict = './nst/vgg19-dcbb9e9d.pth' # you should download the VGG19 state dict from here: https://download.pytorch.org/models/vgg19-dcbb9e9d.pth
| 28.272727 | 144 | 0.639871 | 38 | 311 | 5.078947 | 0.710526 | 0.134715 | 0.165803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042735 | 0.247588 | 311 | 10 | 145 | 31.1 | 0.782051 | 0.398714 | 0 | 0 | 0 | 0 | 0.298913 | 0.25 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec43f38ce7e8616b6ecbb84b8e45c133aaf66327 | 2,849 | py | Python | blog/projectCode.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null | blog/projectCode.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null | blog/projectCode.py | iloveyougit/ylink2 | a87d8fde79ab259012cd6486299fcf86e1afc740 | [
"MIT"
] | null | null | null |
from django.shortcuts import redirect, render, get_object_or_404
from django.utils import timezone
from .models import Post, views
from .forms import PostForm
import youtube_dl
from django.core.files import File
from .models import FileSaver
import codecs
from django.views.static import serve
import os
from subprocess import call
import requests
from wsgiref.util import FileWrapper
from django.http import HttpResponse
def post_new(request):
v = views.objects.get(pk=1)
v.k+=1
v.save()
v = v.k
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
l=post.text
f=post.format
ydl_opts = {}
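            # Probe the URL first (download=False) so youtube-dl reports the
            # filename it would write; the actual download happens later via
            # the CLI calls below. prepare_filename reflects the default
            # format, so the extension handling below re-derives the real
            # output name after --extract-audio / mp4 merging.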
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(l, download=False)
download_target = ydl.prepare_filename(info)
a=download_target
print(a)
b=download_target[-3:]
print(b)
if (b=="mp4" or b=="mkv"):
a=download_target[:-3]
print(a)
else:
a=download_target[:-4]
print("else",a)
print(a)
if f=="1":
url="youtube-dl --extract-audio --audio-format mp3 "+l
a+="mp3"
ct='audio/mp3'
command = url
call(command.split(), shell=False)
if f=="2":
url="youtube-dl -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4 "+l
command = url
call(command.split(), shell=False)
a+="mp4"
print("f=2 mp4 video",a)
ct='video/mp4'
if f=="3":
url="youtube-dl -f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4 "+l
a+="mp4"
            ct='video/mp4'  # the command above produces an .mp4, so serve the matching MIME type
command = url
call(command.split(), shell=False)
print(a)
filepath = a
wrapper = FileWrapper(open(filepath, 'rb'))
response = HttpResponse(wrapper, content_type=ct)
response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(filepath)
response['Content-Length'] = os.path.getsize(filepath)
response['Set-Cookie'] = 'fileDownload=true; Path=/'
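            # 'fileDownload=true' is the cookie convention used by the
            # jquery.fileDownload plugin (an assumption about this project's
            # front end) to detect that the streamed response has started.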
return response
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form,'page_views':v})
| 32.747126 | 100 | 0.531064 | 326 | 2,849 | 4.576687 | 0.355828 | 0.046917 | 0.038204 | 0.050268 | 0.176944 | 0.135389 | 0.135389 | 0.063003 | 0.063003 | 0.063003 | 0 | 0.014231 | 0.358722 | 2,849 | 86 | 101 | 33.127907 | 0.802408 | 0 | 0 | 0.210526 | 0 | 0 | 0.126711 | 0.028782 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.210526 | 0 | 0.263158 | 0.092105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec45f10d301fdd0d325a8177f688357eddd958d5 | 33,366 | py | Python | reference_kernels/PetFinder Slow and Steady Feature Building.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | 1 | 2019-01-24T04:22:39.000Z | 2019-01-24T04:22:39.000Z | reference_kernels/PetFinder Slow and Steady Feature Building.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | null | null | null | reference_kernels/PetFinder Slow and Steady Feature Building.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | null | null | null | '''
If you find this useful, please give a thumbs up!
Thanks!
- Claire & Alhan
https://github.com/alhankeser/kaggle-petfinder
'''
# External libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
from mlxtend.feature_selection import SequentialFeatureSelector as sfs
from sklearn.metrics import make_scorer
# from sklearn.metrics import accuracy_score
# from sklearn.metrics import confusion_matrix
import scipy.stats as stats
import math
import time
import traceback
import warnings
import os
import zipfile
import shutil
import sys
import json
# Options
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 200)
warnings.filterwarnings(action="ignore")
class Explore:
def get_dtype(cls, include_type=[], exclude_type=[]):
df = cls.get_df('train')
df.drop(columns=[cls.target_col], inplace=True)
return df.select_dtypes(include=include_type, exclude=exclude_type)
def get_non_numeric(cls):
return cls.get_dtype(exclude_type=['float64', 'int', 'float32'])
def get_numeric(cls):
return cls.get_dtype(exclude_type=['object', 'category'])
def get_categorical(cls, as_df=False):
return cls.get_dtype(include_type=['object'])
def get_correlations(cls, method='spearman'):
df = cls.get_df('train')
corr_mat = df.corr(method=method)
corr_mat.sort_values(cls.target_col, inplace=True)
corr_mat.drop(cls.target_col, inplace=True)
return corr_mat[[cls.target_col]]
def get_skewed_features(cls, df, features, skew_threshold=0.4):
feat_skew = pd.DataFrame(
{'skew': df[features].apply(lambda x: stats.skew(x))})
skewed = feat_skew[abs(feat_skew['skew']) > skew_threshold].index
return skewed.values
def show_boxplot(cls, x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
def plot_categorical(cls, df, cols):
target = cls.target_col
categorical = pd.melt(df, id_vars=[target],
value_vars=cols)
grouped = categorical.groupby(['value', 'variable'],
as_index=False)[target]\
.mean().rename(columns={target: target + '_Mean'})
categorical = pd.merge(categorical, grouped, how='left',
on=['variable', 'value'])\
.sort_values(target + '_Mean')
facet_grid = sns.FacetGrid(categorical, col="variable",
col_wrap=3, size=5,
sharex=False, sharey=False,)
facet_grid = facet_grid.map(cls.show_boxplot, "value", target)
plt.savefig('boxplots.png')
class Clean:
def sample_ros(cls, df):
if df.name == 'train':
X = df.drop(cls.target_col, axis=1)
y = df[cls.target_col]
ros = RandomOverSampler(sampling_strategy='minority',
random_state=1)
X_ros, y_ros = ros.fit_sample(X, y)
df = pd.DataFrame(list(X_ros),
columns=df.drop(cls.target_col, axis=1)
.columns)
df[cls.target_col] = list(y_ros)
return df
def sample(cls, df, target_val_sets):
if df.name == 'train':
for target_val_set in target_val_sets:
df_class_0 = df[df[cls.target_col] == target_val_set[0]]
count_1 = df[cls.target_col].value_counts()[target_val_set[1]]
df_class_0_sampled = df_class_0.sample(count_1,
replace='True',
random_state=1)
df = pd.merge(df.drop(df_class_0.index),
df_class_0_sampled, how='outer')
return df
def keep_only_keep(cls, df):
to_drop = set(df.columns.values) - set(cls.keep)
if df.name == 'train':
to_drop = to_drop - set([cls.target_col])
to_drop = list(to_drop)
df.drop(to_drop, axis=1, inplace=True)
return df
def remove_outliers(cls, df):
if df.name == 'train':
# GrLivArea (1299 & 524)
# df.drop(df[(df['GrLivArea'] > 4000) &
# (df[cls.target_col] < 300000)].index,
# inplace=True)
pass
return df
    def fill_by_type(cls, x, col):
        if pd.isna(x):
            return 0
        return x
def fill_na(cls, df):
for col in df.columns:
df[col] = df[col].apply(lambda x: cls.fill_by_type(x, df[col]))
return df
def get_encoding_lookup(cls, cols):
df = cls.get_df('train')
target = cls.target_col
suffix = '_E'
result = pd.DataFrame()
for cat_feat in cols:
cat_feat_target = df[[cat_feat, target]].groupby(cat_feat)
cat_feat_encoded_name = cat_feat + suffix
order = pd.DataFrame()
order['val'] = df[cat_feat].unique()
order.index = order.val
order.drop(columns=['val'], inplace=True)
order[target + '_mean'] = cat_feat_target[[target]].median()
order['feature'] = cat_feat
order['encoded_name'] = cat_feat_encoded_name
order = order.sort_values(target + '_mean')
order['num_val'] = range(1, len(order)+1)
result = result.append(order)
result.reset_index(inplace=True)
return result
def get_scaled_categorical(cls, encoding_lookup):
scaled = encoding_lookup.copy()
target = cls.target_col
for feature in scaled['feature'].unique():
values = scaled[scaled['feature'] == feature]['num_val'].values
medians = scaled[
scaled['feature'] == feature][target + '_mean'].values
for median in medians:
scaled_value = ((values.min() + 1) *
(median / medians.min()))-1
scaled.loc[(scaled['feature'] == feature) &
(scaled[target + '_mean'] == median),
'num_val'] = scaled_value
return scaled
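    # Worked example of the scaling above (toy numbers, for illustration):
    # if a feature's categories have target medians [10, 20, 40] and num_val
    # starts at [1, 2, 3], each category is re-scored as
    # (min_val + 1) * (median / min_median) - 1, i.e. [1.0, 3.0, 7.0] --
    # preserving the target ordering while spacing levels by relative effect.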
def encode_with_lookup(cls, df, encoding_lookup):
for encoded_index, encoded_row in encoding_lookup.iterrows():
feature = encoded_row['feature']
encoded_name = encoded_row['encoded_name']
value = encoded_row['val']
encoded_value = encoded_row['num_val']
df.loc[df[feature] == value, encoded_name] = encoded_value
return df
def encode_onehot(cls, df, cols):
df = pd.concat([df, pd.get_dummies(df[cols], drop_first=True)], axis=1)
return df
def encode_categorical(cls, df, cols=[], method='one_hot'):
if len(cols) == 0:
cols = cls.get_categorical().columns.values
if method == 'target_mean':
encoding_lookup = cls.get_encoding_lookup(cols)
encoding_lookup = cls.get_scaled_categorical(encoding_lookup)
df = cls.encode_with_lookup(df, encoding_lookup)
if method == 'one_hot':
if len(set(cols) - set(cls.get_dtype(include_type=['object'])
.columns.values)) > 0:
for col in cols:
df[col] = df[col].apply(lambda x: str(x))
df = cls.encode_onehot(df, cols)
df.drop(cols, axis=1, inplace=True)
return df
def fix_zero_infinity(cls, x):
if (x == 0) or math.isinf(x):
return 0
return x
def normalize_features(cls, df, cols=[]):
if len(cols) == 0:
cols = cls.get_numeric().columns.values
for col in cols:
if col in df.columns:
df[col] = df[col].apply(lambda x:
np.log1p(x).astype('float64'))
df[col] = df[col].apply(lambda x: cls.fix_zero_infinity(x))
return df
def scale_quant_features(cls, df, cols):
scaler = StandardScaler()
scaler.fit(df[cols])
scaled = scaler.transform(df[cols])
for i, col in enumerate(cols):
df[col] = scaled[:, i]
return df
def drop_ignore(cls, df):
for col in cls.ignore:
try:
df.drop(col, axis=1, inplace=True)
except Exception:
pass
return df
def drop_low_corr(cls, df, threshold=0.12):
to_drop = pd.DataFrame(columns=['drop'])
corr_mat = cls.get_correlations()
target = cls.target_col
to_drop['drop'] = corr_mat[(abs(corr_mat[target]) <= threshold)].index
df.drop(to_drop['drop'], axis=1, inplace=True)
return df
class Engineer:
def get_image_data(cls, json_path):
image_data = False
if os.path.isfile(json_path):
with open(json_path) as f:
try:
image_data = pd.DataFrame(
json.load(f)['labelAnnotations'])
except Exception:
pass
return image_data
def calculate_photo_scores(cls, df, x, match='exact',
start=1, stop=2):
try:
pet_id = x
pet_type = df[df['PetID'] == pet_id]['Type'].values[0]
pet_type_dict = {1: 'dog', 2: 'cat'}
pet_type = pet_type_dict[pet_type]
scores = []
score = 0
i = start
while (i > 0) & (i < stop):
json_path = path + '/input/train_metadata/'\
+ pet_id + '-' + str(i) + '.json'
image_data = cls.get_image_data(json_path)
try:
if match == 'exact':
scores.append(
image_data[image_data['description'] ==
pet_type]['score'].values[0])
except Exception:
scores.append(.0)
break
i += 1
try:
score = np.array(scores)
except Exception:
pass
except Exception:
print('########## calculate_photo_scores')
print(pet_id)
return score
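    # The scores above come from Google Vision label annotations saved as
    # per-image JSON: for photo i of a pet we look up the confidence of the
    # label that exactly matches the pet type ('dog'/'cat'); a missing file
    # or missing label is recorded as 0.0 and stops the scan.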
def rate_first_photo(cls, x):
try:
score = x['AllPhotoScores'][0]
except Exception:
return 'Not Great'
pet_type = x['Type']
if pet_type == 1:
good_threshold = 0.96
if pet_type == 2:
good_threshold = 0.99
if score > good_threshold:
return 'Good'
if (score < good_threshold) & (score > .5):
return 'Okay'
return 'Not Great'
def rate_secondary_good_photos(cls, x):
count = 0
pet_type = x['Type']
scores = x['AllPhotoScores']
if pet_type == 1:
good_threshold = 0.96
if pet_type == 2:
good_threshold = 0.99
try:
scores = scores[1:]
count = len(scores[scores > good_threshold])
except Exception:
pass
if count > 2:
return 'Good'
if count > 0:
return 'Okay'
return 'Not Great'
def get_photo_scores(cls, df):
try:
df['AllPhotoScores'] = df['PetID']\
.apply(lambda x:
cls.calculate_photo_scores(df,
x, match='exact',
start=1, stop=99))
df['FirstPhotoScore'] = df[['Type', 'AllPhotoScores']]\
.apply(lambda x: cls.rate_first_photo(x), axis=1)
df['SecondaryPhotoScore'] = df[['AllPhotoScores', 'Type']]\
.apply(lambda x: cls.rate_secondary_good_photos(x), axis=1)
except Exception:
print('########## get_photo_scores')
print(df.head())
return df
def get_top_rescuers(cls, x, top_rescuers):
if x in top_rescuers:
return x
return False
def rescuer(cls, df):
top_rescuers = list(df['RescuerID'].value_counts().index[:5])
df['Big_Rescuer'] = df['RescuerID']\
.apply(lambda x: cls.get_top_rescuers(x, top_rescuers))
return df
def fee(cls, df):
df.loc[df['Fee'] > 0, 'Has_Fee'] = True
df.loc[df['Fee'] == 0, 'Has_Fee'] = False
return df
def photo(cls, df):
df.loc[df['PhotoAmt'] > 1, 'Has_2Photos'] = True
df.loc[df['PhotoAmt'] < 2, 'Has_2Photos'] = False
# df.loc[df['VideoAmt'] > 0, 'Has_Video'] = True
# df.loc[df['VideoAmt'] == 0, 'Has_Video'] = False
return df
def simplify_name_length(cls, x):
length = len(str(x))
if length < 3:
return 'short'
# if length < 20:
# return 'medium'
# if length > 19:
# return 'long'
return 'long'
def name_length(cls, df):
df['NameLength'] = df['Name']\
.apply(lambda x: cls.simplify_name_length(x))
return df
def get_name_groups(cls, df):
names = {}
names_by_count = df[df['Type'] == 1]['Name']\
.value_counts().index.tolist()
top5 = [a.lower() for a in names_by_count[:5]]
top30 = [a.lower() for a in names_by_count[:30]]
rest = [a.lower() for a in names_by_count[:]]
names['dog'] = {
'top5': top5,
'top30': top30,
'rest': rest
}
names_by_count = df[df['Type'] == 2]['Name']\
.value_counts().index.tolist()
top5 = [a.lower() for a in names_by_count[:5]]
top30 = [a.lower() for a in names_by_count[:30]]
rest = [a.lower() for a in names_by_count[:]]
names['cat'] = {
'top5': top5,
'top30': top30,
'rest': rest
}
return names
def simplify_names(cls, x, names):
x = str(x)
x = x.lower()
if 'nan' in x:
return 'NAN'
if x in names['top5']:
return 'top5'
# if x in names['top30']:
# return 'top30'
# if '&' in x:
# return 'and'
if x in names['rest']:
return 'rest'
def names(cls, df):
names = cls.get_name_groups(df)
df.loc[df['Type'] == 1, 'NameGroup'] = df[df['Type'] == 1]['Name']\
.apply(lambda x: cls.simplify_names(x, names['dog']))
df.loc[df['Type'] == 2, 'NameGroup'] = df[df['Type'] == 2]['Name']\
.apply(lambda x: cls.simplify_names(x, names['cat']))
return df
def color(cls, df):
df.loc[(df['Color3'] > 0) | (df['Color2'] > 0),
'Mixed_Color'] = True
df.loc[(df['Color3'] == 0) & (df['Color2'] == 0),
'Mixed_Color'] = False
return df
def simplify_quantity(cls, df):
bins = (0, 1, 10, 100)
group_names = ['solo', 'litter', 'herd']
categories = pd.cut(df['Quantity'], bins, labels=group_names)
return categories
def quantity(cls, df):
df.loc[df['Quantity'] == 0, 'Is_Solo'] = True
df.loc[df['Quantity'] > 0, 'Is_Solo'] = False
return df
def gender(cls, df):
df.loc[(df['Gender'] == 3) &
(df['Quantity'] == 2), 'Gender'] = 1.5
df.loc[(df['Gender'] == 3) &
(df['Quantity'] > 2), 'Gender'] = 0
return df
def breed(cls, df):
# df.loc[df['Breed2'] > 0, 'Mixed_Breed'] = True
# df.loc[df['Breed2'] == 0, 'Mixed_Breed'] = False
df.loc[df['Breed1'] == 307, 'Mixed_Breed'] = True
df.loc[df['Breed1'] != 307, 'Mixed_Breed'] = False
return df
def numerize_features(cls, df, cols):
train, test = cls.get_dfs()
df_combined = pd.concat([train[cols], test[cols]])
train.drop(cls.target_col, axis=1, inplace=True)
for feature in cols:
le = LabelEncoder()
df_combined[feature] = df_combined[feature].apply(lambda x: str(x))
df[feature] = df[feature].apply(lambda x: str(x))
le = le.fit(df_combined[feature])
df[feature] = le.transform(df[feature])
return df
def simplify_ages(cls, df, animal):
if animal == 'dog':
bins = (-1, 0, 2, 256)
group_names = ['baby', 'child', 'adult']
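# pd.cut intervals are right-inclusive, so with Age in months this gives
# (-1, 0] -> baby, (0, 2] -> child, (2, 256] -> adult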
categories = pd.cut(df[df['Type'] == 1]['Age'], bins,
labels=group_names)
if animal == 'cat':
bins = (-1, 4, 256)
group_names = ['baby', 'adult']
categories = pd.cut(df[df['Type'] == 2]['Age'], bins,
labels=group_names)
return categories
def age(cls, df):
df.loc[df['Type'] == 1, 'AgeGroup'] = cls.simplify_ages(df, 'dog')
df.loc[df['Type'] == 2, 'AgeGroup'] = cls.simplify_ages(df, 'cat')
df.drop('Age', axis=1, inplace=True)
return df
def sum_features(cls, df, col_sum):
for col_set in col_sum:
f_name = '__'.join(col_set[:])
df[f_name] = df[[*col_set]].sum(axis=1)
df.drop(col_set, axis=1, inplace=True)
return df
def combine_features(cls, row, col_set):
result = ''
for col in col_set:
if result != '':
result += '_'
result += str(row[col])
return result
def combine(cls, df, col_sets):
for col_set in col_sets:
f_name = '__'.join(col_set[:])
df[f_name] = df.apply(lambda x: cls.combine_features(x, col_set),
axis=1)
df.drop(col_set, axis=1, inplace=True)
return df
def multiply_features(cls, df, feature_sets):
for feature_set in feature_sets:
# multipled_name = '_x_'.join(feature_set[:])
# df.drop(feature_set, axis=1, inplace=True)
pass
return df
class Model:
def forward_selection(cls, df, features_count=1):
if df.name == 'train':
qwk_scorer = make_scorer(cls.quadratic_weighted_kappa,
greater_is_better=True)
model = RandomForestClassifier(n_estimators=100, n_jobs=-1)
X = df.drop('AdoptionSpeed', axis=1)
y = df['AdoptionSpeed']
X_train, X_test,\
y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=42)
y_train = y_train.ravel()
y_test = y_test.ravel()
sfs1 = sfs(model,
k_features=3,
forward=True,
floating=False,
verbose=2,
scoring=qwk_scorer,
cv=5)
sfs1 = sfs1.fit(X_train, y_train)
best_cols = list(sfs1.k_feature_idx_)
return best_cols
def confusion_matrix(cls, rater_a, rater_b,
min_rating=None, max_rating=None):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Returns the confusion matrix between rater's ratings
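Example (illustrative doctest):
>>> Model().confusion_matrix([0, 1, 1], [0, 1, 0])
[[1, 0], [1, 1]]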
"""
assert(len(rater_a) == len(rater_b))
if min_rating is None:
min_rating = min(rater_a + rater_b)
if max_rating is None:
max_rating = max(rater_a + rater_b)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)]
for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
conf_mat[a - min_rating][b - min_rating] += 1
return conf_mat
def histogram(cls, ratings, min_rating=None, max_rating=None):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Returns the counts of each type of rating that a rater made
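Example (illustrative doctest):
>>> Model().histogram([1, 1, 3])
[2, 0, 1]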
"""
if min_rating is None:
min_rating = min(ratings)
if max_rating is None:
max_rating = max(ratings)
num_ratings = int(max_rating - min_rating + 1)
hist_ratings = [0 for x in range(num_ratings)]
for r in ratings:
hist_ratings[r - min_rating] += 1
return hist_ratings
def quadratic_weighted_kappa(cls, rater_a, rater_b,
min_rating=0, max_rating=4):
"""
https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/quadratic_weighted_kappa.py
Calculates the quadratic weighted kappa
quadratic_weighted_kappa calculates the quadratic weighted kappa
value, which is a measure of inter-rater agreement between two raters
that provide discrete numeric ratings. Potential values range from -1
(representing complete disagreement) to 1 (representing complete
agreement). A kappa value of 0 is expected if all agreement is due to
chance.
quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b
each correspond to a list of integer ratings. These lists must have the
same length.
The ratings should be integers, and it is assumed that they contain
the complete range of possible ratings.
quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating
is the minimum possible rating, and max_rating is the maximum possible
rating
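Example (illustrative doctest; perfect agreement yields 1.0):
>>> Model().quadratic_weighted_kappa([0, 1, 2, 3, 4], [0, 1, 2, 3, 4])
1.0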
"""
rater_a = np.array(rater_a, dtype=int)
rater_b = np.array(rater_b, dtype=int)
assert(len(rater_a) == len(rater_b))
if min_rating is None:
min_rating = min(min(rater_a), min(rater_b))
if max_rating is None:
max_rating = max(max(rater_a), max(rater_b))
conf_mat = cls.confusion_matrix(rater_a, rater_b,
min_rating, max_rating)
num_ratings = len(conf_mat)
num_scored_items = float(len(rater_a))
hist_rater_a = cls.histogram(rater_a, min_rating, max_rating)
hist_rater_b = cls.histogram(rater_b, min_rating, max_rating)
numerator = 0.0
denominator = 0.0
for i in range(num_ratings):
for j in range(num_ratings):
expected_count = (hist_rater_a[i] * hist_rater_b[j]
/ num_scored_items)
d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
numerator += d * conf_mat[i][j] / num_scored_items
denominator += d * expected_count / num_scored_items
return 1.0 - numerator / denominator
def fix_shape(cls, df):
df_name = df.name
if df_name == 'train':
cols_to_add = set(cls.get_df('test').columns.values) -\
set(df.drop(cls.target_col, axis=1).columns.values)
if df_name == 'test':
cols_to_add = set(cls.get_df('train').drop(cls.target_col, axis=1)
.columns.values) - set(df.columns.values)
cols_to_add = np.array(list(cols_to_add))
cols_to_add = np.append(cols_to_add, df.columns.values)
df = df.reindex(columns=cols_to_add, fill_value=0)
df.name = df_name
return df
def cross_validate(cls, model, parameters):
train, test = cls.get_dfs()
# TODO: check if there are lists in parameters to run gridsearch
if len(train.drop(cls.target_col,
axis=1).columns) != len(test.columns):
cls.mutate(cls.fix_shape)
train = cls.get_df('train')
scores = np.array([])
skf = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
X = train.drop(columns=[cls.target_col])
y = train[cls.target_col]
for train_index, test_index in skf.split(X, y):
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
cv_model = model(**parameters)
cv_model.fit(X_train, y_train)
X_predictions = cv_model.predict(X_test)
score = cls.quadratic_weighted_kappa(y_test, X_predictions, 0, 4)
scores = np.append(scores, score)
score = np.round(scores.mean(), decimals=5)
return score
def fit(cls, model, parameters):
train = cls.get_df('train')
X = train.drop(columns=[cls.target_col])
y = train[cls.target_col]
model = model(**parameters)
model.fit(X, y)
return model
def predict(cls, model):
test = cls.get_df('test')
predictions = model.predict(test)
return predictions
def save_predictions(cls, predictions, score=0, id_col=False):
now = str(time.time()).split('.')[0]
df = cls.get_df('test', False, True)
target = cls.target_col
if not id_col:
id_col = df.columns[0]
df[target] = predictions
if not os.path.exists(path + '/output'):
os.makedirs(path + '/output')
if os.path.exists(path + '/output'):
df[[id_col,
target]].to_csv(path + '/output/submit__'
+ str(int(score * 100000))
+ '__' + now +
'.csv', index=False)
df[[id_col, target]].to_csv('submission.csv', index=False)
class Data(Explore, Clean, Engineer, Model):
def __init__(self, train_csv, test_csv, target='',
ignore=[], keep=[], col_sum=[]):
'''Create pandas DataFrame objects for train and test data.
Positional arguments:
train_csv -- relative path to training data in csv format.
test_csv -- relative path to test data in csv format.
Keyword arguments:
target -- target feature column name in training data.
ignore -- column names in list to ignore during analyses.
keep -- column names in list to keep when filtering DataFrames.
col_sum -- list of column sets to sum into combined features.
'''
self.__train = pd.read_csv(train_csv)
self.__test = pd.read_csv(test_csv)
self.__train.name, self.__test.name = self.get_df_names()
self.target_col = target
self.ignore = ignore
self.keep = keep
self.col_sum = col_sum
self.__original = False
self.__log = False
self.check_in()
self.debug = False
def __str__(cls):
train_columns = 'Train: \n"' + '", "'.join(cls.__train.head(2)) + '"\n'
test_columns = 'Test: \n"' + '", "'.join(cls.__test.head(2)) + '"\n'
return train_columns + test_columns
def get_df_names(cls):
return ('train', 'test')
def get_dfs(cls, ignore=False, originals=False, keep=False):
train, test = (cls.__train.copy(),
cls.__test.copy())
if originals:
train, test = (cls.__original)
if ignore:
train, test = (train.drop(columns=cls.ignore),
test.drop(columns=cls.ignore))
if keep:
train, test = (train[cls.keep],
test[cls.keep])
train.name, test.name = cls.get_df_names()
return (train, test)
def get_df(cls, name, ignore=False, original=False, keep=False):
train, test = cls.get_dfs(ignore, original, keep)
if name == 'train':
return train
if name == 'test':
return test
def log(cls, entry=False, status=False):
if cls.__log is False:
cls.__log = pd.DataFrame(columns=['entry', 'status'])
log_entry = pd.DataFrame({'entry': entry, 'status': status}, index=[0])
cls.__log = pd.concat([cls.__log, log_entry], ignore_index=True)
if status == 'Fail':
cls.rollback()
else:
cls.check_out()
if cls.debug:
cls.print_log()
def print_log(cls):
print(cls.__log)
def check_in(cls):
cls.__current = cls.get_dfs()
if cls.__original is False:
cls.__original = cls.__current
def check_out(cls):
cls.__previous = cls.__current
cls.__train.name, cls.__test.name = cls.get_df_names()
def rollback(cls):
try:
cls.__train, cls.__test = cls.__previous
status = 'Success - To Previous'
except Exception:
cls.__train, cls.__test = cls.__original
status = 'Success - To Original'
cls.log('rollback', status)
def reset(cls):
cls.__train, cls.__test = cls.__original
cls.log('reset', 'Success')
def update_dfs(cls, train, test):
train.name, test.name = cls.get_df_names()
cls.__train = train
cls.__test = test
def mutate(cls, mutation, *args):
'''Make changes to both train and test DataFrames.
Positional arguments:
mutation -- function to pass both train and test DataFrames to.
*args -- arguments to pass to the function, following each DataFrame.
Example usage (illustrative; the mutation must return the DataFrame):
def multiply_column_values(df, col_name, times=10):
df[col_name] = df[col_name] * times
return df
Data.mutate(multiply_column_values, 'Id', 2)
'''
cls.check_in()
try:
train = mutation(cls.get_df('train'), *args)
test = mutation(cls.get_df('test'), *args)
cls.update_dfs(train, test)
status = 'Success'
except Exception:
print(traceback.print_exc())
status = 'Fail'
cls.log(mutation.__name__, status)
def run(d, model, parameters):
mutate = d.mutate
# mutate(d.sample, [[0, 1]])
# mutate(d.sample_ros)
# print(d.get_df('train')['AdoptionSpeed'].value_counts())
mutate(d.get_photo_scores)
# mutate(d.rescuer)
# mutate(d.age)
# mutate(d.gender)
# mutate(d.quantity)
# mutate(d.names)
# mutate(d.name_length)
# mutate(d.color)
# mutate(d.breed)
# mutate(d.fee)
# mutate(d.photo)
# mutate(d.sum_features, d.col_sum)
mutate(d.combine, [
# ['Breed1', 'Breed2'],
# ['Color1', 'Color2']
])
# mutate(d.fill_na)
mutate(d.numerize_features, [
# 'Breed1',
# 'Color1__Color2'
])
mutate(d.encode_categorical, [
'Type',
# 'AgeGroup',
# 'NameLength',
# 'Is_Solo',
# 'Has_2Photos',
'FirstPhotoScore',
'SecondaryPhotoScore'
])
mutate(d.drop_ignore)
# best_features = d.forward_selection(d.get_df('train'), 5)
# print('Best Features', best_features)
# sys.exit()
score = d.cross_validate(model, parameters)
print('Score: ', score)
print(d.get_df('train').head(2))
model = d.fit(model, parameters)
predictions = d.predict(model)
d.print_log()
return (predictions, score)
path = '.'
if os.getcwd().split('/')[1] == 'kaggle':
path = '..'
zip_files = list(filter(lambda x: '.zip' in x, os.listdir(path + '/input/')))
def unzip(file):
to_unzip = path + '/input/' + file
destination = path + '/input/' + file.split('.')[0]
with zipfile.ZipFile(to_unzip, 'r') as zip_ref:
zip_ref.extractall(destination)
def move_zips(move_from, move_to):
zip_files = list(filter(lambda x: '.zip' in x, os.listdir(move_from)))
if not os.path.exists(move_to):
os.makedirs(move_to)
for file in zip_files:
shutil.move(move_from + file, move_to + file)
if len(zip_files) > 0:
for file in zip_files:
unzip(file)
move_zips(path + '/input/', path + '/input/source_zips/')
model = RandomForestClassifier
parameters = {
'n_estimators': 100,
}
cols_to_ignore = ['PetID',
'RescuerID',
'Description',
'Name',
# 'Type',
'Age',
'Breed1',
'Breed2',
'Gender',
'Color1',
'Color2',
'Color3',
'MaturitySize',
'FurLength',
'Vaccinated',
'Dewormed',
'Sterilized',
'Health',
'Quantity',
'Fee',
'State',
'VideoAmt',
'PhotoAmt',
# Custom:
'AllPhotoScores',
]
id_col = 'PetID'
d = Data(path + '/input/train/train.csv',
path + '/input/test/test.csv',
'AdoptionSpeed',
ignore=cols_to_ignore)
predictions, score = run(d, model, parameters)
d.save_predictions(predictions, score, id_col)
| 35.723769 | 102 | 0.543457 | 4,084 | 33,366 | 4.247307 | 0.132958 | 0.010089 | 0.016488 | 0.008648 | 0.252047 | 0.189669 | 0.151735 | 0.110861 | 0.086418 | 0.075695 | 0 | 0.013555 | 0.334472 | 33,366 | 933 | 103 | 35.762058 | 0.767585 | 0.100611 | 0 | 0.207989 | 0 | 0 | 0.061898 | 0.002229 | 0.002755 | 0 | 0 | 0.001072 | 0.002755 | 1 | 0.103306 | false | 0.008264 | 0.030303 | 0.00551 | 0.242424 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec4649677b2490023b21a9d452d053eff7e18fef | 58,219 | py | Python | bin/backup.py | TechGeek01/BackDrop | 9f72bcc4027df6bfa64b645cd6eb0e4148ae5557 | [
"MIT"
] | 49 | 2020-09-12T13:04:40.000Z | 2022-03-04T16:48:26.000Z | bin/backup.py | TechGeek01/BackDrop | 9f72bcc4027df6bfa64b645cd6eb0e4148ae5557 | [
"MIT"
] | 1 | 2021-03-12T03:14:31.000Z | 2021-03-12T03:14:31.000Z | bin/backup.py | TechGeek01/BackDrop | 9f72bcc4027df6bfa64b645cd6eb0e4148ae5557 | [
"MIT"
] | 1 | 2021-03-21T19:01:13.000Z | 2021-03-21T19:01:13.000Z | from tkinter import messagebox
import os
import itertools
from datetime import datetime
import shutil
import pickle
from bin.fileutils import human_filesize, get_directory_size
from bin.color import bcolor
from bin.threadmanager import ThreadManager
from bin.config import Config
from bin.status import Status
class Backup:
def __init__(self, config, backup_config_dir, backup_config_file, do_copy_fn, do_del_fn, start_backup_timer_fn, update_file_detail_list_fn, analysis_summary_display_fn, display_backup_command_info_fn, thread_manager, update_ui_component_fn=None, uicolor=None, progress=None):
"""Configure a backup to be run on a set of drives.
Args:
config (dict): The backup config to be processed.
backup_config_dir (String): The directory to store backup configs on each drive.
backup_config_file (String): The file to store backup configs on each drive.
do_copy_fn (def): The function to be used to handle file copying. TODO: Move do_copy_fn outside of Backup class.
do_del_fn (def): The function to be used to handle file deletion. TODO: Move do_del_fn outside of Backup class.
start_backup_timer_fn (def): The function to be used to start the backup timer.
update_ui_component_fn (def): The function to be used to update UI components (default None).
update_file_detail_list_fn (def): The function to be used to update file lists.
analysis_summary_display_fn (def): The function to be used to show an analysis
summary.
display_backup_command_info_fn (def): The function to be used to enumerate command info
in the UI.
thread_manager (ThreadManager): The thread manager to check for kill flags.
uicolor (Color): The UI color instance to reference for styling (default None). TODO: Move uicolor outside of Backup class
progress (Progress): The progress tracker to bind to.
"""
self.totals = {
'master': 0,
'delete': 0,
'delta': 0,
'running': 0,
'buffer': 0,
'progressBar': 0
}
self.confirm_wipe_existing_drives = False
self.analysis_valid = False
self.analysis_started = False
self.analysis_running = False
self.backup_running = False
self.backup_start_time = 0
self.command_list = []
self.delete_file_list = {}
self.replace_file_list = {}
self.new_file_list = {}
self.config = config
self.DRIVE_VID_INFO = {drive['vid']: drive for drive in config['destinations']}
self.SHARE_NAME_PATH_INFO = {share['dest_name']: share['path'] for share in config['sources']}
self.BACKUP_CONFIG_DIR = backup_config_dir
self.BACKUP_CONFIG_FILE = backup_config_file
self.BACKUP_HASH_FILE = 'hashes.pkl'
self.CLI_MODE = self.config['cli_mode']
self.file_hashes = {drive['name']: {} for drive in self.config['destinations']}
self.uicolor = uicolor
self.do_copy_fn = do_copy_fn
self.do_del_fn = do_del_fn
self.start_backup_timer_fn = start_backup_timer_fn
self.update_ui_component_fn = update_ui_component_fn
self.update_file_detail_list_fn = update_file_detail_list_fn
self.analysis_summary_display_fn = analysis_summary_display_fn
self.display_backup_command_info_fn = display_backup_command_info_fn
self.thread_manager = thread_manager
self.progress = progress
def sanity_check(self):
"""Check to make sure everything is correct before a backup.
Before running a backup or an analysis, both shares and drives need to be
selected, and the drive space on selected drives needs to be larger than the
total size of the selected shares.
Returns:
bool: True if conditions are good, False otherwise.
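In short: at least one source and one destination must be selected,
identifiers must be present where the source/destination mode requires
them, all share sizes must be known, the destinations (plus configured
missing drives in split mode) must have more capacity than the share
total, and newly selected drives require user confirmation before
their existing data is wiped.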
"""
if len(self.config['destinations']) > 0 and len(self.config['sources']) > 0:
share_total = 0
drive_total = 0
# Shares and destinations need identifiers
if self.config['source_mode'] in [Config.SOURCE_MODE_MULTI_DRIVE, Config.SOURCE_MODE_MULTI_PATH] and [share for share in self.config['sources'] if not share['dest_name']]:
return False
if self.config['dest_mode'] == Config.DEST_MODE_PATHS and [drive for drive in self.config['destinations'] if not drive['vid']]:
return False
shares_known = True
for share in self.config['sources']:
if share['size'] is None:
shares_known = False
break
# Add total space of selection
share_total += share['size']
drive_total = sum([drive['capacity'] for drive in self.config['destinations']])
config_total = drive_total + sum([size for drive, size in self.config['missing_drives'].items()])
if shares_known and ((len(self.config['missing_drives']) == 0 and share_total < drive_total) or (share_total < config_total and self.config['splitMode'])):
# Sanity check pass if more drive selected than shares, OR, split mode and more config drives selected than shares
selected_new_drives = [drive['name'] for drive in self.config['destinations'] if drive['hasConfig'] is False]
if not self.confirm_wipe_existing_drives and len(selected_new_drives) > 0:
drive_string = ', '.join(selected_new_drives[:-2] + [' and '.join(selected_new_drives[-2:])])
new_drive_confirm_title = f"New drive{'s' if len(selected_new_drives) > 1 else ''} selected"
new_drive_confirm_message = f"Drive{'s' if len(selected_new_drives) > 1 else ''} {drive_string} appear{'' if len(selected_new_drives) > 1 else 's'} to be new. Existing data will be deleted.\n\nAre you sure you want to continue?"
self.confirm_wipe_existing_drives = messagebox.askyesno(new_drive_confirm_title, new_drive_confirm_message)
return self.confirm_wipe_existing_drives
return True
return False
def get_share_source_path(self, share):
"""Convert a share name into a share path.
Args:
share (String): The share to convert.
Returns:
String: The source path for the given share.
"""
share_base = share.split(os.path.sep)[0]
share_slug = share[len(share_base):].strip(os.path.sep)
share_base_path = self.SHARE_NAME_PATH_INFO[share_base]
share_full_path = os.path.join(share_base_path, share_slug).strip(os.path.sep)
return share_full_path
# IDEA: When we ignore other stuff on the drives, and delete it, have a dialog popup that summarizes what's being deleted, and ask the user to confirm
def analyze(self):
"""Analyze the list of selected shares and drives and figure out how to split files.
Args:
shares (dict[]): The list of selected shares.
shares.name (String): The name of the share.
shares.size (int): The size in bytes of the share.
drives (tuple(String)): The list of selected drives.
This function is run in a new thread, but is only run if the backup config is valid.
If sanity_check() returns False, the analysis isn't run.
"""
# Sanity check for space requirements
if not self.sanity_check():
return
self.analysis_running = True
self.analysis_started = True
if not self.CLI_MODE:
self.progress.start_indeterminate()
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_ANALYSIS_RUNNING)
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_BTN, {'state': 'disable'})
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_START)
self.update_ui_component_fn(Status.LOCK_TREE_SELECTION)
share_info = {share['dest_name']: share['size'] for share in self.config['sources']}
all_share_info = {share['dest_name']: share['size'] for share in self.config['sources']}
# Get hash list for all drives
bad_hash_files = []
self.file_hashes = {drive['name']: {} for drive in self.config['destinations']}
special_ignore_list = [self.BACKUP_CONFIG_DIR, '$RECYCLE.BIN', 'System Volume Information']
for drive in self.config['destinations']:
drive_hash_file_path = os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
if os.path.isfile(drive_hash_file_path):
write_trimmed_changes = False
with open(drive_hash_file_path, 'rb') as f:
try:
# Load hash list, and filter out ignored folders
hash_list = pickle.load(f)
new_hash_list = {file_name: hash_val for file_name, hash_val in hash_list.items() if file_name.split('/')[0] not in special_ignore_list}
new_hash_list = {os.path.sep.join(file_name.split('/')): hash_val for file_name, hash_val in new_hash_list.items() if os.path.isfile(os.path.join(drive['name'], file_name))}
# If trimmed list is shorter, new changes have to be written to the file
if len(new_hash_list) < len(hash_list):
write_trimmed_changes = True
self.file_hashes[drive['name']] = new_hash_list
except Exception:
# Hash file is corrupt
bad_hash_files.append(drive_hash_file_path)
# If trimmed list is different length than original, write changes to file
if write_trimmed_changes:
with open(drive_hash_file_path, 'wb') as f:
pickle.dump({'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in new_hash_list.items()}, f)
else:
# Hash file doesn't exist, so create it
if not os.path.exists(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR)):
os.makedirs(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR))
with open(drive_hash_file_path, 'wb') as f:
pickle.dump({}, f)
# If there are missing or corrupted pickle files, write empty data
if bad_hash_files:
for file in bad_hash_files:
with open(file, 'wb') as f:
pickle.dump({}, f)
drive_info = []
drive_share_list = {}
master_drive_list = [drive for drive in self.config['destinations']]
master_drive_list.extend([{'vid': vid, 'capacity': capacity} for vid, capacity in self.config['missing_drives'].items()])
connected_vid_list = [drive['vid'] for drive in self.config['destinations']]
show_drive_info = []
for i, drive in enumerate(master_drive_list):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
drive_connected = drive['vid'] in connected_vid_list
current_drive_info = drive
current_drive_info['connected'] = drive_connected
# If drive is connected, collect info about config size and free space
if drive_connected:
current_drive_info['configSize'] = get_directory_size(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR))
else:
current_drive_info['name'] = f"[{drive['vid']}]"
current_drive_info['configSize'] = 20000 # Assume 20K config size
current_drive_info['free'] = drive['capacity'] - drive['configSize']
drive_info.append(current_drive_info)
# Enumerate list for tracking what shares go where
drive_share_list[drive['vid']] = []
show_drive_info.append((current_drive_info['name'], human_filesize(drive['capacity']), drive_connected))
# For each drive, smallest first, filter list of shares to those that fit
drive_info.sort(key=lambda x: x['free'])
all_drive_files_buffer = {drive['name']: [] for drive in master_drive_list}
for i, drive in enumerate(drive_info):
# Get list of sources small enough to fit on drive
total_small_sources = {source: size for source, size in share_info.items() if size <= drive['free']}
# Since the list of files is truncated to prevent an unreasonably large
# number of combinations to check, we need to keep processing the file list
# in chunks to make sure we check if all files can be fit on one drive
sources_that_fit_on_dest = []
small_source_list = {}
processed_small_sources = []
processed_source_size = 0
while len(processed_small_sources) < len(total_small_sources):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Trim the list of small files to those that aren't already processed
small_source_list = {source: size for (source, size) in total_small_sources.items() if source not in processed_small_sources}
small_source_list = sorted(small_source_list.items(), key=lambda x: x[1], reverse=True)
trimmed_small_source_list = {source[0]: source[1] for source in small_source_list[:15]}
# Try every combination of sources that fit to find result that uses most of that drive
largest_sum = 0
largest_set = []
for n in range(1, len(trimmed_small_source_list) + 1):
for subset in itertools.combinations(trimmed_small_source_list.keys(), n):
combination_total = sum(trimmed_small_source_list[share] for share in subset)
if (combination_total > largest_sum and combination_total <= drive['free']):
largest_sum = combination_total
largest_set = subset
sources_that_fit_on_dest.extend([source for source in largest_set])
remaining_small_sources = {source[0]: source[1] for source in small_source_list if source[0] not in sources_that_fit_on_dest}
processed_small_sources.extend([source for source in trimmed_small_source_list.keys()])
share_info = {share: size for (share, size) in share_info.items() if share not in sources_that_fit_on_dest}
# Subtract file size of each batch of files from the free space on the drive so the next batch sorts properly
processed_source_size += sum([source[1] for source in small_source_list if source[0] in largest_set])
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# If not all shares fit on smallest drive at once (at least one share has to be put
# on the next largest drive), check free space on next largest drive
if len(sources_that_fit_on_dest) < len(small_source_list) and i < (len(drive_info) - 1):
not_fit_total = sum(size for size in remaining_small_sources.values())
next_drive = drive_info[i + 1]
next_drive_free_space = next_drive['free'] - not_fit_total
# If free space on next drive is less than total capacity of current drive, it
# becomes more efficient to skip current drive, and put all shares on the next
# drive instead.
# This applies only if they can all fit on the next drive. If they have to be
# split across multiple drives after moving them to a larger drive, then it's
# easier to fit what we can on the small drive, to leave the larger drives
# available for larger shares
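# Worked example with hypothetical numbers: if only 30 GB of small shares
# fit on this drive (100 GB free), 370 GB would overflow to the next drive
# (420 GB free), leaving it 50 GB free. Since 50 GB < the 100 GB free here,
# and all 400 GB of small shares fit on the next drive, it's cheaper to put
# everything on the next drive and keep this one fully free.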
if not_fit_total <= next_drive['free']:
total_small_share_space = sum(size for source, size in small_source_list)
if next_drive_free_space < drive['free'] and total_small_share_space <= next_drive['free']:
# Next drive free space less than total on current, so it's optimal to store on next drive instead
drive_share_list[next_drive['vid']].extend([share for share, size in small_source_list]) # All small shares on next drive
else:
# Better to leave on current, but overflow to next drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest) # Shares that fit on current drive
drive_share_list[next_drive['vid']].extend([share for share, size in small_source_list if share not in sources_that_fit_on_dest]) # Remaining small shares on next drive
else:
# If overflow for next drive is more than can fit on that drive, ignore it, put overflow
# back in pool of shares to sort, and put small drive shares only in current drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest) # Shares that fit on current drive
all_drive_files_buffer[drive['name']].extend([f"{drive['name']}{share}" for share in sources_that_fit_on_dest])
# Put remaining small shares back into pool to work with for next drive
share_info.update({share: size for share, size in remaining_small_sources.items()})
else:
# Fit all small shares onto drive
drive_share_list[drive['vid']].extend(sources_that_fit_on_dest)
# Calculate space used by shares, and subtract it from capacity to get free space
used_space = sum(all_share_info[share] for share in drive_share_list[drive['vid']])
drive_info[i]['free'] -= used_space
def split_share(share):
"""Recurse into a share or directory, and split the contents.
Args:
share (String): The share to split.
Returns:
dict[]: A list of shares to be split
dict.share (String): The share to split
dict.files (dict): The list of drive splits.
Key (String) is a drive volume ID,
Value (String[]) is a list of filenames for a given drive.
dict.exclusions (String[]): The list of files to exclude from the split.
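Example return value (illustrative):
[{'share': 'Pictures',
'files': {'VID1': ['a.jpg', 'b.jpg'], 'VID2': ['big_folder']},
'exclusions': ['huge_folder']}]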
"""
# Enumerate list for tracking what shares go where
drive_file_list = {drive['vid']: [] for drive in drive_info}
file_info = {}
share_path = self.get_share_source_path(share)
try:
for entry in os.scandir(share_path):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
if entry.is_file():
new_dir_size = entry.stat().st_size
elif entry.is_dir():
new_dir_size = get_directory_size(entry.path)
filename = entry.path[len(share_path):].strip(os.path.sep)
file_info[filename] = new_dir_size
except PermissionError:
pass
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
return
# For splitting shares, sort by largest free space first
drive_info.sort(reverse=True, key=lambda x: x['free'])
for i, drive in enumerate(drive_info):
# Get list of files small enough to fit on drive
total_small_files = {file: size for file, size in file_info.items() if size <= drive['free']}
# Since the list of files is truncated to prevent an unreasonably large
# number of combinations to check, we need to keep processing the file list
# in chunks to make sure we check if all files can be fit on one drive
files_that_fit_on_drive = []
small_file_list = {}
processed_small_files = []
processed_file_size = 0
while len(processed_small_files) < len(total_small_files):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Trim the list of small files to those that aren't already processed
small_file_list = {file: size for (file, size) in total_small_files.items() if file not in processed_small_files}
# Make sure we don't end with an unreasonable number of combinations to go through
# by sorting by largest first, and truncating
# Sorting files first, since files can't be split, so it's preferred to have directories last
file_list = {}
dir_list = {}
for file, size in small_file_list.items():
if os.path.isfile(os.path.join(share_path, file)):
file_list[file] = size
elif os.path.isdir(os.path.join(share_path, file)):
dir_list[file] = size
# Sort file list by largest first, and truncate to prevent unreasonably large number of combinations
small_file_list = sorted(file_list.items(), key=lambda x: x[1], reverse=True)
small_file_list.extend(sorted(dir_list.items(), key=lambda x: x[1], reverse=True))
trimmed_small_file_list = {file[0]: file[1] for file in small_file_list[:15]}
small_file_list = {file[0]: file[1] for file in small_file_list}
# Try every combination of shares that fit to find result that uses most of that drive
largest_sum = 0
largest_set = []
for n in range(1, len(trimmed_small_file_list) + 1):
for subset in itertools.combinations(trimmed_small_file_list.keys(), n):
combination_total = sum(trimmed_small_file_list[file] for file in subset)
if (combination_total > largest_sum and combination_total <= drive['free'] - processed_file_size):
largest_sum = combination_total
largest_set = subset
files_that_fit_on_drive.extend([file for file in largest_set])
processed_small_files.extend([file for file in trimmed_small_file_list.keys()])
file_info = {file: size for (file, size) in file_info.items() if file not in largest_set}
# Subtract file size of each batch of files from the free space on the drive so the next batch sorts properly
processed_file_size += sum([size for (file, size) in small_file_list.items() if file in largest_set])
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Assign files to drive, and subtract filesize from free space
# Since we're sorting by largest free space first, there's no cases to move
# to a larger drive. This means all files that can fit should be put on the
# drive they fit on.
drive_file_list[drive['vid']].extend(files_that_fit_on_drive)
drive_info[i]['free'] -= processed_file_size
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
return
share_split_summary = [{
'share': share,
'files': drive_file_list,
'exclusions': [file for file in file_info.keys()]
}]
for file in file_info.keys():
file_path = os.path.join(share, file)
share_split_summary.extend(split_share(file_path))
return share_split_summary
# For shares larger than all drives, recurse into each share
# share_info contains shares not sorted into drives
drive_exclusions = {drive['name']: [] for drive in master_drive_list}
for share in share_info.keys():
share_path = self.get_share_source_path(share)
if os.path.exists(share_path) and os.path.isdir(share_path):
summary = split_share(share)
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
# Build exclusion list for other drives.
# This is done by "inverting" the file list for each drive into a list of exclusions for other drives
for split in summary:
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
file_list = split['files']
for drive_vid, files in file_list.items():
# Add files to file list
all_drive_files_buffer[self.DRIVE_VID_INFO[drive_vid]['name']].extend(os.path.join(split['share'], file) for file in files)
# Each summary contains a split share, and any split subfolders, starting with
# the share and recursing into the directories
for split in summary:
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
share_name = split['share']
share_files = split['files']
share_exclusions = split['exclusions']
all_files = share_files.copy()
all_files['exclusions'] = share_exclusions
# For each drive, gather list of files to be written to other drives, and
# use that as exclusions
for drive_vid, files in share_files.items():
if len(files) > 0:
raw_exclusions = all_files.copy()
raw_exclusions.pop(drive_vid, None)
# Build master full exclusion list
master_exclusions = [file for file_list in raw_exclusions.values() for file in file_list]
# Remove share if excluded in parent splitting
if share_name in drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']]:
drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']].remove(share_name)
# Add new exclusions to list
drive_exclusions[self.DRIVE_VID_INFO[drive_vid]['name']].extend([os.path.join(share_name, file) for file in master_exclusions])
drive_share_list[drive_vid].append(share_name)
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
def recurse_file_list(directory):
"""Get a complete list of files in a directory.
Args:
directory (String): The directory to check.
Returns:
String[]: The file list.
"""
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
return []
file_list = []
try:
# os.scandir() returns an iterator, so materialize it to check for emptiness
entries = list(os.scandir(directory))
if entries:
for entry in entries:
# For each entry, either add file to list, or recurse into directory
if entry.is_file():
file_list.append(entry.path)
elif entry.is_dir():
file_list.append(entry.path)
file_list.extend(recurse_file_list(entry.path))
else:
# No files, so append the empty dir itself to the list
file_list.append(directory)
except NotADirectoryError:
return []
except PermissionError:
return []
except OSError:
return []
return file_list
# For each drive in file list buffer, recurse into each directory and build a complete file list
all_drive_files = {drive['name']: [] for drive in master_drive_list}
for drive, files in all_drive_files_buffer.items():
for file in files:
all_drive_files[drive].extend(recurse_file_list(file))
def build_delta_file_list(drive, path, shares, exclusions):
"""Get lists of files to delete and replace from the destination drive, that no longer
exist in the source, or have changed.
Args:
drive (String): The drive to check.
path (String): The path to check.
shares (String[]): The list of shares to check.
exclusions (String[]): The list of files and folders to exclude.
Returns:
{
'delete' (tuple(String, int)[]): The list of files and filesizes for deleting.
'replace' (tuple(String, int, int)[]): The list of files and source/dest filesizes for replacement.
}
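Example return value (illustrative):
{'delete': [('D:', 'Old/file.txt', 1024)],
'replace': [('D:', 'Pictures', 'a.jpg', 2048, 1024)]}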
"""
special_ignore_list = [self.BACKUP_CONFIG_DIR, '$RECYCLE.BIN', 'System Volume Information']
file_list = {
'delete': [],
'replace': []
}
try:
shares_to_process = [share for share in shares if share == path or path.find(share + os.path.sep) == 0]
for entry in os.scandir(os.path.join(drive, path)):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
return file_list
stub_path = entry.path[len(drive):].strip(os.path.sep)
# For each entry, either add filesize to the total, or recurse into the directory
if entry.is_file():
file_stat = entry.stat()
if (stub_path.find(os.path.sep) == -1 # Files should not be on root of drive
# or not os.path.isfile(source_path) # File doesn't exist in source, so delete it
or stub_path in exclusions # File is excluded from drive
or len(shares_to_process) == 0): # File should only count if dir is share or child, not parent
file_list['delete'].append((drive, stub_path, file_stat.st_size))
self.update_file_detail_list_fn('delete', entry.path)
else: # File is in share on destination drive
target_share = max(shares_to_process, key=len)
path_slug = stub_path[len(target_share):].strip(os.path.sep)
share_path = self.get_share_source_path(target_share)
source_path = os.path.join(share_path, path_slug)
if os.path.isfile(source_path): # File exists on source
if (file_stat.st_mtime != os.path.getmtime(source_path) # Existing file is older than source
or file_stat.st_size != os.path.getsize(source_path)): # Existing file is different size than source
# If existing dest file is not same time as source, it needs to be replaced
file_list['replace'].append((drive, target_share, path_slug, os.path.getsize(source_path), file_stat.st_size))
self.update_file_detail_list_fn('copy', entry.path)
else: # File doesn't exist on source, so delete it
file_list['delete'].append((drive, stub_path, file_stat.st_size))
self.update_file_detail_list_fn('delete', entry.path)
elif entry.is_dir():
found_share = False
for item in shares:
path_slug = stub_path[len(item):].strip(os.path.sep)
share_path = self.get_share_source_path(item)
source_path = os.path.join(share_path, path_slug)
if (stub_path == item # Dir is share, so it stays
or (stub_path.find(item + os.path.sep) == 0 and os.path.isdir(source_path)) # Dir is subdir inside share, and it exists in source
or item.find(stub_path + os.path.sep) == 0): # Dir is parent directory of a share we're copying, so it stays
# Recurse into the share
new_list = build_delta_file_list(drive, stub_path, shares, exclusions)
file_list['delete'].extend(new_list['delete'])
file_list['replace'].extend(new_list['replace'])
found_share = True
break
if (not found_share or stub_path in exclusions) and stub_path not in special_ignore_list:
# Directory isn't share, or part of one, and isn't a special folder or
# exclusion, so delete it
file_list['delete'].append((drive, stub_path, get_directory_size(entry.path)))
self.update_file_detail_list_fn('delete', entry.path)
except NotADirectoryError:
return {
'delete': [],
'replace': []
}
except PermissionError:
return {
'delete': [],
'replace': []
}
except OSError:
return {
'delete': [],
'replace': []
}
return file_list
def build_new_file_list(drive, path, shares, exclusions):
"""Get lists of files to copy to the destination drive, that only exist on the
source.
Args:
drive (String): The drive to check.
path (String): The path to check.
shares (String[]): The list of shares the drive should contain.
exclusions (String[]): The list of files and folders to exclude.
Returns:
{
'new' (tuple(String, int)[]): The list of file destinations and filesizes to copy.
}
"""
def scan_share_source_for_new_files(drive, share, path, exclusions, all_shares):
"""Get lists of files to copy to the destination drive from a given share.
Args:
drive (String): The drive to check.
share (String): The share to check.
path (String): The path to check.
exclusions (String[]): The list of files and folders to exclude.
all_shares (String[]): The list of shares the drive should contain, to
avoid recursing into split shares.
Returns:
{
'new' (tuple(String, int)[]): The list of file destinations and filesizes to copy.
}
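Example return value (illustrative):
{'new': [('D:', 'Music', 'album/track.mp3', 4096)]}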
"""
file_list = {
'new': []
}
try:
share_path = self.get_share_source_path(share)
source_path = os.path.join(share_path, path)
# Check if directory has files
if len(os.listdir(source_path)) > 0:
for entry in os.scandir(source_path):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
return file_list
stub_path = entry.path[len(share_path):].strip(os.path.sep)
exclusion_stub_path = os.path.join(share, stub_path)
target_path = os.path.join(drive, share, stub_path)
# For each entry, either add filesize to the total, or recurse into the directory
if entry.is_file():
if (not os.path.isfile(target_path) # File doesn't exist in destination drive
and exclusion_stub_path not in exclusions): # File isn't part of drive exclusion
file_list['new'].append((drive, share, stub_path, entry.stat().st_size))
self.update_file_detail_list_fn('copy', target_path)
elif entry.is_dir():
# Avoid recursing into any split share directories and double counting files
if exclusion_stub_path not in all_shares:
if os.path.isdir(target_path):
# If exists on dest, recurse into it
new_list = scan_share_source_for_new_files(drive, share, stub_path, exclusions, all_shares)
file_list['new'].extend(new_list['new'])
# break
elif exclusion_stub_path not in exclusions:
# Path doesn't exist on dest, so add to list if not excluded
new_list = scan_share_source_for_new_files(drive, share, stub_path, exclusions, all_shares)
file_list['new'].extend(new_list['new'])
# break
elif not os.path.isdir(os.path.join(drive, share, path)):
# If no files in folder on source, create empty folder in destination
return {
'new': [(drive, share, path, get_directory_size(source_path))]
}
except NotADirectoryError:
return {
'new': []
}
except PermissionError:
return {
'new': []
}
except OSError:
return {
'new': []
}
return file_list
file_list = {
'new': []
}
for share in shares:
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
file_list['new'].extend(scan_share_source_for_new_files(drive, share, path, exclusions, shares)['new'])
return file_list
# Build list of files/dirs to delete and replace
self.delete_file_list = {}
self.replace_file_list = {}
self.new_file_list = {}
purge_command_list = []
copy_command_list = []
display_purge_command_list = []
display_copy_command_list = []
for drive, shares in drive_share_list.items():
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
modified_file_list = build_delta_file_list(self.DRIVE_VID_INFO[drive]['name'], '', shares, drive_exclusions[self.DRIVE_VID_INFO[drive]['name']])
delete_items = modified_file_list['delete']
if len(delete_items) > 0:
self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']] = delete_items
file_delete_list = [os.path.join(drive, file) for drive, file, size in delete_items]
display_purge_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([size for drive, file, size in delete_items]),
'fileList': file_delete_list,
'mode': 'delete'
})
purge_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_delete_list,
'payload': delete_items,
'mode': 'delete'
})
# Build list of files to replace
replace_items = modified_file_list['replace']
replace_items.sort(key=lambda x: x[1])
if len(replace_items) > 0:
self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']] = replace_items
file_replace_list = [os.path.join(drive, share, file) for drive, share, file, source_size, dest_size in replace_items]
display_copy_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([source_size for drive, share, file, source_size, dest_size in replace_items]),
'fileList': file_replace_list,
'mode': 'replace'
})
copy_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_replace_list,
'payload': replace_items,
'mode': 'replace'
})
# Build list of new files to copy
new_items = build_new_file_list(self.DRIVE_VID_INFO[drive]['name'], '', shares, drive_exclusions[self.DRIVE_VID_INFO[drive]['name']])['new']
if len(new_items) > 0:
self.new_file_list[self.DRIVE_VID_INFO[drive]['name']] = new_items
file_copy_list = [os.path.join(drive, share, file) for drive, share, file, size in new_items]
display_copy_command_list.append({
'enabled': True,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'size': sum([size for drive, share, file, size in new_items]),
'fileList': file_copy_list,
'mode': 'copy'
})
copy_command_list.append({
'displayIndex': len(display_purge_command_list) + 1,
'type': 'fileList',
'drive': self.DRIVE_VID_INFO[drive]['name'],
'fileList': file_copy_list,
'payload': new_items,
'mode': 'copy'
})
# Gather and summarize totals for analysis summary
show_file_info = []
for i, drive in enumerate(drive_share_list.keys()):
if self.thread_manager.threadlist['Backup Analysis']['killFlag']:
break
file_summary = []
drive_total = {
'running': 0,
'delta': 0,
'delete': 0,
'replace': 0,
'copy': 0,
'new': 0
}
if self.DRIVE_VID_INFO[drive]['name'] in self.delete_file_list.keys():
drive_total['delete'] = sum([size for drive, file, size in self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] -= drive_total['delete']
self.totals['delta'] -= drive_total['delete']
file_summary.append(f"Deleting {len(self.delete_file_list[self.DRIVE_VID_INFO[drive]['name']])} files ({human_filesize(drive_total['delete'])})")
if self.DRIVE_VID_INFO[drive]['name'] in self.replace_file_list.keys():
drive_total['replace'] = sum([source_size for drive, share, file, source_size, dest_size in self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] += drive_total['replace']
drive_total['copy'] += drive_total['replace']
drive_total['delta'] += sum([source_size - dest_size for drive, share, file, source_size, dest_size in self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']]])
file_summary.append(f"Updating {len(self.replace_file_list[self.DRIVE_VID_INFO[drive]['name']])} files ({human_filesize(drive_total['replace'])})")
if self.DRIVE_VID_INFO[drive]['name'] in self.new_file_list.keys():
drive_total['new'] = sum([size for drive, share, file, size in self.new_file_list[self.DRIVE_VID_INFO[drive]['name']]])
drive_total['running'] += drive_total['new']
drive_total['copy'] += drive_total['new']
drive_total['delta'] += drive_total['new']
file_summary.append(f"{len(self.new_file_list[self.DRIVE_VID_INFO[drive]['name']])} new files ({human_filesize(drive_total['new'])})")
# Increment master totals
# Double copy total to account for both copy and verify operations
self.totals['master'] += 2 * drive_total['copy'] + drive_total['delete']
self.totals['delete'] += drive_total['delete']
self.totals['delta'] += drive_total['delta']
if len(file_summary) > 0:
show_file_info.append((self.DRIVE_VID_INFO[drive]['name'], '\n'.join(file_summary)))
if not self.thread_manager.threadlist['Backup Analysis']['killFlag']:
self.analysis_summary_display_fn(
title='Files',
payload=show_file_info
)
# Concat both lists into command list
self.command_list = [cmd for cmd in purge_command_list]
self.command_list.extend([cmd for cmd in copy_command_list])
# Concat lists into display command list
display_command_list = [cmd for cmd in display_purge_command_list]
display_command_list.extend([cmd for cmd in display_copy_command_list])
# Fix display index on command list
for i, cmd in enumerate(self.command_list):
self.command_list[i]['displayIndex'] = i
self.analysis_summary_display_fn(
title='Summary',
payload=[(self.DRIVE_VID_INFO[drive]['name'], '\n'.join(shares), drive in connected_vid_list) for drive, shares in drive_share_list.items()]
)
self.display_backup_command_info_fn(display_command_list)
self.analysis_valid = True
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_READY_FOR_BACKUP)
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_BTN, {'state': 'normal'})
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_END)
else:
# If thread halted, mark analysis as invalid
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_STATUS_BAR, Status.BACKUP_READY_FOR_ANALYSIS)
self.update_ui_component_fn(Status.UPDATEUI_ANALYSIS_END)
self.update_ui_component_fn(Status.RESET_ANALYSIS_OUTPUT)
self.update_ui_component_fn(Status.UNLOCK_TREE_SELECTION)
if not self.CLI_MODE:
self.progress.stop_indeterminate()
self.analysis_running = False
# TODO: Make changes to existing @config check the existing for missing @drives, and delete the config file from drives we unselected if there's multiple drives in a config
# TODO: If a @drive @config is overwritten with a new config file, due to the drive
# being configured for a different backup, then we don't want to delete that file
# In that case, the config file should be ignored. Thus, we need to delete configs
# on unselected drives only if the config file on the drive we want to delete matches
# the config on selected drives
# TODO: When @drive @selection happens, drives in the @config should only be selected if the config on the other drive matches. If it doesn't don't select it by default, and warn about a conflict.
def write_config_to_disks(self):
"""Write the current running backup config to config files on the drives."""
if self.config['sources'] and self.config['destinations']:
share_list = ','.join([item['dest_name'] for item in self.config['sources']])
raw_vid_list = [drive['vid'] for drive in self.config['destinations']]
raw_vid_list.extend(self.config['missing_drives'].keys())
vid_list = ','.join(raw_vid_list)
# For each drive letter connected, get drive info, and write file
for drive in self.config['destinations']:
# If config exists on drives, back it up first
if os.path.isfile(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE)):
shutil.move(os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE), os.path.join(drive['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE + '.old'))
drive_config_file = Config(os.path.join(self.DRIVE_VID_INFO[drive['vid']]['name'], self.BACKUP_CONFIG_DIR, self.BACKUP_CONFIG_FILE))
# Write shares and VIDs to config file
drive_config_file.set('selection', 'sources', share_list)
drive_config_file.set('selection', 'vids', vid_list)
# Write info for each drive to its own section
for cur_drive in self.config['destinations']:
drive_config_file.set(cur_drive['vid'], 'vid', cur_drive['vid'])
drive_config_file.set(cur_drive['vid'], 'serial', cur_drive['serial'])
drive_config_file.set(cur_drive['vid'], 'capacity', cur_drive['capacity'])
# Write info for missing drives
for drive_vid, capacity in self.config['missing_drives'].items():
drive_config_file.set(drive_vid, 'vid', drive_vid)
drive_config_file.set(drive_vid, 'serial', 'Unknown')
drive_config_file.set(drive_vid, 'capacity', capacity)
def run(self):
"""Once the backup analysis is run, and drives and shares are selected, run the backup.
This function is run in a new thread, but is only run if the backup config is valid.
If sanity_check() returns False, the backup isn't run.
"""
if not self.analysis_valid or not self.sanity_check():
return
self.backup_running = True
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_START)
self.update_ui_component_fn(Status.LOCK_TREE_SELECTION)
self.progress.set(0)
self.progress.set_max(self.totals['master'])
for cmd in self.command_list:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Pending', fg=self.uicolor.PENDING)
if cmd['type'] == 'fileList':
self.cmd_info_blocks[cmd['displayIndex']].configure('current_file', text='Pending', fg=self.uicolor.PENDING)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Pending', fg=self.uicolor.PENDING)
# Write config file to drives
self.write_config_to_disks()
self.totals['running'] = 0
self.totals['buffer'] = 0
self.totals['progressBar'] = 0
timer_started = False
for cmd in self.command_list:
if cmd['type'] == 'fileList':
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Running', fg=self.uicolor.RUNNING)
if not timer_started:
timer_started = True
self.backup_start_time = datetime.now()
self.thread_manager.start(ThreadManager.KILLABLE, name='backupTimer', target=self.start_backup_timer_fn)
if cmd['mode'] == 'delete':
for drive, file, size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
src = os.path.join(drive, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
self.do_del_fn(src, size, gui_options)
# If file hash was in list, remove it, and write changes to file
if file in self.file_hashes[drive].keys():
del self.file_hashes[drive][file]
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
if cmd['mode'] == 'replace':
for drive, share, file, source_size, dest_size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
share_path = self.get_share_source_path(share)
src = os.path.join(share_path, file)
dest = os.path.join(drive, share, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
file_hashes = self.do_copy_fn(src, dest, drive, gui_options)
self.file_hashes[drive].update(file_hashes)
# Write updated hash file to drive
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
elif cmd['mode'] == 'copy':
for drive, share, file, size in cmd['payload']:
if self.thread_manager.threadlist['Backup']['killFlag']:
break
share_path = self.get_share_source_path(share)
src = os.path.join(share_path, file)
dest = os.path.join(drive, share, file)
gui_options = {
'displayIndex': cmd['displayIndex']
}
file_hashes = self.do_copy_fn(src, dest, drive, gui_options)
self.file_hashes[drive].update(file_hashes)
# Write updated hash file to drive
drive_hash_file_path = os.path.join(drive, self.BACKUP_CONFIG_DIR, self.BACKUP_HASH_FILE)
with open(drive_hash_file_path, 'wb') as f:
hash_list = {'/'.join(file_name.split(os.path.sep)): hash_val for file_name, hash_val in self.file_hashes[drive].items()}
pickle.dump(hash_list, f)
if self.thread_manager.threadlist['Backup']['killFlag'] and self.totals['running'] < self.totals['master']:
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Aborted', fg=self.uicolor.STOPPED)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Aborted', fg=self.uicolor.STOPPED)
else:
print(f"{bcolor.FAIL}Backup aborted by user{bcolor.ENDC}")
break
else:
if not self.CLI_MODE:
self.cmd_info_blocks[cmd['displayIndex']].state.configure(text='Done', fg=self.uicolor.FINISHED)
self.cmd_info_blocks[cmd['displayIndex']].configure('progress', text='Done', fg=self.uicolor.FINISHED)
else:
print(f"{bcolor.OKGREEN}Backup finished{bcolor.ENDC}")
self.thread_manager.kill('backupTimer')
if not self.CLI_MODE:
self.update_ui_component_fn(Status.UPDATEUI_BACKUP_END)
self.update_ui_component_fn(Status.UNLOCK_TREE_SELECTION)
self.backup_running = False
    def get_backup_start_time(self):
        """
        Returns:
            datetime: The time the backup started. (default 0)
        """

        if self.backup_start_time:
            return self.backup_start_time
        else:
            return 0

    def is_running(self):
        """
        Returns:
            bool: Whether or not the backup is actively running something.
        """

        return self.analysis_running or self.backup_running
| 51.796263 | 279 | 0.571892 | 7,015 | 58,219 | 4.533571 | 0.073984 | 0.021633 | 0.010691 | 0.015596 | 0.550986 | 0.470019 | 0.410527 | 0.379744 | 0.354998 | 0.29145 | 0 | 0.002166 | 0.341727 | 58,219 | 1,123 | 280 | 51.842386 | 0.82768 | 0.20574 | 0 | 0.381503 | 0 | 0.00578 | 0.075469 | 0.00995 | 0 | 0 | 0 | 0.003562 | 0 | 1 | 0.018786 | false | 0.001445 | 0.015896 | 0 | 0.08237 | 0.00289 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec4668ea105fe43b89baef884539542d10b494a5 | 1,134 | py | Python | From Another World/bullet.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | From Another World/bullet.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | From Another World/bullet.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | import pygame
from pygame.sprite import Sprite


class Bullet(Sprite):
    """A CLASS TO MANAGE BULLETS FIRED FROM THE SHIP"""

    def __init__(self, ai_settings, screen, ship):
        """CREATE A BULLET OBJECT AT THE SHIP'S CURRENT POSITION"""
        super(Bullet, self).__init__()
        self.screen = screen

        # CREATE A BULLET RECT AT (0, 0) AND THEN SET CORRECT POSITION
        self.rect = pygame.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)
        self.rect.centerx = ship.rect.centerx
        self.rect.top = ship.rect.top

        # STORE THE BULLET'S POSITION AS A DECIMAL VALUE
        self.y = float(self.rect.y)

        self.color = ai_settings.bullet_color
        self.speed_factor = ai_settings.bullet_speed_factor

    def update(self):
        """MOVE THE BULLET UP THE SCREEN"""
        # UPDATE THE DECIMAL POSITION OF THE BULLET
        self.y -= self.speed_factor
        # UPDATE THE RECT POSITION
        self.rect.y = self.y

    def draw_bullet(self):
        """DRAW THE BULLET TO THE SCREEN"""
        pygame.draw.rect(self.screen, self.color, self.rect)
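
# Usage sketch (added for illustration): the names `ai_settings`, `screen` and
# `ship` mirror the constructor arguments above, but the surrounding game loop
# is an assumption, not part of this file.
#
#     bullets = pygame.sprite.Group()
#     bullets.add(Bullet(ai_settings, screen, ship))
#     bullets.update()                  # moves every bullet up the screen
#     for bullet in bullets.sprites():
#         bullet.draw_bullet()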
| 34.363636 | 90 | 0.638448 | 159 | 1,134 | 4.421384 | 0.314465 | 0.068279 | 0.091038 | 0.036984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004819 | 0.268078 | 1,134 | 33 | 91 | 34.363636 | 0.842169 | 0.295414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec4d41b5339a13570027a14f5668646325d52607 | 1,648 | py | Python | tcc_rpi/adafruit_mcp3008.py | MegaNo0body/tcc | 469824a8afc1cf846793212d42f6c8c43ee4b0bf | [
"MIT"
] | 1 | 2016-09-29T22:39:31.000Z | 2016-09-29T22:39:31.000Z | tcc_rpi/adafruit_mcp3008.py | MegaNo0body/tcc | 469824a8afc1cf846793212d42f6c8c43ee4b0bf | [
"MIT"
] | null | null | null | tcc_rpi/adafruit_mcp3008.py | MegaNo0body/tcc | 469824a8afc1cf846793212d42f6c8c43ee4b0bf | [
"MIT"
] | null | null | null | import time
import os
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)


# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    if ((adcnum > 7) or (adcnum < 0)):
        return -1

    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)     # bring CS low

    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3    # we only need to send 5 bits here
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)

    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1

    GPIO.output(cspin, True)

    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
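
# Worked example (added): for channel 3, `commandout` starts as 0b00011; OR-ing
# in 0x18 (start bit + single-ended bit) gives 0b11011, and the shift by 3
# aligns those 5 bits at the top of the byte, so the loop above clocks out
# 1, 1, 0, 1, 1 on MOSI, most significant bit first.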

# change these as desired - they're the pins connected from the
# SPI port on the ADC to the Cobbler
SPICLK = 11
SPIMISO = 9
SPIMOSI = 10
SPICS = 22

GPIO.setwarnings(False)

# set up the SPI interface pins
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)

MAX_VARIATION = 30
SUCCESS_TRIES = 3

adc_channel = int(input())

value = 0
success = 0
while success < SUCCESS_TRIES:
    readvalue = readadc(adc_channel, SPICLK, SPIMOSI, SPIMISO, SPICS)
    if abs(readvalue - value) < MAX_VARIATION:
        success += 1
    else:
        success = 0
    value = readvalue
    time.sleep(1.0)

print(value)
GPIO.cleanup()
| 21.684211 | 66 | 0.695995 | 258 | 1,648 | 4.422481 | 0.434109 | 0.087642 | 0.078878 | 0.057844 | 0.113059 | 0.089395 | 0.089395 | 0.089395 | 0.089395 | 0 | 0 | 0.03386 | 0.193568 | 1,648 | 75 | 67 | 21.973333 | 0.82468 | 0.219053 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008621 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.054545 | 0 | 0.109091 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec50387d2267d7e50a0c69f978fb2011b6de161c | 1,720 | py | Python | apps/poker/views/settings.py | deniskrumko/izyan-poker | ce70c9c8f761409adad289809e5220237b312407 | [
"MIT"
] | 6 | 2019-08-05T07:37:52.000Z | 2021-12-30T20:07:01.000Z | apps/poker/views/settings.py | deniskrumko/izyan-poker | ce70c9c8f761409adad289809e5220237b312407 | [
"MIT"
] | 8 | 2019-10-25T11:07:03.000Z | 2021-06-10T18:43:42.000Z | apps/poker/views/settings.py | deniskrumko/izyan-poker | ce70c9c8f761409adad289809e5220237b312407 | [
"MIT"
] | 1 | 2019-10-07T15:44:26.000Z | 2019-10-07T15:44:26.000Z | from core.views import BaseView, LoginRequiredMixin
from ..models import PokerMember, PokerRoom


class SettingsView(LoginRequiredMixin, BaseView):
    template_name = 'settings.html'

    def get(self, request, token):
        """Handle GET request."""
        if not self.member:
            return self.redirect('poker:room', args=(token,))

        return super().get(request, token)

    def post(self, request, token):
        """Handle POST request."""
        # Exit room
        if '_exit' in request.POST:
            self.member.is_active = False
            self.member.save()
            return self.redirect('poker:index')

        room_name = request.POST.get('room_name')
        member_name = request.POST.get('member_name')
        use_time = request.POST.get('use_time')

        self.room.name = room_name
        self.room.use_time = bool(int(use_time))
        self.member.name = member_name

        self.room.save()
        self.member.save()
        return self.redirect('poker:room', args=(token,))

    def get_context_data(self, *args, **kwargs):
        """Get context data."""
        return {
            'room': self.room,
            'member': self.member,
        }

    def dispatch(self, *args, **kwargs):
        """Dispatch request."""
        self.user = (
            self.request.user if self.request.user.is_authenticated else None
        )
        self.room = self.get_object_or_404(PokerRoom, token=kwargs['token'])
        self.poker_round = self.room.get_poker_round()
        self.member = PokerMember.objects.filter(
            room=self.room,
            user=self.user,
            is_active=True,
        ).first()
        return super().dispatch(*args, **kwargs)
| 29.655172 | 77 | 0.591279 | 199 | 1,720 | 4.994975 | 0.291457 | 0.070423 | 0.054326 | 0.069417 | 0.123742 | 0.123742 | 0.123742 | 0 | 0 | 0 | 0 | 0.002423 | 0.280233 | 1,720 | 57 | 78 | 30.175439 | 0.800485 | 0.050581 | 0 | 0.102564 | 0 | 0 | 0.057072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.051282 | 0 | 0.358974 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec58e48e50bae8cea63a104f2bb6e36683806f7e | 954 | py | Python | eureka/lib/models_c/py_func/fixipmapping.py | iancrossfield/Eureka | 88b178d1b830c16915045b6387cf91955e0071e2 | [
"MIT"
] | null | null | null | eureka/lib/models_c/py_func/fixipmapping.py | iancrossfield/Eureka | 88b178d1b830c16915045b6387cf91955e0071e2 | [
"MIT"
] | null | null | null | eureka/lib/models_c/py_func/fixipmapping.py | iancrossfield/Eureka | 88b178d1b830c16915045b6387cf91955e0071e2 | [
"MIT"
] | null | null | null |
def fixipmapping(ipparams, posflux, etc=[], retbinflux=False, retbinstd=False):
    """
    This function returns the fixed best-fit intra-pixel mapping.

    Parameters
    ----------
    ipparams : tuple
        unused
    bestmip : 1D array, size = # of measurements
        Best-fit ip mapping

    Returns
    -------
    output : 1D array, size = # of measurements
        Intra-pixel-corrected flux multiplier

    Revisions
    ---------
    2010-08-03  Kevin Stevenson, UCF
                kevin218@knights.ucf.edu
                Original version
    """
    bestmip, binflux, binstd = posflux

    # Return fit with or without binned flux
    if retbinflux == False and retbinstd == False:
        return bestmip
    elif retbinflux == True and retbinstd == True:
        return [bestmip, binflux, binstd]
    elif retbinflux == True:
        return [bestmip, binflux]
    else:
        return [bestmip, binstd]
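
# Usage sketch (added; the shape of `posflux` follows from the unpacking above,
# the variable values themselves are hypothetical):
#
#     flux = fixipmapping(None, (bestmip, binflux, binstd))
#     flux, binflux = fixipmapping(None, (bestmip, binflux, binstd),
#                                  retbinflux=True)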
| 26.5 | 85 | 0.584906 | 97 | 954 | 5.752577 | 0.57732 | 0.09319 | 0.039427 | 0.046595 | 0.089606 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019969 | 0.31761 | 954 | 35 | 86 | 27.257143 | 0.837174 | 0.549266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ec5935855d0506591f11a5c9e27865542c48e32b | 528 | py | Python | tests/wrapper/testsuite.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 38 | 2020-05-11T10:54:15.000Z | 2022-03-30T13:19:09.000Z | tests/wrapper/testsuite.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 45 | 2020-04-21T23:52:22.000Z | 2022-02-19T20:29:27.000Z | tests/wrapper/testsuite.py | gimbas/openinput | 9cbb4b22aebe46dfc33ae9c56b164baa6c1fe693 | [
"MIT"
] | 5 | 2020-08-29T02:10:42.000Z | 2021-08-31T03:12:15.000Z | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Filipe Laíns <lains@riseup.net>

from typing import List, Set

import _testsuite
import pages


class Device(_testsuite.Device):
    def __init__(
        self,
        *,
        name: str,
        functions: Set[pages.Function],
    ) -> None:
        super().__init__(
            name=name,
            functions=pages.functions_to_fw_page_array(functions),
        )

    def hid_send(self, data: List[int]):
| 22 | 66 | 0.617424 | 61 | 528 | 5.081967 | 0.672131 | 0.045161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010363 | 0.268939 | 528 | 23 | 67 | 22.956522 | 0.792746 | 0.242424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b57fc39f4fd2e01c5a6e624252774aacda18038 | 2,578 | py | Python | samples/summarize-bot/dialogs/main_dialog.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | samples/summarize-bot/dialogs/main_dialog.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | samples/summarize-bot/dialogs/main_dialog.py | tsuwandy/botbuilder-community-python | e035a993cd3b0fd8c7b2ff1126c4e993d0c8efc3 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Main dialog. """

from botbuilder.dialogs import (
    ComponentDialog,
    WaterfallDialog,
    WaterfallStepContext,
    DialogTurnResult,
)
from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
from botbuilder.core import MessageFactory

from github_summary_bot import MySummaryBot
from bots import DialogAndWelcomeBot
from .summarize_dialog import SummarizeDialog


class MainDialog(ComponentDialog):
    """Main dialog. """

    def __init__(self, configuration: dict, dialog_id: str = None):
        super(MainDialog, self).__init__(dialog_id or MainDialog.__name__)

        self._configuration = configuration

        self.add_dialog(TextPrompt(TextPrompt.__name__))
        self.add_dialog(SummarizeDialog())
        self.add_dialog(
            WaterfallDialog(
                "WFDialog", [self.intro_step, self.act_step, self.final_step]
            )
        )

        self.initial_dialog_id = "WFDialog"
        self.sum_bot = MySummaryBot()

    async def intro_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Initial prompt."""
        result = self.sum_bot.update_state_reply(step_context.context.activity.text)
        if result == '':
            return await step_context.context.send_activity(
                DialogAndWelcomeBot.create_welcome_response(step_context.context.activity))
        else:
            return await step_context.prompt(
                TextPrompt.__name__,
                PromptOptions(
                    prompt=MessageFactory.text(result)
                ),
            )

    async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        # Run the SummarizeDialog; the dialog will prompt to find out the remaining details.
        return await step_context.begin_dialog(SummarizeDialog.__name__, self.sum_bot)

    async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
        """Complete dialog.

        At this step, display the summary for each comment and the summary of all comments.
        """
        # If the child dialog ("SummarizeDialog") was cancelled or the user failed
        # to confirm, the result here will be None.
        if step_context.result is not None:
            result = step_context.result
            await step_context.context.send_activity(MessageFactory.text(result))
        else:
            await step_context.context.send_activity(MessageFactory.text("Thank you."))
        return await step_context.end_dialog()
| 39.661538 | 135 | 0.689294 | 274 | 2,578 | 6.244526 | 0.383212 | 0.083577 | 0.056108 | 0.051432 | 0.178843 | 0.178843 | 0.061952 | 0.061952 | 0 | 0 | 0 | 0 | 0.232351 | 2,578 | 64 | 136 | 40.28125 | 0.864578 | 0.121024 | 0 | 0.045455 | 0 | 0 | 0.012311 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.136364 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5832f803883a22182b709c9785af4bb2e2a7ee | 3,642 | py | Python | handroll/composers/__init__.py | mblayman/handroll | 42703cf5c969dccd0eb0715402ab84056ab65e22 | [
"BSD-2-Clause"
] | null | null | null | handroll/composers/__init__.py | mblayman/handroll | 42703cf5c969dccd0eb0715402ab84056ab65e22 | [
"BSD-2-Clause"
] | null | null | null | handroll/composers/__init__.py | mblayman/handroll | 42703cf5c969dccd0eb0715402ab84056ab65e22 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2014, Matt Layman
import filecmp
import os
import shutil
import warnings

from pkg_resources import iter_entry_points

from handroll import logger
from handroll.i18n import _


class Composer(object):
    """Interface for all composers"""

    def compose(self, catalog, source_file, out_dir):
        """Compose whatever appropriate output is generated by the composer.

        :param catalog: the ``TemplateCatalog``
        :param source_file: the filename of the source
        :param out_dir: the directory to store output
        """
        raise NotImplementedError


class Composers(object):
    """A collection of available composers"""

    def __init__(self):
        self._available_composers = {}
        self._composers = {}
        self.default_composer = CopyComposer()

        # pkg_resources emits an annoying message related to security that is
        # completely irritating for an average user to address. Filter it out.
        #
        # For the record, the warning is:
        #
        #   pkg_resources.py:991: UserWarning: ~/.python-eggs is writable by
        #   group/others and vulnerable to attack when used with
        #   get_resource_filename. Consider a more secure location (set with
        #   .set_extraction_path or the PYTHON_EGG_CACHE environment variable).
        #
        # handroll assumes a level of trust in whatever is placed in the
        # ``handroll.composers`` entry points.
        warnings.filterwarnings('ignore', '.*get_resource_filename.*')
        for entry_point in iter_entry_points('handroll.composers'):
            cls = entry_point.load()
            self._available_composers[entry_point.name] = cls

    def select_composer_for(self, filename):
        _, ext = os.path.splitext(filename)
        return self._get_composer(ext)

    def _get_composer(self, ext):
        """Get a composer. Lazy load composers for an extension so that an
        individual composer only initializes when a file of its type is found.
        """
        if ext not in self._composers:
            if ext in self._available_composers:
                self._composers[ext] = self._available_composers[ext]()
            else:
                self._composers[ext] = self.default_composer
        return self._composers[ext]
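
    # Usage sketch (added; assumes some composer — e.g. one handling '.md'
    # files — is registered under the 'handroll.composers' entry point, which
    # is hypothetical here):
    #
    #     composers = Composers()
    #     composer = composers.select_composer_for('post.md')       # lazily built
    #     assert composer is composers.select_composer_for('x.md')  # cached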


class CopyComposer(Composer):
    """Copy a source file to the destination.

    ``CopyComposer`` is the default composer for any unrecognized file type.
    The source file will be copied to the output directory unless there is a
    file with an identical name and content already at the destination.
    """

    def compose(self, catalog, source_file, out_dir):
        """Copy a file to the destination if the file does not exist or was
        modified."""
        filename = os.path.basename(source_file)
        # Do not copy files that are already there unless different.
        destination = os.path.join(out_dir, filename)
        if os.path.exists(destination):
            if filecmp.cmp(source_file, destination):
                # Files are equal. Do nothing.
                logger.debug(_('Skipping {filename} ... It is the same as '
                               '{destination}.').format(
                    filename=filename, destination=destination))
                return
            else:
                logger.info(
                    _('{filename} differs from {destination} ...').format(
                        filename=filename, destination=destination))
        logger.info(_('Copying {filename} to {out_dir} ...').format(
            filename=filename, out_dir=out_dir))
        shutil.copy(source_file, out_dir)
| 36.787879 | 78 | 0.643053 | 433 | 3,642 | 5.265589 | 0.374134 | 0.035088 | 0.038596 | 0.021053 | 0.111404 | 0.080702 | 0.032456 | 0.032456 | 0 | 0 | 0 | 0.003414 | 0.276222 | 3,642 | 98 | 79 | 37.163265 | 0.861533 | 0.375618 | 0 | 0.130435 | 0 | 0 | 0.08403 | 0.011606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.152174 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5b594a8ddc22e1ab3c6c0d7edc6fdc3c650bfc | 14,791 | py | Python | git_jdime.py | xai/jdime-utils | b5978a4572afbeaa3e4a9f72cfdccc0a14ee0cf8 | [
"MIT"
] | null | null | null | git_jdime.py | xai/jdime-utils | b5978a4572afbeaa3e4a9f72cfdccc0a14ee0cf8 | [
"MIT"
] | null | null | null | git_jdime.py | xai/jdime-utils | b5978a4572afbeaa3e4a9f72cfdccc0a14ee0cf8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2017 Olaf Lessenich

import argparse
import csv
import os
import sys
import tempfile
import time
import signal
import statistics
import psutil
from plumbum import colors
from plumbum import local
from plumbum.cmd import grep
from plumbum.commands.processes import ProcessExecutionError
from xml.etree import ElementTree as ET
from subprocess import TimeoutExpired

GIT = local['git']
STRATEGY = '$$STRATEGY$$'
COLS = ['project', 'timestamp', 'merge', 'left', 'right', 'file', 'mergetype',
        'strategies', 'target', 'cmd', 'loc_in']


def kill(proc_pid):
    process = psutil.Process(proc_pid)
    for proc in process.children(recursive=True):
        proc.kill()
    process.kill()


def get_merge_commits(before):
    if before:
        return GIT['rev-list', '--all', '--merges', '--reverse',
                   '--before', before]().splitlines()
    else:
        return GIT['rev-list', '--all', '--merges', '--reverse']().splitlines()


def get_jobs(target, strategies=None, jdimeopts=None, noop=False, statedir=None, commits=[]):
    options = ["-o", target]
    if strategies:
        options.append("-m")
        options.append(','.join(strategies))
    if noop:
        options.append("-n")
    if jdimeopts:
        options.append("-j")
        options.append(jdimeopts)
    if statedir:
        options.append("-s")
        options.append(statedir)
    return csv.DictReader(iter(GIT['preparemerge', options, commits]()
                               .splitlines()), delimiter=';', fieldnames=COLS)


def count_conflicts(merged_file):
    conflicts = 0
    try:
        m1 = int(grep['-c', '-e', '^<<<<<<<', merged_file]().strip())
        m2 = int(grep['-c', '-e', '^=======', merged_file]().strip())
        m3 = int(grep['-c', '-e', '^>>>>>>>', merged_file]().strip())
        conflicts = min(m1, m2, m3)
    except ProcessExecutionError:
        pass
    return conflicts


def run(job, prune, writer, runs=1, srcfile=None, noop=False):
    if noop:
        writer = csv.DictWriter(sys.stdout, delimiter=';', fieldnames=COLS)
        writer.writerow(job)
        return

    project = job['project']
    timestamp = job['timestamp']
    mergecommit = job['merge'][0:7]
    left = job['left'][0:7]
    right = job['right'][0:7]
    file = job['file']
    target = job['target']
    mergetype = job['mergetype']
    timeout = 1800
    fail = False

    if mergetype == "skipped":
        writer.writerow([project,
                         timestamp,
                         mergecommit,
                         left,
                         right,
                         file,
                         mergetype,
                         job["cmd"],
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         '',
                         job["loc_in"],
                         0,
                         jdimeversion])
        return

    if not srcfile or srcfile == file:
        errorlog = os.path.join(target, 'error.log')
        strategies = job['strategies'].split(',')

        for strategy in strategies:
            strategy = strategy.replace('+', ',')
            scenario = '%s %s %s %s %s %s %s %s' % (project, timestamp,
                                                    mergecommit, left, right,
                                                    file, mergetype, strategy)
            cmd = job['cmd'].replace(STRATEGY, strategy).split(' ')
            exe = cmd[0]
            args = cmd[1:]
            outfile = args[7]

            runtimes = []
            for i in range(runs):
                if os.path.exists(outfile):
                    os.remove(outfile)

                t0 = time.time()
                # ret, stdout, stderr = local[exe][args].run(retcode=None)
                p = local[exe][args].popen()
                try:
                    stdout, stderr = p.communicate(timeout=timeout)
                    ret = p.returncode
                    t1 = time.time()
                    runtimes.append(t1 - t0)
                except TimeoutExpired:
                    kill(p.pid)
                    stdout = ''
                    stderr = ('Timed out after %d seconds.\r\n' % (timeout)).encode("utf-8")
                    ret = -5
                    t1 = time.time()
                    runtimes.append(t1 - t0)
                    break

            runtime = statistics.median(runtimes)

            if ret >= 0 and ret <= 127:
                tree = ET.fromstring(stdout)
                conflicts = int(tree.find("./mergescenariostatistics/conflicts").text)
                clines = int(tree.find('./mergescenariostatistics/lineStatistics').attrib['numOccurInConflict'])
                ctokens = int(tree.find('./mergescenariostatistics/tokenStatistics').attrib['numOccurInConflict'])
                parsed_conflicts = count_conflicts(outfile)
                loc_out = int(local['wc']['-l', outfile]().split(' ')[0])

                xmlruntimes = {'merge': None,
                               'parse': None,
                               'semistructure': None,
                               'LinebasedStrategy': None,
                               'SemiStructuredStrategy': None,
                               'StructuredStrategy': None}
                for e in tree.findall("./mergescenariostatistics/runtime"):
                    for label in xmlruntimes:
                        if label == e.attrib['label']:
                            xmlruntimes[label] = int(e.attrib['timeMS'])

                if not writer:
                    print('%s: ' % scenario, end='')
                    if conflicts > 0:
                        print(colors.cyan | ('OK (%d conflicts)' % conflicts))
                    else:
                        print(colors.green | 'OK')
                else:
                    writer.writerow([project,
                                     timestamp,
                                     mergecommit,
                                     left,
                                     right,
                                     file,
                                     mergetype,
                                     strategy,
                                     conflicts,
                                     clines,
                                     ctokens,
                                     parsed_conflicts,
                                     runtime,
                                     xmlruntimes['merge'],
                                     xmlruntimes['parse'],
                                     xmlruntimes['semistructure'],
                                     xmlruntimes['LinebasedStrategy'],
                                     xmlruntimes['SemiStructuredStrategy'],
                                     xmlruntimes['StructuredStrategy'],
                                     job["loc_in"],
                                     loc_out,
                                     jdimeversion])
            else:
                fail = True
                if not writer:
                    print('%s: ' % scenario, end='', file=sys.stderr)
                    print(colors.red | ('FAILED (%d)' % ret), file=sys.stderr)
                else:
                    writer.writerow([project,
                                     timestamp,
                                     mergecommit,
                                     left,
                                     right,
                                     file,
                                     'FAILED (' + str(ret) + ')',
                                     strategy,
                                     '',
                                     '',
                                     '',
                                     '',
                                     runtime,
                                     '',
                                     '',
                                     '',
                                     '',
                                     '',
                                     '',
                                     job["loc_in"],
                                     '',
                                     jdimeversion])

                with open(errorlog, 'a') as err:
                    err.write(80 * '=' + '\r\n')
                    err.write(scenario + '\r\n')
                    err.write('> %s\r\n' % ' '.join(cmd))
                    err.write(80 * '-' + '\r\n')
                    err.writelines(stderr.decode("utf-8"))
                    err.write(80 * '-' + '\r\n')

    if prune and not fail:
        for root, dirs, files in os.walk(target, topdown=False):
            for f in files:
                path = os.path.join(root, f)
                if path.endswith(file):
                    os.remove(path)
            if not os.listdir(root):
                os.rmdir(root)
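
# The XML shape assumed by run(), reconstructed from the element paths queried
# above — the real JDime report may carry additional elements and attributes:
#
#   <... >
#     <mergescenariostatistics>
#       <conflicts>2</conflicts>
#       <lineStatistics numOccurInConflict="14" .../>
#       <tokenStatistics numOccurInConflict="57" .../>
#       <runtime label="merge" timeMS="123"/>
#       <runtime label="StructuredStrategy" timeMS="98"/>
#     </mergescenariostatistics>
#   </...>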


def write_state(project, commit, strategies, statedir):
    if statedir:
        statefile = os.path.join(statedir, project)
        if os.path.exists(statefile):
            with open(statefile, 'r') as f:
                for done in csv.DictReader(f, delimiter=';', fieldnames=['project',
                                                                         'commit',
                                                                         'strategy']):
                    if project == done['project'] and commit == done['commit']:
                        if done['strategy'] in strategies:
                            strategies.remove(done['strategy'])
                        if len(strategies) == 0:
                            return
        with open(statefile, 'a') as f:
            statewriter = csv.writer(f, delimiter=';')
            for strategy in strategies:
                statewriter.writerow([project,
                                      commit,
                                      strategy])


def main():
    global jdimeversion

    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output',
                        help='Store output in this directory',
                        type=str)
    parser.add_argument('-m', '--modes',
                        help='Strategies to be prepared, separated by comma',
                        type=str,
                        default='structured,linebased')
    parser.add_argument('-j', '--jdimeopts',
                        help='Additional options to pass to jdime',
                        type=str)
    parser.add_argument('-f', '--file',
                        help='Merge only specified file',
                        type=str)
    parser.add_argument('-p', '--prune',
                        help='Prune successfully merged scenarios',
                        action="store_true")
    parser.add_argument('-c', '--csv',
                        help='Print in csv format',
                        action="store_true")
    parser.add_argument('-H', '--header',
                        help='Include csv header',
                        action="store_true")
    parser.add_argument('-n', '--noop',
                        help='Do not actually run',
                        action="store_true")
    parser.add_argument('-s', '--statedir',
                        help='Use state files to skip completed tasks',
                        type=str)
    parser.add_argument('-b', '--before',
                        help='Use only commits before <date>',
                        type=str)
    parser.add_argument('-r', '--runs',
                        help='Run task this many times (e.g., for benchmarks)',
                        type=int,
                        default=1)
    parser.add_argument('-t', '--tag',
                        help='Append this tag to each line',
                        type=str)
    parser.add_argument('commits', default=[], nargs='+')
    args = parser.parse_args()

    strategies = args.modes.split(',')
    writer = None

    if args.csv:
        writer = csv.writer(sys.stdout, delimiter=';')
        if args.header:
            outputcols = ['project',
                          'timestamp',
                          'mergecommit',
                          'left',
                          'right',
                          'file',
                          'mergetype',
                          'strategy',
                          'conflicts',
                          'clines',
                          'ctokens',
                          'parsed_conflicts',
                          'runtime',
                          't_merge',
                          't_parse',
                          't_semistructure',
                          't_LinebasedStrategy',
                          't_SemiSt
ructuredStrategy'.replace('\n', '') if False else 't_SemiStructuredStrategy',
                          't_StructuredStrategy',
                          'loc_in',
                          'loc_out',
                          'jdimeversion']
            writer.writerow(outputcols)

    if args.output:
        target = args.output
    else:
        target = tempfile.mkdtemp(prefix="jdime.")

    if args.statedir:
        if not os.path.exists(args.statedir):
            os.makedirs(args.statedir)

    if args.tag:
        jdimeversion = args.tag
    else:
        jdimeversion = local['jdime']['-v']().strip()

    if args.runs > 1:
        jdimeversion += " runs:" + str(args.runs)

    # make sure this doesn't interfere with our csv delimiter
    # (str.replace returns a new string, so the result must be assigned back)
    jdimeversion = jdimeversion.replace(';', ',')

    project = os.path.basename(os.getcwd())
    commits = args.commits

    if len(commits) == 1 and commits[0] == 'all':
        for commit in get_merge_commits(args.before):
            for job in get_jobs(target, strategies, args.jdimeopts, args.noop, args.statedir, [commit, ]):
                run(job, args.prune, writer, args.runs, args.file, args.noop)
            write_state(project, commit, strategies.copy(), args.statedir)
    else:
        for job in get_jobs(target, strategies, args.jdimeopts, args.noop, args.statedir, commits):
            run(job, args.prune, writer, args.runs, args.file, args.noop)
        for commit in commits:
            write_state(project, commit, strategies.copy(), args.statedir)

    if args.prune and os.path.exists(target) and not os.listdir(target):
        os.rmdir(target)
    elif not args.csv:
        print()
        if args.prune:
            stored = 'Erroneous'
        else:
            stored = 'All'
        print('%s merge scenarios have been stored to %s' % (stored, target))


if __name__ == "__main__":
    main()
| 38.719895 | 114 | 0.423366 | 1,211 | 14,791 | 5.120562 | 0.235343 | 0.018868 | 0.035639 | 0.015481 | 0.219803 | 0.181906 | 0.156426 | 0.115143 | 0.090147 | 0.067731 | 0 | 0.006778 | 0.461362 | 14,791 | 381 | 115 | 38.821522 | 0.771558 | 0.011358 | 0 | 0.274854 | 0 | 0 | 0.118005 | 0.014845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020468 | false | 0.005848 | 0.04386 | 0 | 0.084795 | 0.020468 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5ba9e7cc299303aeb45f7e67c51d27bbdf3426 | 3,863 | py | Python | wagtail_automatic_redirects/signal_handlers.py | tbrlpld/wagtail-automatic-redirects | 70ff04c7aeb64916f6827c4b84616a4c96d64a5e | [
"BSD-3-Clause"
] | 13 | 2020-02-13T20:55:32.000Z | 2021-12-11T21:20:20.000Z | wagtail_automatic_redirects/signal_handlers.py | tbrlpld/wagtail-automatic-redirects | 70ff04c7aeb64916f6827c4b84616a4c96d64a5e | [
"BSD-3-Clause"
] | 6 | 2020-05-19T21:06:20.000Z | 2021-05-28T13:31:09.000Z | wagtail_automatic_redirects/signal_handlers.py | tbrlpld/wagtail-automatic-redirects | 70ff04c7aeb64916f6827c4b84616a4c96d64a5e | [
"BSD-3-Clause"
] | 4 | 2020-05-19T13:40:05.000Z | 2021-03-03T21:36:48.000Z | from wagtail import VERSION as WAGTAIL_VERSION

if WAGTAIL_VERSION >= (2, 0):
    from wagtail.core.signals import page_published
    from wagtail.contrib.redirects.models import Redirect

    if WAGTAIL_VERSION >= (2, 10):
        from wagtail.core.signals import post_page_move
    else:
        post_page_move = None
else:
    from wagtail.wagtailcore.signals import page_published
    from wagtail.wagtailredirects.models import Redirect


# Create a redirect from the old slug to the new one if the slug changed in a
# published page. Redirects will be created for the Page and all of its
# children. It will not work when a page is moved in the site tree.
def create_redirect_object_if_slug_changed(sender, **kwargs):
    instance = kwargs['instance']
    # The main part is getting the old URL from which the redirect is required.
    # Wagtail keeps a record of every page change in terms of revisions, which
    # helps keep track of every change made to a page, including the page slug.
    # The next part is determining whether a revision is for a draft or a
    # published page. For example, an admin user starts editing a page (with
    # slug /original), changes the URL (/original-changed) and saves as draft.
    # On the next edit, the user changes the URL to something else
    # (/original-desired) and then publishes the page. In this case, a redirect
    # should be created from /original to /original-desired. A page object
    # whose has_unpublished_changes value is True is a draft revision.
    # Interestingly, when an admin user edits a page, the user is editing a
    # page object created from JSON, and the value is stored as JSON in the
    # revision.
    page_revisions = instance.revisions.order_by('-created_at', '-id')
    for revision in page_revisions:
        page_obj = revision.page.specific_class.from_json(
            revision.content_json)
        # The first revision's page object that has has_unpublished_changes
        # value False is the last published Page.
        if not page_obj.has_unpublished_changes:
            # Only create redirect if slug changed
            if instance.url != page_obj.url:
                old_path = Redirect.normalise_path(page_obj.url)
                Redirect.objects.update_or_create(
                    old_path=old_path,
                    defaults={
                        'redirect_page': instance
                    }
                )
                # Also create redirect objects for children of this Page
                create_redirect_objects_for_children(old_path, page_obj)
            break


def create_redirect_object_after_page_move(sender, **kwargs):
    if kwargs['url_path_before'] == kwargs['url_path_after']:
        return

    page_after = kwargs['instance']
    parent_page_before_url = kwargs['parent_page_before'].get_url()
    page_before_url = Redirect.normalise_path(
        parent_page_before_url + page_after.slug
    )
    Redirect.objects.update_or_create(
        old_path=page_before_url,
        defaults={
            'redirect_page': page_after
        }
    )
    create_redirect_objects_for_children(page_before_url, page_after)


# Register receivers
def register_signal_handlers():
    page_published.connect(create_redirect_object_if_slug_changed)
    if post_page_move is not None:
        post_page_move.connect(create_redirect_object_after_page_move)


def create_redirect_objects_for_children(parent_old_slug, parent):
    if not parent.get_children():
        return
    else:
        for child_page in parent.get_children():
            old_path = Redirect.normalise_path(
                parent_old_slug + '/' + child_page.slug)
            Redirect.objects.update_or_create(
                old_path=old_path,
                defaults={
                    'redirect_page': child_page
                }
            )
            create_redirect_objects_for_children(old_path, child_page)
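
# Minimal wiring sketch (added): register_signal_handlers() is typically
# called from an AppConfig.ready() hook. The config class below is
# hypothetical; the actual app may wire it up differently.
#
#     from django.apps import AppConfig
#
#     class WagtailAutomaticRedirectsConfig(AppConfig):
#         name = 'wagtail_automatic_redirects'
#
#         def ready(self):
#             from . import signal_handlers
#             signal_handlers.register_signal_handlers()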
| 39.418367 | 79 | 0.683407 | 509 | 3,863 | 4.943026 | 0.265226 | 0.061208 | 0.041733 | 0.047695 | 0.294515 | 0.183625 | 0.101749 | 0.101749 | 0.050079 | 0.050079 | 0 | 0.001737 | 0.254983 | 3,863 | 97 | 80 | 39.824742 | 0.872481 | 0.31271 | 0 | 0.213115 | 0 | 0 | 0.044419 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.098361 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5c6925b389b1b726a316fd95e267319608e4ab | 1,377 | py | Python | main.py | tegarimansyah/download-folder-manager | 1c93e5a513107fd65ac74ec5afb5f203d739b75e | [
"MIT"
] | null | null | null | main.py | tegarimansyah/download-folder-manager | 1c93e5a513107fd65ac74ec5afb5f203d739b75e | [
"MIT"
] | null | null | null | main.py | tegarimansyah/download-folder-manager | 1c93e5a513107fd65ac74ec5afb5f203d739b75e | [
"MIT"
] | null | null | null | import os
import glob
import pprint
from pathlib import Path

# Setup
pp = pprint.PrettyPrinter(indent=4)

# All variables
DOWNLOAD_FOLDER_PATH = os.path.join(  # path must keep its trailing slash
    os.getenv('DOWNLOAD_FOLDER_PATH', f'{Path.home()}/Downloads/')  # get from env
)


def extract_file_data(filepath):
    _, file_extension = os.path.splitext(filepath)
    return {
        'ext': file_extension,
        'last_modified': os.path.getmtime(filepath),
        'filepath': filepath
    }


def ext_mapping(list_of_files):
    return set(file_object.get('ext').lower() for file_object in list_of_files)


def file_grouping(list_of_ext, list_of_files):
    ext_counter = {}
    ext_group = {}
    for ext in list_of_ext:
        list_filter = tuple(filter(lambda files: files.get('ext') == ext, list_of_files))
        ext_group[ext] = list_filter
        ext_counter[ext] = len(list_filter)
    return ext_counter, ext_group


if __name__ == "__main__":
    filepaths = glob.glob(DOWNLOAD_FOLDER_PATH + '*')
    filepaths.sort(key=os.path.getmtime)  # sort by time modified, unix time, asc (last is newest)
    files = tuple(map(extract_file_data, filepaths))
    extensions = ext_mapping(files)
    extension_counter, file_group = file_grouping(extensions, files)
    pp.pprint(extension_counter)

    # Sample, get all .zip files
    pp.pprint(file_group.get('.zip'))
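
    # Illustrative output (added; extensions, counts and file names are
    # hypothetical):
    #
    #     {'.pdf': 3, '.zip': 2}
    #     ({'ext': '.zip', 'last_modified': 1600000000.0, 'filepath': '/home/user/Downloads/a.zip'},
    #      {'ext': '.zip', 'last_modified': 1600000100.0, 'filepath': '/home/user/Downloads/b.zip'})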
| 28.6875 | 97 | 0.69281 | 189 | 1,377 | 4.767196 | 0.37037 | 0.039956 | 0.048835 | 0.028857 | 0.037736 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000902 | 0.194626 | 1,377 | 47 | 98 | 29.297872 | 0.811542 | 0.095861 | 0 | 0 | 0 | 0 | 0.070331 | 0.019402 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.121212 | 0.030303 | 0.30303 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5c790a442c8ec83937f33d65197c87f874fb7f | 4,295 | py | Python | dane/handlers/RabbitMQHandler.py | CLARIAH/DANE-util | 8a3edec69be18ac3bdee476b65059409af05c1bb | [
"Apache-2.0"
] | 2 | 2020-11-24T11:03:14.000Z | 2021-03-25T13:25:35.000Z | DANE/handlers/RabbitMQHandler.py | CLARIAH/DANE | c27b11d6fe6dc1da5097d90b32bcee64fdc27837 | [
"Apache-2.0"
] | 1 | 2019-12-11T19:46:20.000Z | 2019-12-11T21:30:38.000Z | DANE/handlers/RabbitMQHandler.py | CLARIAH/DANE | c27b11d6fe6dc1da5097d90b32bcee64fdc27837 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present, Netherlands Institute for Sound and Vision (Nanne van Noord)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pika
import sys
import json
import threading
from time import sleep
import functools
import logging

MAX_RETRY = 8
RETRY_INTERVAL = 2  # seconds

logger = logging.getLogger('DANE')


class RabbitMQHandler():
    def __init__(self, config):
        self.config = config
        self.callback = None
        self.retry = 0
        self.connect()

    def connect(self):
        if not hasattr(self, 'connection') or \
                not self.connection or self.connection.is_closed:
            credentials = pika.PlainCredentials(
                self.config.RABBITMQ.USER,
                self.config.RABBITMQ.PASSWORD)
            try:
                self.connection = pika.BlockingConnection(
                    pika.ConnectionParameters(
                        credentials=credentials,
                        host=self.config.RABBITMQ.HOST,
                        port=self.config.RABBITMQ.PORT))
            except (pika.exceptions.AMQPConnectionError,
                    pika.exceptions.ConnectionClosedByBroker) as e:
                self.retry += 1
                if self.retry <= MAX_RETRY:
                    nap_time = RETRY_INTERVAL ** self.retry
                    logger.warning('RabbitMQ Connection Failed. '
                                   'RETRYING in {} seconds'.format(nap_time))
                    sleep(nap_time)
                    self.connect()
                else:
                    logger.critical(
                        'RabbitMQ connection failed, no retries left')
                    raise e from None
            else:
                self.retry = 0

            self.channel = self.connection.channel()
            self.pub_channel = self.connection.channel()

            self.pub_channel.confirm_delivery()

            self.channel.exchange_declare(
                exchange=self.config.RABBITMQ.EXCHANGE,
                exchange_type='topic')

            self.channel.queue_declare(
                queue=self.config.RABBITMQ.RESPONSE_QUEUE,
                durable=True)

    def run(self):
        raise NotImplementedError('Run should be implemented server-side')

    def stop(self):
        raise NotImplementedError('Stop should be implemented server-side')

    def assign_callback(self, callback):
        raise NotImplementedError('assign_callback should be implemented server-side')

    def publish(self, routing_key, task, document, retry=False):
        try:
            self.pub_channel.basic_publish(
                exchange=self.config.RABBITMQ.EXCHANGE,
                routing_key=routing_key,
                properties=pika.BasicProperties(
                    reply_to=self.config.RABBITMQ.RESPONSE_QUEUE,
                    correlation_id=str(task._id),
                    priority=int(task.priority),
                    delivery_mode=2
                ),
                mandatory=True,
                body=json.dumps({
                    # flipflop between json and object is intentional
                    # but maybe not most elegant way..
                    'task': json.loads(task.to_json()),
                    'document': json.loads(document.to_json())
                }))
        except pika.exceptions.ChannelWrongStateError as e:
            if not retry:  # retry once
                logger.exception('Publish error')
                self.connect()
                self.publish(routing_key, task, document, retry=True)
            else:
                raise e
        except Exception as e:
            raise e
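
    # Backoff arithmetic (added): with RETRY_INTERVAL = 2 and MAX_RETRY = 8,
    # connect() sleeps 2, 4, 8, 16, 32, 64, 128 and 256 seconds between
    # attempts, i.e. up to 510 seconds (~8.5 minutes) of waiting in total
    # before giving up.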
| 37.675439 | 86 | 0.56298 | 431 | 4,295 | 5.531323 | 0.417633 | 0.041946 | 0.060403 | 0.03146 | 0.149748 | 0.072567 | 0.032299 | 0 | 0 | 0 | 0 | 0.00498 | 0.345518 | 4,295 | 113 | 87 | 38.00885 | 0.843116 | 0.164843 | 0 | 0.168675 | 0 | 0 | 0.074764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072289 | false | 0.012048 | 0.084337 | 0 | 0.168675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5f1427746108b86dc7e91797ab19861fcd228b | 947 | py | Python | src/Exercise_3/src/plots.py | djeada/Stanford-Machine-Learning | e6ef77939b7c581aebb5e9454669ad2dbb4f98f0 | [
"MIT"
] | null | null | null | src/Exercise_3/src/plots.py | djeada/Stanford-Machine-Learning | e6ef77939b7c581aebb5e9454669ad2dbb4f98f0 | [
"MIT"
] | null | null | null | src/Exercise_3/src/plots.py | djeada/Stanford-Machine-Learning | e6ef77939b7c581aebb5e9454669ad2dbb4f98f0 | [
"MIT"
] | null | null | null | """
The goal of this module is to implement all the visualization
tools needed to graph the data and results of the computations
for Task 3 from the coding homeworks in the Machine Learning
course on coursera.com.
"""

import numpy as np
import matplotlib.pyplot as plt


def display_random_grid(x: np.ndarray, n: int = 20, indices: np.ndarray = None) -> None:
    """
    Display a grid with n digits on it. If no indices are specified,
    a grid of n random digits is displayed.

    Args:
        x:
            An array containing 5000 images. Each image is a row. Each image contains 400 pixels (20x20).
        n:
            Number of digits to be displayed.
        indices:
            The indices of the digits in matrix x.

    Returns:
        None
    """
    if indices is None:
        indices = np.random.choice(x.shape[0], n)

    plt.figure(figsize=(6, 6))
    image = x[indices, 1:].reshape(-1, n).T
    plt.imshow(image)
    plt.axis("off")
| 27.057143 | 100 | 0.658923 | 150 | 947 | 4.146667 | 0.573333 | 0.016077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026912 | 0.254488 | 947 | 34 | 101 | 27.852941 | 0.854108 | 0.575502 | 0 | 0 | 0 | 0 | 0.008798 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b5fd207e69fc80407e2e9f565ac8467717e39c5 | 986 | py | Python | dit/object_detection/adaptive_binarize.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | 1 | 2021-11-07T00:30:05.000Z | 2021-11-07T00:30:05.000Z | dit/object_detection/adaptive_binarize.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | dit/object_detection/adaptive_binarize.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | import argparse
import os

import cv2
import tqdm


def convert(fn):
    # given a file name, convert it into binary and store at the same position
    img = cv2.imread(fn)
    gim = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gim = cv2.adaptiveThreshold(gim, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 45, 11)
    g3im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
    cv2.imwrite(fn, g3im)


if __name__ == '__main__':
    """
    Now only feasible for trackA_XX
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default="../datasets/icdar2019/at_trackA_archival")
    args = parser.parse_args()

    for fdname in os.listdir(args.root_dir):
        if fdname.endswith(".json"):
            continue
        ffdname = os.path.join(args.root_dir, fdname)
        for file in tqdm.tqdm(os.listdir(ffdname)):
            if file.endswith(".xml"):
                continue
            ffile = os.path.join(ffdname, file)
            convert(ffile)
| 29 | 100 | 0.649087 | 131 | 986 | 4.717557 | 0.526718 | 0.029126 | 0.035599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033245 | 0.237323 | 986 | 33 | 101 | 29.878788 | 0.788564 | 0.073022 | 0 | 0.086957 | 0 | 0 | 0.077457 | 0.046243 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b62a1b0dff941851becc78ed4922a7c9853d341 | 3,685 | py | Python | src/mode/modeopening3.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-05-14T06:37:14.000Z | 2022-03-07T02:25:32.000Z | src/mode/modeopening3.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-10-08T19:41:18.000Z | 2021-04-08T04:40:50.000Z | src/mode/modeopening3.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | null | null | null | import random
from collections import deque

import pygame

import constants
import shared
from monster import Monster
from .modeintroduction0 import ModeIntroduction0
from .modeopening import ModeOpening


class ModeOpening3(ModeOpening):
    GROUND_LEVEL = constants.SCREEN_SIZE[1] - 8
    CENTER_TIME = 2500
    TRANSITION_TIME = 750
    EMPTY_TIME = 250
    FULL_MONSTER_WAIT_TIME = EMPTY_TIME + TRANSITION_TIME + CENTER_TIME + TRANSITION_TIME
    __slots__ = (
        'monsters',
        'wait_time',
        'last_level',
        'background',
        'initial_wait_time',
    )

    def __init__(self):
        super().__init__()
        # static elements setup
        self.background = pygame.Surface(constants.SCREEN_SIZE).convert(shared.display.screen)
        self.background.fill(constants.WHITE)
        shared.font_wrap.renderToCentered(
            self.background,
            (constants.SCREEN_SIZE[0] // 2, constants.SCREEN_SIZE[1] // 2 + 4),
            "press any key to start",
            False,
            constants.BLACK
        )
        logo = pygame.image.load(constants.CHIKKAI_LOGO).convert(shared.display.screen)
        self.background.blit(
            logo,
            (
                constants.SCREEN_SIZE[0] // 2 - logo.get_width() // 2,
                constants.SCREEN_SIZE[1] // 4 - logo.get_height() // 2,
            )
        )
        # monster loop setup
        self.last_level = 3
        self.monsters = deque((), 3)
        monster = self._getMonster(0, 3)
        # start the first one in the center
        monster.rect.midbottom = (constants.SCREEN_SIZE[0] // 2, self.GROUND_LEVEL)
        monster.anims.popleft()
        monster.anims.popleft()
        self.monsters.append(monster)
        self.wait_time = self.CENTER_TIME + self.TRANSITION_TIME
        self.monsters.append(self._getMonster(self.wait_time))
        self.wait_time += self.FULL_MONSTER_WAIT_TIME
        self.monsters.append(self._getMonster(self.wait_time))
        self.wait_time += self.FULL_MONSTER_WAIT_TIME
        self.initial_wait_time = self.wait_time

    def _getMonster(self, wait_time, level=None):
        # wait_time is how much time until the previous mon is off the screen
        if level is None:
            level = random.choice(
                [i for i in range(1, 4) if i != self.last_level]
            )
        monster = Monster.atLevel(level)
        self.last_level = level
        self.all_sprites.add(monster)
        monster.rect.midbottom = (
            constants.SCREEN_SIZE[0] + monster.rect.width // 2,
            self.GROUND_LEVEL
        )
        monster.addWait(wait_time + self.EMPTY_TIME)
        monster.addPosAbs(
            Monster.Lerp,
            self.TRANSITION_TIME,
            constants.SCREEN_SIZE[0] // 2,
            self.GROUND_LEVEL - monster.rect.height // 2
        )
        monster.addWait(self.CENTER_TIME)
        monster.addPosAbs(
            Monster.Lerp,
            self.TRANSITION_TIME,
            monster.rect.width // -2,
            self.GROUND_LEVEL - monster.rect.height // 2
        )
        return monster

    def _switchMode(self):
        self.next_mode = ModeIntroduction0()

    def _update(self, dt):
        self.wait_time -= dt
        # every so often, set up additional looping monsters here, so we don't run out
        if self.wait_time < self.initial_wait_time - self.FULL_MONSTER_WAIT_TIME:
            monster = self._getMonster(self.wait_time)
            self.monsters[0].kill()
            self.monsters.append(monster)
            self.wait_time += self.FULL_MONSTER_WAIT_TIME

    def _drawScreen(self, screen):
        screen.blit(self.background, (0, 0))
| 34.439252 | 94 | 0.617096 | 432 | 3,685 | 5.050926 | 0.275463 | 0.08066 | 0.071494 | 0.058662 | 0.403758 | 0.362053 | 0.311641 | 0.252521 | 0.11549 | 0.076077 | 0 | 0.017169 | 0.288738 | 3,685 | 106 | 95 | 34.764151 | 0.815338 | 0.05943 | 0 | 0.186813 | 0 | 0 | 0.021965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054945 | false | 0 | 0.087912 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b630feef7b82478ebf730437b4ab760f83635c1 | 1,651 | py | Python | onadata/apps/logger/management/commands/sync_deleted_instances_fix.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/sync_deleted_instances_fix.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/sync_deleted_instances_fix.py | ubpd/kobocat | 45906e07e8f05c30e3e26bab5570a8ab1ee264db | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
# coding: utf-8
from __future__ import unicode_literals, print_function, division, absolute_import

import json

from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy

from onadata.apps.logger.models import Instance


class Command(BaseCommand):
    help = ugettext_lazy("Fixes deleted instances by syncing "
                         "deleted items from mongo.")

    def handle(self, *args, **kwargs):
        # Reset all sql deletes to None
        Instance.objects.exclude(
            deleted_at=None, xform__downloadable=True).update(deleted_at=None)

        # Get all mongo deletes
        query = '{"$and": [{"_deleted_at": {"$exists": true}}, ' \
                '{"_deleted_at": {"$ne": null}}]}'
        query = json.loads(query)
        xform_instances = settings.MONGO_DB.instances
        cursor = xform_instances.find(query)
        for record in cursor:
            # update sql instance with deleted_at datetime from mongo
            try:
                i = Instance.objects.get(
                    uuid=record["_uuid"], xform__downloadable=True)
            except Instance.DoesNotExist:
                continue
            else:
                deleted_at = parse_datetime(record["_deleted_at"])
                if not timezone.is_aware(deleted_at):
                    deleted_at = timezone.make_aware(
                        deleted_at, timezone.utc)
                i.set_deleted(deleted_at)
| 36.688889 | 82 | 0.63295 | 193 | 1,651 | 5.227979 | 0.518135 | 0.098117 | 0.044599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004195 | 0.278013 | 1,651 | 44 | 83 | 37.522727 | 0.842282 | 0.11387 | 0 | 0 | 0 | 0 | 0.105697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.258065 | 0 | 0.354839 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b63ba2575b08e6d1979a05d2e94572ca308d8dd | 1,149 | py | Python | YOLO TESTS/grobotUtils.py | Lilly7777/GRobot---Server | d6261b72215ba0cdc281387c23427b04b2a9311d | [
"MIT"
] | null | null | null | YOLO TESTS/grobotUtils.py | Lilly7777/GRobot---Server | d6261b72215ba0cdc281387c23427b04b2a9311d | [
"MIT"
] | null | null | null | YOLO TESTS/grobotUtils.py | Lilly7777/GRobot---Server | d6261b72215ba0cdc281387c23427b04b2a9311d | [
"MIT"
] | null | null | null |
# NOTE: cv2 (OpenCV) is required by cvDrawBoxes below; the import was missing
# from the original file.
import cv2


def convertBack(x, y, w, h):
    xmin = int(round(x - (w / 2)))
    xmax = int(round(x + (w / 2)))
    ymin = int(round(y - (h / 2)))
    ymax = int(round(y + (h / 2)))
    return xmin, ymin, xmax, ymax


def cvDrawBoxes(detections, img):
    # Colored labels dictionary
    color_dict = {
        'Tin can': [0, 255, 255], 'Bottle': [238, 123, 158]
    }

    for label, confidence, bbox in detections:
        x, y, w, h = (bbox[0],
                      bbox[1],
                      bbox[2],
                      bbox[3])
        name_tag = label
        for name_key, color_val in color_dict.items():
            if name_key == name_tag:
                color = color_val
                xmin, ymin, xmax, ymax = convertBack(
                    float(x), float(y), float(w), float(h))
                pt1 = (xmin, ymin)
                pt2 = (xmax, ymax)
                cv2.rectangle(img, pt1, pt2, color, 1)
                cv2.putText(img,
                            name_tag +
                            " [" + confidence + "]",
                            (pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            color, 2)
    return img
 | 32.828571 | 80 | 0.442124 | 138 | 1,149 | 3.601449 | 0.384058 | 0.064386 | 0.012072 | 0.016097 | 0.088531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06006 | 0.420366 | 1,149 | 35 | 81 | 32.828571 | 0.686186 | 0.021758 | 0 | 0 | 0 | 0 | 0.01426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b6c4918437ff01fd308871c7f88d92fe127782e | 7,590 | py | Python | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | offrl/base.py | dlqudwns/RepB-SDE | d799c3bbfc9aeca9251dfa84255d1c1b90af42ce | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

SCALE_DIAG_MIN_MAX = (-20, 2)
EPS = 1e-6


def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    squashed_action_logp = logp - tf.reduce_sum(tf.log(1 - squashed_action ** 2 + 1e-6), axis=1)
    # incurred by change of variable
    return squashed_action, squashed_action_logp
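
# Worked equation (added): the correction above follows from the change of
# variables a = tanh(u). With da/du = 1 - tanh(u)^2, the density of the
# squashed action is
#
#     log pi(a) = log mu(u) - sum_i log(1 - tanh(u_i)^2)
#
# which is exactly `logp - tf.reduce_sum(tf.log(1 - squashed_action ** 2 + eps))`;
# the small epsilon guards against log(0) where tanh saturates.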
class OnlineReplayBuffer:
def __init__(self, state_dim, action_dim, buffer_size):
self.buffer_size = buffer_size
self.obs = np.zeros([buffer_size, state_dim])
self.action = np.zeros([buffer_size, action_dim])
self.reward = np.zeros([buffer_size, 1])
self.next_obs = np.zeros([buffer_size, state_dim])
self.done = np.zeros([buffer_size, 1])
self._pointer = 0
self.size = 0
self.buffer = [self.obs, self.action, self.reward, self.next_obs, self.done]
def add_samples(self, *samples):
num_samples = len(samples[0])
index = np.arange(self._pointer, self._pointer + num_samples) % self.buffer_size
for buf, new_samples in zip(self.buffer, samples):
assert len(new_samples) == num_samples
buf[index] = new_samples
self._pointer = (self._pointer + num_samples) % self.buffer_size
self.size = min(self.size + num_samples, self.buffer_size)
def add_sample(self, *sample):
none_sample = [np.array(each)[None] for each in sample]
self.add_samples(*none_sample)
def can_sample(self, batch_size):
return self.size >= batch_size
def sample(self, batch_size):
indices = np.random.randint(0, self.size, size=batch_size)
return [each[indices] for each in self.buffer]
def sample_obs(self, batch_size):
indices = np.random.randint(0, self.size, size=batch_size)
return self.obs[indices]
def format_for_model_training(self):
obs, action, next_obs, reward = self.obs[:self.size], \
self.action[:self.size], self.next_obs[:self.size], self.reward[:self.size]
inputs = np.concatenate([obs, action], axis=-1)
targets = np.concatenate([reward, next_obs - obs], axis=-1)
return inputs, targets
# Simple replay buffer
class OfflineReplayBuffer:
def __init__(self, obs, action, reward, next_obs, done):
self.obs, self.action, self.reward, self.next_obs, self.done \
= obs, action, reward, next_obs, done
self.obs_mean = np.mean(self.obs, axis=0, keepdims=True)
self.obs_std = np.std(self.obs, axis=0, keepdims=True) + 1e-3
self.stan_obs = self.standardizer(np.array(self.obs))
self.stan_next_obs = self.standardizer(np.array(self.next_obs))
def standardizer(self, obs):
return (obs - self.obs_mean) / self.obs_std
def unstandardizer(self, obs):
return obs * self.obs_std + self.obs_mean
def format_for_model_training(self):
inputs = np.concatenate([self.stan_obs, self.action], axis=-1)
delta_obs = self.stan_next_obs - self.stan_obs
targets = np.concatenate([np.array(self.reward)[:, None], delta_obs], axis=-1)
terminals = np.reshape(np.array(self.done), [-1, 1])
return inputs, targets, terminals
def sample(self, batch_size):
obs, action, reward, next_obs, done = [], [], [], [], []
indices = np.random.randint(0, len(self.obs), size=batch_size)
for idx in indices:
obs.append(self.obs[idx])
action.append(self.action[idx])
reward.append(self.reward[idx])
next_obs.append(self.next_obs[idx])
done.append(self.done[idx])
obs, next_obs, action = np.array(obs), np.array(next_obs), np.array(action)
obs, next_obs = self.standardizer(obs), self.standardizer(next_obs)
return obs, action, np.array(reward)[:, None], next_obs, np.array(done)[:, None]
def sample_obs(self, batch_size):
indices = np.random.randint(0, len(self.obs), size=batch_size)
obs = [self.obs[idx] for idx in indices]
return self.standardizer(np.array(obs))
class SquahedGaussianActor(tf.keras.layers.Layer):
def __init__(self, action_dim, hidden_dim=256):
super(SquahedGaussianActor, self).__init__()
self.action_dim = action_dim
# Actor parameters
self.a_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f0')
self.a_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='a/f1')
self.a_l2_mu = tf.keras.layers.Dense(action_dim, name='a/f2_mu')
self.a_l2_log_std = tf.keras.layers.Dense(action_dim, name='a/f2_log_std')
def feedforward(self, obs):
h = self.a_l0(obs)
h = self.a_l1(h)
mean = self.a_l2_mu(h)
log_std = self.a_l2_log_std(h)
std = tf.exp(tf.clip_by_value(log_std, *SCALE_DIAG_MIN_MAX))
return mean, std
def call(self, inputs, **_):
obs, = inputs
mean, std = self.feedforward(obs)
dist = tfp.distributions.MultivariateNormalDiag(mean, std)
dist.shape = mean.shape
sampled_action = dist.sample()
sampled_action_logp = dist.log_prob(sampled_action)
        squashed_action, squashed_action_logp = \
            apply_squashing_func(sampled_action, sampled_action_logp)
        deterministic_action, _ = \
            apply_squashing_func(mean, dist.log_prob(mean))
        return deterministic_action, squashed_action, squashed_action_logp, dist
def nlogp(self, dist, action):
        ''' Negative log-probability of an already-squashed action. '''
        before_squash_action = tf.atanh(
            tf.clip_by_value(action, -1 + EPS, 1 - EPS))
        log_likelihood = dist.log_prob(before_squash_action)
        log_likelihood -= tf.reduce_sum(
            tf.math.log(1 - action ** 2 + EPS), axis=1)
return -tf.reduce_mean(log_likelihood)
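# Hedged usage sketch for the actor (shapes are assumptions, not from the source):
#   actor = SquahedGaussianActor(action_dim=2)
#   det_a, sampled_a, sampled_logp, dist = actor([obs_batch])  # obs_batch: (N, obs_dim)
# Note that call() expects a one-element sequence of inputs, hence `obs, = inputs`.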
class VNetwork(tf.keras.layers.Layer):
def __init__(self, output_dim=1, hidden_dim=64):
super(VNetwork, self).__init__()
self.v_l0 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f0')
self.v_l1 = tf.keras.layers.Dense(hidden_dim, activation='relu', name='v/f1')
self.v_l2 = tf.keras.layers.Dense(output_dim, name='v/f2')
def call(self, inputs, **_):
obs, = inputs
h = self.v_l0(obs)
h = self.v_l1(h)
return self.v_l2(h)
class QNetwork(tf.keras.layers.Layer):
def __init__(self, output_dim=1, num_critics=2, hidden_dim=64):
super(QNetwork, self).__init__()
self.num_critics = num_critics
self.qs_l0, self.qs_l1, self.qs_l2 = [], [], []
for i in range(self.num_critics):
self.qs_l0.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f0'))
self.qs_l1.append(tf.keras.layers.Dense(hidden_dim, activation='relu', name=f'q{i}/f1'))
self.qs_l2.append(tf.keras.layers.Dense(output_dim, name=f'q{i}/f2'))
def call(self, inputs, **_):
obs, action = inputs
obs_action = tf.concat([obs, action], axis=1)
outputs = []
for i in range(self.num_critics):
h = self.qs_l0[i](obs_action)
h = self.qs_l1[i](h)
outputs.append(self.qs_l2[i](h))
return outputs
| 40.15873 | 100 | 0.645059 | 1,081 | 7,590 | 4.314524 | 0.148011 | 0.03452 | 0.036235 | 0.038593 | 0.378859 | 0.354631 | 0.246355 | 0.216124 | 0.189751 | 0.155446 | 0 | 0.013297 | 0.227141 | 7,590 | 188 | 101 | 40.37234 | 0.781793 | 0.045323 | 0 | 0.119718 | 0 | 0 | 0.011646 | 0 | 0 | 0 | 0 | 0 | 0.007042 | 1 | 0.15493 | false | 0 | 0.021127 | 0.021127 | 0.316901 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b6d4190127b487993f75e4be2892ed1328ace49 | 3,565 | py | Python | skinematics/sensors/xio.py | mohataher/scikit-kinematics | fa8c80c13981310666e22dcd35138be1af1b4318 | [
"BSD-3-Clause"
] | null | null | null | skinematics/sensors/xio.py | mohataher/scikit-kinematics | fa8c80c13981310666e22dcd35138be1af1b4318 | [
"BSD-3-Clause"
] | null | null | null | skinematics/sensors/xio.py | mohataher/scikit-kinematics | fa8c80c13981310666e22dcd35138be1af1b4318 | [
"BSD-3-Clause"
] | 1 | 2021-11-02T22:53:23.000Z | 2021-11-02T22:53:23.000Z | '''
Import data saved with XIO-sensors
'''
'''
Author: Thomas Haslwanter
Version: 0.2
Date: May-2016
'''
import os
import pandas as pd
def read_ratefile(reg_file):
'''Read send-rates from an XIO sensor.
"Disabled" channels have the "rate" set to "None".
Parameters
----------
    reg_file : string
Has to be the "Registers"-file.
Returns
-------
    rates : dict
Contains the send-rates for the different "params".
'''
params = ['Sensor',
'DateTime',
'BatteryAndThermometer',
'InertialAndMagnetic',
'Quaternion'
]
rates = {}
    # Read the file content
with open(reg_file, 'r') as in_file:
lines = in_file.readlines()
# Get the send rates
for param in params:
for line in lines:
if line.find(param) > 0:
rate_flag = int(line.split(',')[2])
if rate_flag:
                    '''
                    rate_flag -> rate:
                    1  ...   1 Hz
                    2  ...   2 Hz
                    10 ... 512 Hz
                    '''
rates[param] = 2 ** (rate_flag-1)
else:
# Disabled
rates[param] = None
return rates
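# Example (derived from the mapping above): a rate_flag of 10 yields
# rates[param] = 2 ** 9 = 512 Hz, while a rate_flag of 0 marks the channel
# as disabled (rate None).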
def read_datafile(in_file):
'''Read data from an XIO "CalInertialAndMag"-file.
Parameters
----------
in_file : string
Has to be the name of the "CalInertialAndMag"-file.
Returns
-------
out_list: list
Contains the following parameters:
- acceleration
- angular_velocity
- mag_field_direction
- packet_nr
'''
data = pd.read_csv(in_file)
out_list = []
# Extract the columns that you want, by name
param_list=['Acc', 'Gyr', 'Mag', 'Packet']
for Expression in param_list:
out_list.append(data.filter(regex=Expression).values)
return out_list
def get_data(in_selection):
'''Get the sampling rates, as well as the recorded data.
Parameters
----------
in_selection : string
Directory containing all the data-files, or
filename of one file in that directory
Returns
-------
out_list: list
Contains the following parameters:
- rate
- acceleration
- angular_velocity
- mag_field_direction
- packet_nr
'''
if os.path.isdir(in_selection):
in_dir = in_selection
else:
in_file = in_selection
in_dir = os.path.split(in_file)[0]
file_list = os.listdir(in_dir)
# Get the filenames, based on the XIO-definitions
files = {}
for file in file_list:
if file.find('Registers') > 0:
files['register'] = os.path.join(in_dir, file)
if file.find('CalInertialAndMag') > 0:
files['data'] = os.path.join(in_dir, file)
# Read in the registers-file, and extract the sampling rates
rates = read_ratefile(files['register'])
# Read the sensor-data
data = read_datafile(files['data'])
return ([rates['InertialAndMagnetic']] + data)
if __name__ == '__main__':
test_dir = r'../../tests/data/data_xio'
assert os.path.exists(test_dir)
data = get_data(test_dir)
print('Rate: {0} [Hz]'.format(data[0]))
print('Acceleration [m/s^2]:\n {0}'.format(data[1]))
| 24.756944 | 64 | 0.520898 | 397 | 3,565 | 4.528967 | 0.332494 | 0.030033 | 0.010011 | 0.024472 | 0.167964 | 0.167964 | 0.14683 | 0.14683 | 0 | 0 | 0 | 0.011989 | 0.368303 | 3,565 | 143 | 65 | 24.93007 | 0.786412 | 0.349229 | 0 | 0.04 | 0 | 0 | 0.116667 | 0.023958 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.06 | false | 0 | 0.04 | 0 | 0.16 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b6de959719deda0bcf0d3617c5c148c46c3e48d | 2,166 | py | Python | zcls/util/cutmix.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | 110 | 2021-02-04T14:32:57.000Z | 2022-03-30T01:51:56.000Z | zcls/util/cutmix.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | 8 | 2021-04-11T02:46:57.000Z | 2021-12-14T19:30:58.000Z | zcls/util/cutmix.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | 20 | 2021-02-07T14:17:07.000Z | 2022-03-22T05:20:40.000Z | # -*- coding: utf-8 -*-
"""
@date: 2021/7/26 下午10:10
@file: cutmix.py
@author: zj
@description:
refer to [clovaai/CutMix-PyTorch](https://github.com/clovaai/CutMix-PyTorch)
"""
import torch
import numpy as np
from zcls.config.key_word import KEY_LOSS
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)  # np.int is deprecated/removed in recent NumPy; plain int is equivalent
    cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
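# Worked example (illustrative): with lam = 0.75, cut_rat = sqrt(0.25) = 0.5, so
# the cut box spans half of each side and covers roughly 25% of the image area,
# matching the 1 - lam mixing share before the exact pixel-ratio adjustment
# performed in cutmix_data().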
def cutmix_data(images, targets, alpha=1.0, device=torch.device('cpu')):
'''
Returns mixed inputs, pairs of targets, and lambda
'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = images.size()[0]
rand_index = torch.randperm(batch_size).to(device)
targets_a = targets
targets_b = targets[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(images.size(), lam)
images[:, :, bbx1:bbx2, bby1:bby2] = images[rand_index, :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (images.size()[-1] * images.size()[-2]))
return images, targets_a, targets_b, lam
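# Hedged usage sketch (names are illustrative, not from this module):
#   mixed, y_a, y_b, lam = cutmix_data(images, targets, alpha=1.0)
#   loss_dict = cutmix_criterion(criterion, model(mixed), y_a, y_b, lam)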
def cutmix_criterion(criterion, output_dict: dict, targets_a: torch.Tensor, targets_b: torch.Tensor, lam):
loss_a = criterion(output_dict, targets_a)[KEY_LOSS]
loss_b = criterion(output_dict, targets_b)[KEY_LOSS]
total_loss = lam * loss_a + (1 - lam) * loss_b
return {KEY_LOSS: total_loss}
def cutmix_evaluate(evaluator, output_dict, targets_a, targets_b, lam):
acc_dict_a = evaluator.evaluate_train(output_dict, targets_a)
acc_dict_b = evaluator.evaluate_train(output_dict, targets_b)
total_acc_dict = dict()
for (a_key, a_value), (b_key, b_value) in zip(acc_dict_a.items(), acc_dict_b.items()):
assert a_key == b_key
total_acc_dict[a_key] = lam * a_value + (1 - lam) * b_value
return total_acc_dict
| 26.740741 | 106 | 0.645891 | 346 | 2,166 | 3.83237 | 0.289017 | 0.036953 | 0.064103 | 0.040724 | 0.13273 | 0.104072 | 0.045249 | 0.045249 | 0 | 0 | 0 | 0.033392 | 0.211911 | 2,166 | 80 | 107 | 27.075 | 0.743409 | 0.124654 | 0 | 0 | 0 | 0 | 0.001603 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.095238 | false | 0 | 0.071429 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b707cd697e11ff349acb292012f7c6bd2f868a2 | 4,625 | py | Python | jsk_2015_05_baxter_apc/node_scripts/common.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | jsk_2015_05_baxter_apc/node_scripts/common.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | 2 | 2019-04-11T05:36:23.000Z | 2019-08-19T12:58:10.000Z | jsk_2015_05_baxter_apc/node_scripts/common.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import yaml
import gzip
import cPickle as pickle
import cv2
from catkin import terminal_color
import rospy
from jsk_2015_05_baxter_apc.srv import ObjectMatch, ObjectMatchResponse
def get_data_dir():
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../data')
sub_data_dir = lambda x: os.path.join(data_dir, x)
for sub in ['siftdata', 'histogram_data', 'bof_data']:
if not os.path.exists(sub_data_dir(sub)):
os.mkdir(sub_data_dir(sub))
return data_dir
def load_img(imgpath):
img = cv2.imread(imgpath)
if img is None:
rospy.logerr('not found {}'.format(imgpath))
return img
def save_siftdata(obj_name, siftdata):
"""Save sift data to data/siftdata/{obj_name}.pkl.gz"""
data_dir = get_data_dir()
siftdata_dir = os.path.join(data_dir, 'siftdata')
if not os.path.exists(siftdata_dir):
os.mkdir(siftdata_dir)
filename = os.path.join(siftdata_dir, obj_name+'.pkl.gz')
rospy.loginfo('save siftdata: {o}'.format(o=obj_name))
with gzip.open(filename, 'wb') as f:
pickle.dump(siftdata, f)
def load_siftdata(obj_name, return_pos=True, dry_run=False, data_dir=None):
"""Load sift data from pkl file"""
if data_dir is None:
data_dir = os.path.join(get_data_dir(), 'siftdata')
datafile = os.path.join(data_dir, '{0}.pkl.gz'.format(obj_name))
if dry_run: # check if exists
if os.path.exists(datafile):
return datafile
else:
return
if not os.path.exists(datafile):
print('not found siftdata: {0}'.format(obj_name))
return # does not exists
print('load siftdata: {0}'.format(obj_name))
with gzip.open(datafile, 'rb') as f:
siftdata = pickle.load(f)
if return_pos:
return siftdata
return siftdata['descriptors']
def get_train_imgs(
obj_name,
data_dir=None,
only_appropriate=True,
with_mask=True,
):
"""Find train image paths from data/obj_name"""
if data_dir is None:
data_dir = get_data_dir()
obj_dir = os.path.join(data_dir, obj_name)
if not os.path.exists(obj_dir):
print(terminal_color.fmt(
'@{yellow}[WARNING] not found object data: {0}'
).format(obj_name))
else:
os.chdir(obj_dir)
for imgfile in os.listdir('.'):
if not imgfile.endswith('.jpg'):
continue
if only_appropriate:
# N1_30.jpg -> N1_30
basename, _ = os.path.splitext(imgfile)
# N1_30 -> N1, 30
camera_pos, rotation_deg = basename.split('_')
rotation_deg = int(rotation_deg)
with open(os.path.join(data_dir, 'appropriate_images.yml')) as f:
# {'N1': ['0-30']}
                    appropriate_data = yaml.safe_load(f)[obj_name]
if (not appropriate_data) or (camera_pos not in appropriate_data):
continue
skip = True
for min_max in appropriate_data[camera_pos]:
_min, _max = map(int, min_max.split('-'))
if _min <= rotation_deg <= _max:
skip = False
break
if skip:
continue
train_path = os.path.join(obj_dir, imgfile)
train_img = cv2.imread(train_path)
if with_mask:
maskfile = os.path.splitext(imgfile)[0] + '_mask.pbm'
mask_path = os.path.join(obj_dir, 'masks', maskfile)
mask = cv2.imread(mask_path)
train_img = cv2.add(mask, train_img)
yield train_img
os.chdir(data_dir)
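# Note on appropriate_images.yml (hedged reading of the filter above): an entry
# like {'N1': ['0-30']} keeps only images taken from camera position N1 with a
# rotation between 0 and 30 degrees inclusive; all other images are skipped.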
class ObjectMatcher(object):
def __init__(self, service_name):
rospy.Service(service_name, ObjectMatch, self._cb_matcher)
def _cb_matcher(self, req):
"""Callback function for sift match request"""
rospy.loginfo('received request: {}'.format(req.objects))
probs = self.match(req.objects)
return ObjectMatchResponse(probabilities=probs)
def match(self, obj_names):
"""Get object match probabilities"""
raise NotImplementedError('override this method')
def is_imgfile(filename):
_, ext = os.path.splitext(filename)
if ext in ['.jpg', '.jpeg', '.png', '.pgm']:
return True
return False
def listdir_for_img(data_dir):
for f in os.listdir(data_dir):
if is_imgfile(f):
yield f
| 32.570423 | 82 | 0.592432 | 603 | 4,625 | 4.334992 | 0.253731 | 0.064269 | 0.038256 | 0.026779 | 0.142311 | 0.047437 | 0.016832 | 0 | 0 | 0 | 0 | 0.010086 | 0.292541 | 4,625 | 141 | 83 | 32.801418 | 0.788814 | 0.068973 | 0 | 0.100917 | 0 | 0 | 0.070459 | 0.00515 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091743 | false | 0 | 0.073395 | 0 | 0.266055 | 0.027523 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b71925db630f1e5d7b40afa3a4eea245f2b4272 | 372 | py | Python | examples/timer.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 23 | 2018-03-07T18:58:13.000Z | 2022-03-29T17:11:47.000Z | examples/timer.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 87 | 2018-04-04T19:44:13.000Z | 2022-03-31T11:18:00.000Z | examples/timer.py | TeskaLabs/asab | f28894b62bad192d8d30df01a8ad1b842ee2a2fb | [
"BSD-3-Clause"
] | 10 | 2018-04-30T16:40:25.000Z | 2022-03-09T10:55:24.000Z | #!/usr/bin/env python3
import asab
class TimerApplication(asab.Application):
async def initialize(self):
        # The timer triggers the on_tick handler every second; autorestart=True restarts it after each tick
self.Timer = asab.Timer(self, self.on_tick, autorestart=True)
self.Timer.start(1)
async def on_tick(self):
print("Think!")
if __name__ == '__main__':
app = TimerApplication()
app.run()
| 17.714286 | 63 | 0.723118 | 52 | 372 | 4.980769 | 0.692308 | 0.061776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006369 | 0.155914 | 372 | 20 | 64 | 18.6 | 0.818471 | 0.217742 | 0 | 0 | 0 | 0 | 0.048443 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b720ba819c29547ae26389980f418117f2a4a29 | 26,708 | py | Python | main.py | ikarmus2001/DziekanBOT | 393df5b5d4bdfa6e34bd44da0cca8e97fa51e46a | [
"MIT"
] | 2 | 2021-02-22T16:38:08.000Z | 2021-02-23T12:26:29.000Z | main.py | ikarmus2001/DziekanBOT | 393df5b5d4bdfa6e34bd44da0cca8e97fa51e46a | [
"MIT"
] | null | null | null | main.py | ikarmus2001/DziekanBOT | 393df5b5d4bdfa6e34bd44da0cca8e97fa51e46a | [
"MIT"
] | 1 | 2021-02-22T10:48:38.000Z | 2021-02-22T10:48:38.000Z | import discord as dc
from dotenv import load_dotenv
from os import getenv
import datetime as dt
import json, string
load_dotenv()
#*#*#*# variables #*#*#*#
config_relative_path = getenv("CONFIG")
database_relative_path = getenv("DATABASE")
token = getenv("TOKEN")
#*#*#*#*#*#*#*#*#*#*#*#*#
with open(config_relative_path) as f:
cfg = json.load(f)
with open(database_relative_path) as f:
db = json.load(f)
class BOT(dc.Client):
def __init__(self, intents=None, *args, **kwargs):
super().__init__(*args, **kwargs, intents=intents)
self.prefix = cfg['prefix']
self.perms = cfg['perms']
self.debugging = db['debugMode']
async def on_ready(self):
await self.loadLogsChannel()
for guild in self.guilds:
print(f"{self.user} connected to {guild.name}, id: {guild.id}")
print(f"{self.user.name} is alive!")
async def on_message(self, message):
if message.author == self.user:
return
elif db["groupReg"]["active"] and message.channel.id == db["groupReg"]["channel_id"]:
if "lab" in message.content.lower() or "mat" in message.content.lower():
await self.groupReg(message)
elif message.content.startswith(self.prefix):
await self.command(message)
elif (self.user.name + " ssie") in message.content or (self.user.name + " sucks") in message.content:
await message.reply("૮( ᵒ̌▱๋ᵒ̌ )ა ?!")
async def command(self, message):
content = message.content[len(self.prefix):]
args = content.split()[1::] if len(content.split()) > 1 else [None]
command = content.split()[0]
# say command
if command == "say" and await self.checkPerms(message, "say"):
await message.delete()
if any(args):
await message.channel.send(" ".join([arg for arg in args]))
# message purge
elif command == "purge" and await self.checkPerms(message, "purge"):
try:
delRan = int(args[0])
except:
await message.reply("Please specify how many messages to purge.")
else:
if delRan in range(1,51):
await message.channel.purge(limit=delRan+1, bulk=True)
if self.logsActive: await self.log(message)
else:
await message.reply("Purge amount must be in range from `1` to `50`.")
# user info embed getter
elif command == "me" and await self.checkPerms(message, "me"):
if len(message.mentions) == 1:
await message.channel.send(embed=self.getMeEmbed(message, message.mentions[0]))
else:
await message.channel.send(embed=self.getMeEmbed(message))
# role/channel ID getter
elif command == "id" and await self.checkPerms(message, "id"):
if len(args) == 1:
if len(message.role_mentions) == 1:
await message.channel.send(f"id: `{message.role_mentions[0].id}`")
elif len(message.channel_mentions) == 1:
await message.channel.send(f"id: `{message.channel_mentions[0].id}`")
elif len(message.mentions) == 1:
await message.channel.send(f"id: `{message.mentions[0].id}`")
# avatar getter
elif command == "avatar" or command == "av" and await self.checkPerms(message, "avatar"):
if message.mentions:
avatar_url = self.getAvatarURL(message.mentions[0])
else:
avatar_url = self.getAvatarURL(message.author)
await message.reply(avatar_url)
# perms getter/setter
elif command == "perms" or command == "permissions" and await self.checkPerms(message, "permissions"):
if args[0] == "set" and len(args) == 3 and await self.checkPerms(message, "permissions_manage"):
try:
lvl = int(args[2])
if len(message.role_mentions) == 1:
role_id = message.raw_role_mentions[0]
else:
                        role_id = int(args[1])  # stored ids are ints; a non-numeric arg is caught by the surrounding try
except:
await message.reply(f"Please specify a permission level and role to assign the permission to.")
else:
if lvl not in range(1,3):
await message.reply("Perms level can only be 1 or 2")
else:
if self.managePerms("set", level=lvl, role=role_id):
await message.reply("Role permission changed successfully")
if self.logsActive: await self.log(message)
else:
await message.reply("Error occured while changing role permissions.")
elif (args[0] == "delete" or args[0] == "del") and await self.checkPerms(message, "permissions_manage"):
if len(args) == 2:
if len(message.role_mentions) == 1:
role_id = message.raw_role_mentions[0]
else:
                        role_id = int(args[1]) if args[1].isdigit() else args[1]  # coerce numeric ids to match the stored ints
if self.managePerms("delete", role=role_id):
if self.logsActive: await self.log(message)
await message.reply("Role permission deleted successfully")
else:
await message.reply("Error occured while deleting role permissions.")
else:
await message.reply(f"Please specify a role to delete the permission from.")
elif not any(args):
perm_lvl = self.getUserPerms(message.author)
await message.reply(f"Your permission level: `{perm_lvl if perm_lvl < 3 else 'GOD'}`")
# bot prefix setter
elif command == "prefix" and await self.checkPerms(message, "prefix"):
if args[0]:
self.setPrefix(args[0])
await message.channel.send(f"Prefix successfully set to: `{args[0]}`")
if self.logsActive: await self.log(message)
# leaderboard getter
elif command == "leaderboard" and await self.checkPerms(message, "leaderboard"):
lb_len = 5
if args[0]:
try:
lb_len = int(args[0])
except:
await message.reply(f"Please specify the leaderboard lenght like: `{self.prefix}leaderboard 10`")
lb = self.getLeaderboard(message.guild, lb_len)
await message.channel.send(lb)
# debug mode
elif (command == "debug" or command == "debugging") and await self.checkPerms(message, "debugging"):
if args[0] == "on" or args[0] == "true" or args[0] == "1":
if self.debugging:
await message.reply("Debugging mode is already `on`")
else:
self.debugging = db['debugMode'] = True
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Debugging mode has been successfully turned `on`")
elif args[0] == "off" or args[0] == "false" or args[0] == "0":
if not self.debugging:
await message.reply("Debugging mode is already `off`")
else:
self.debugging = db['debugMode'] = False
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Debugging mode has been successfully turned `off`")
# logs management
elif command == "logs" and await self.checkPerms(message, "logs"):
if args[0] == "set":
if len(args) == 2 and len(message.channel_mentions) == 1:
await self.setLogsChannel(message.channel_mentions[0].id)
await message.reply(f"Logs channel successfully set to {message.channel_mentions[0].mention}")
else:
await message.reply(f"Please specify a log channel like: `{self.prefix}logs set #someLogsChannel`")
elif len(args) == 1 and (args[0] == "on" or args[0] == "true" or args[0] == "1"):
self.logsActive = True
db['logs']['active'] = True
self.saveDatabase()
if self.logsActive: await self.log(message)
await message.reply("Logs are now turned `on`")
elif len(args) == 1 and (args[0] == "off" or args[0] == "false" or args[0] == "0"):
if self.logsActive: await self.log(message)
self.logsActive = False
db['logs']['active'] = False
self.saveDatabase()
await message.reply("Logs are now turned `off`")
# semester management
elif (command == "semester" or command == "sem") and await self.checkPerms(message, "semester_manage"):
if args[0] == "new" or args[0] == "start":
if not db["groupReg"]["active"]:
try:
group_count = int(args[1])
except:
await message.reply(f"Please specify the number of groups like: `{self.prefix}semester new 8`")
else:
if await self.openGroupReg(message, group_count):
await message.reply("New semester started successfully!")
if self.logsActive: await self.log(message)
else:
await message.reply("An error has occured while creating new semester.")
else:
await message.reply("Group registration is already open!")
elif args[0] == "close" or args[0] == "end":
if db["groupReg"]["active"]:
await self.closeGroupReg(message)
if self.logsActive: await self.log(message)
await message.reply("Group registration has successfully been closed.")
else:
await message.reply("There's no group registration currently ongoing to close!")
# *=*=*=*=*=*=*=*=* COMMANDS *=*=*=*=*=*=*=*=* #
def saveDatabase(self):
with open(database_relative_path, mode="w") as f:
json.dump(db, f, indent=4)
async def loadLogsChannel(self):
channel = await self.fetch_channel(db['logs']['id'])
if channel:
self.logsChannel = channel
self.logsActive = db['logs']['active']
else:
self.logsActive = db['logs']['active'] = False
self.saveDatabase()
print("Logs channel could not be found by id -- Logs were turned off.")
async def setLogsChannel(self, channel_id):
db['logs']['id'] = channel_id
self.saveDatabase()
await self.loadLogsChannel()
def getUserPerms(self, user):
lvls = [0]
for pLvl, pRoles in db['permRoles'].items():
if any([role.id in pRoles for role in user.roles]):
lvls.append(int(pLvl))
permLevel = max(lvls)
if permLevel == 0 and self.debugging: return -1
return permLevel
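    # Hedged example of the expected db['permRoles'] shape (keys are permission
    # levels as strings, values are lists of role ids; the ids are illustrative):
    #   {"1": [111111111111111111], "2": [222222222222222222]}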
async def checkPerms(self, message, command):
perm_lvl = self.getUserPerms(message.author)
if self.debugging and perm_lvl == -1:
await message.reply("Can't use commands while bot is in debugging mode.")
return False
try:
required = cfg["perms"][command]
except:
required = float('infinity')
if self.getUserPerms(message.author) >= required:
return True
else:
await message.reply("You don't have the permission to use this command.")
return False
def getAvatarURL(self, user):
base = "https://cdn.discordapp.com/avatars/"
return base + str(user.id) + "/" + str(user.avatar)
def getMeEmbed(self, message, user = None):
embed = dc.Embed(title="User info")
if not user:
user = message.author
embed.color = user.color
embed.set_image(url=self.getAvatarURL(user))
joined_info = f"Joined server on `{user.joined_at.strftime('%d/%m/%Y')}`"
joined_info += f"\nBeen here for: `{str(dt.datetime.now() - user.joined_at).split(',')[0]}`"
user_roles = [role.mention for role in user.roles if role.name != "@everyone"]
if not any(user_roles):
roles_info = "No roles to see here!"
else:
roles_info = ", ".join(user_roles)
# ranking_info =
embed.add_field(name="Join Date", value=joined_info, inline=False)
embed.add_field(name="User Roles", value=roles_info, inline=False)
# embed.add_field(name="Ranking", value=ranking_info, inline=False)
return embed
def setPrefix(self, new_prefix):
cfg["prefix"] = new_prefix
with open(config_relative_path, mode="w") as f:
json.dump(cfg, f, indent=4)
self.prefix = new_prefix
    def getLeaderboard(self, guild, length=5):
        ranking = db["ranking"]
        ranking.sort(key=lambda x: x["exp"], reverse=True)
        lb = ""
        r = 1
        for i in range(min(len(ranking), length, 15)):
            user = ranking[i]
            member = guild.get_member(user['id'])
            if member:  # only list users that are still on the server
                lb += f"#{r} {member}: {user.get('exp')}\n"
                r += 1
        print(lb)
        return lb
def managePerms(self, command, **args):
if command == "set":
try:
level = args["level"]
role = args["role"]
except:
return False
else:
for pLvl, pRoles in db["permRoles"].items():
if role in pRoles:
if int(pLvl) == level:
return True
db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
break
db["permRoles"][str(level)].append(role)
self.saveDatabase()
return True
elif command == "delete":
try:
role = args["role"]
except:
return False
else:
for pLvl, pRoles in db["permRoles"].items():
if role in pRoles:
db["permRoles"][pLvl] = [r for r in db["permRoles"][pLvl] if r != role]
self.saveDatabase()
return True
return False
async def log(self, message, custom = False):
if not custom:
case = db['logs']['cases']
db['logs']['cases'] = case+1
self.saveDatabase()
embed = dc.Embed(title=f"Log Case #{case}")
embed.color = message.author.color
embed.add_field(name="Author", value=message.author.mention, inline=True)
embed.add_field(name="Channel", value=message.channel.mention, inline=True)
embed.add_field(name="Date", value=dt.datetime.now().strftime("%d/%m/%Y %H:%M:%S"), inline=True)
embed.add_field(name="Command", value=f"`{message.content}`", inline=True)
await self.logsChannel.send(embed=embed)
else:
await self.logsChannel.send(message)
async def resetGroupRoles(self, channel, group_count):
role_template = cfg["nameSpace"]["labRoleTemplate"].split('#')
math_role_template = cfg["nameSpace"]["mathRoleTemplate"].split('#')
if len(role_template) != 2:
print("config group role template invalid: missing '#'?")
return False
elif len(math_role_template) != 2:
print("config math group role template invalid: missing '#'?")
return False
# initialize flags to see which roles exist and create the nonexistent ones later
lab_flags = [0 for _ in range(group_count)]
mat_flags = [0 for _ in range((group_count-1)//2 + 1)]
records = {} # keep record of removed data to save and log it later
for role in await channel.guild.fetch_roles():
if (role.name.startswith(role_template[0]) and role.name.endswith(role_template[1])) or (role.name.startswith(math_role_template[0]) and role.name.endswith(math_role_template[1])):
role_type = "LAB" if role.name.startswith(role_template[0]) else "MAT"
records[str(role.name)] = []
members = role.members
# g_id determines the current group's number
if role_type == "LAB":
g_id = int(role.name[len(role_template[0]):-len(role_template[1])])
elif role_type == "MAT":
g_id = int(role.name[len(math_role_template[0]):-len(math_role_template[1])])
# clear role from every user and store the changes in records
await channel.send(f"Clearing `{role.name}` from `{len(members)}` users..")
for member in members:
records[role.name].append(str(member.name + '#' + member.discriminator))
await member.remove_roles(role)
# remove the role entirely if it's not in range of new semester's group length
if g_id not in range(1,group_count+1):
await channel.send(f"Removing `{role.name}`..")
await role.delete()
elif role_type == "MAT" and g_id not in range(1,len(mat_flags)+1):
await channel.send(f"Removing `{role.name}`..")
await role.delete()
else:
# set flags for roles kept for next semester and save their id's in db for future registration management
if role_type == "LAB":
lab_flags[g_id-1] = 1
db["groupReg"]["role_ids"][str(g_id)] = role.id
elif role_type == "MAT":
mat_flags[g_id-1] = 1
db["groupReg"]["math_role_ids"][str(g_id)] = role.id
self.saveDatabase()
# create nonexistent roles based on gaps in flags
for ID, flag in enumerate(lab_flags):
if not flag:
name = f"{role_template[0]}{ID+1}{role_template[1]}"
await channel.send(f"Creating `{name}`..")
role = await channel.guild.create_role(name=name,mentionable=True,hoist=True,color=dc.Color.random())
db["groupReg"]["role_ids"][str(ID+1)] = role.id
for ID, flag in enumerate(mat_flags):
if not flag:
name = f"{math_role_template[0]}{ID+1}{math_role_template[1]}"
await channel.send(f"Creating `{name}`..")
role = await channel.guild.create_role(name=name,mentionable=True,color=dc.Color.random())
db["groupReg"]["math_role_ids"][str(ID+1)] = role.id
self.saveDatabase()
# save records to file and log them to logs channel if active
with open('archives.txt', 'a') as f:
json.dump(records, f, indent=4)
# if self.logsActive:
# await self.log(f'```json\n{json.dumps(records,indent=4)}\n```', custom=True)
# await channel.send(f'`Archive sent to logs channel and saved on machine.`')
# else:
await channel.send(f'`Archive saved on machine.`')
return True
async def openGroupReg(self, message, group_count):
if await self.resetGroupRoles(message.channel, group_count):
db["groupReg"]["active"] = True
db["groupReg"]["groupCount"] = group_count # group_count determines the len of lab groups in new semester
            # get rid of the registration category and its text channels if they exist
for category in message.guild.categories:
if category.name == cfg["nameSpace"]["groupsRegCategory"]:
for channel in category.channels:
await channel.delete()
await category.delete()
break
# create new category with its text channels for registration
GRC = await message.guild.create_category(name=cfg["nameSpace"]["groupsRegCategory"], position=2)
GRIC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegInfoChannel"])
await GRIC.set_permissions(message.guild.roles[0], send_messages = False, read_messages = True)
GRC = await GRC.create_text_channel(name=cfg["nameSpace"]["groupsRegChannel"])
# save the channel id used for registration for command management purposes
db["groupReg"]["channel_id"] = GRC.id
self.saveDatabase()
# send registration opening notification to GRIC
await message.channel.send(f'`Group registration channel created.`')
info = f''':warning: @everyone Rejestracja do grup w nowym semestrze została otwarta! :warning: \n
**Aby poprawnie zarejestrować się do grupy LAB oraz MAT wyślij** `lab #numerGrupy` **oraz** `mat #numerGrupy` **na kanale** {GRC.mention}, np. `lab 4`; `mat 2` lub `lab 4 mat 2`.
Dla osób będących w kilku grupach laboratoryjnych jednocześnie - proszę kontaktować się z administracją serwera.'''
await GRIC.send(info)
# send new semester decorator on all group channels
for channel in message.guild.channels:
if channel.name.endswith(cfg["nameSpace"]["generalChannelTemplate"]):
await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
elif channel.name.endswith(cfg["nameSpace"]["datesChannelTemplate"]):
await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
elif channel.name.startswith(cfg["nameSpace"]["mathChannelTemplate"]):
await channel.send(cfg["nameSpace"]["newSemesterDecorator"])
return True
return False
async def groupReg(self, message):
user = message.author
content = message.content.lower()
l_id = content.find('lab')
m_id = content.find('mat')
digits = string.digits
lab_gr = mat_gr = None
        # do some string magic to extract the lab group number from the message if it includes the "lab" keyword
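        # Worked example (illustrative): for content "lab 4 mat 2", l_id = 0 and
        # m_id = 6, so cntnt = "4 " and the leading digit run "4" gives lab_gr = 4.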
if l_id >= 0:
            if m_id > l_id:  # don't include the "mat" keyword if it appears after "lab"
cntnt = content[l_id+3:m_id].lstrip()
else: cntnt = content[l_id+3:].lstrip()
lab_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
# return with an exception if the number is not in current lab groups range
if lab_gr not in range(1,db["groupReg"]["groupCount"]+1):
await message.reply(f"Lab group needs to be between `1` and `{db['groupReg']['groupCount']}`.")
return
# same string magic for mat group number
if m_id >= 0:
            if l_id > m_id:  # don't include the "lab" keyword if it appears after "mat"
cntnt = content[m_id+3:l_id].lstrip()
else: cntnt = content[m_id+3:].lstrip()
mat_gr = int("".join([v for vID, v in enumerate(cntnt) if v in digits and not any([c not in digits for c in cntnt[:vID]])]))
# return with an exception if the number is not in current mat groups range
if mat_gr not in range(1,(db["groupReg"]["groupCount"]-1)//2 + 2):
await message.reply(f"Mat group needs to be between `1` and `{(db['groupReg']['groupCount']-1)//2 + 1}`.")
return
        # assign group roles to the user and capture the output
out = await self.regToGroups(user, lab_gr, mat_gr)
if out:
await message.reply(f"Successfully registered to: `{'`, `'.join(out)}`")
else:
await message.reply("An error occured while registering to group, please try again.")
async def regToGroups(self, user, labGroup=None, matGroup=None):
if not (labGroup or matGroup): return False
for role in user.roles:
if labGroup and role.id in tuple(db["groupReg"]["role_ids"].values()):
await user.remove_roles(role)
elif matGroup and role.id in tuple(db["groupReg"]["math_role_ids"].values()):
await user.remove_roles(role)
output = [] # store successfully applied roles in output
if labGroup:
lab_id = db["groupReg"]["role_ids"][str(labGroup)]
role = user.guild.get_role(lab_id)
output.append(role.name)
await user.add_roles(role)
if matGroup:
mat_id = db["groupReg"]["math_role_ids"][str(matGroup)]
role = user.guild.get_role(mat_id)
output.append(role.name)
await user.add_roles(role)
return output
async def closeGroupReg(self, message):
# reset group registration database
db["groupReg"]["active"] = False
db["groupReg"]["channel_id"] = None
db["groupReg"]["groupCount"] = 0
db["groupReg"]["role_ids"] = {}
db["groupReg"]["math_role_ids"] = {}
self.saveDatabase()
        # get rid of the registration category and its text channels if they exist
for category in message.guild.categories:
if category.name == cfg["nameSpace"]["groupsRegCategory"]:
for channel in category.channels:
await channel.delete()
await category.delete()
break
intents = dc.Intents.all()
bot_client = BOT(intents=intents)
bot_client.run(token) | 48.648452 | 193 | 0.545305 | 3,087 | 26,708 | 4.650794 | 0.139294 | 0.037612 | 0.039075 | 0.019921 | 0.425994 | 0.332939 | 0.288361 | 0.227137 | 0.18841 | 0.153375 | 0 | 0.008326 | 0.338962 | 26,708 | 549 | 194 | 48.648452 | 0.804758 | 0.074734 | 0 | 0.307865 | 0 | 0.006742 | 0.178333 | 0.020124 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017978 | false | 0 | 0.011236 | 0 | 0.080899 | 0.013483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7335fdc121bab7e4b361e779e47bb56e5672d8 | 6,635 | py | Python | filter_cube.py | jd-au/thor-hi | 16a1326fcfe2ffaac496d2576b8727ca2f12dc9b | [
"Apache-2.0"
] | null | null | null | filter_cube.py | jd-au/thor-hi | 16a1326fcfe2ffaac496d2576b8727ca2f12dc9b | [
"Apache-2.0"
] | null | null | null | filter_cube.py | jd-au/thor-hi | 16a1326fcfe2ffaac496d2576b8727ca2f12dc9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python -u
# Filter out emission from a THOR HI cube. Will 2D Fourier transform each plane of the cube and zero out the centre
# of the Fourier image and then inverse Fouroer transform back to the image domain. This produces a cube without the
# large scale emission
# Author James Dempsey
# Date 26 Nov 2017
from __future__ import print_function, division
import argparse
import sys
import time
from astropy.io import fits
import numpy as np
import pyfftw
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(
description="Filter the large scale emission from an imag cube using Fourier transforms",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input", help="The name of the file to be filtered.")
parser.add_argument("output", help="The name of the filtered file to be produced.")
parser.add_argument("-r", "--radius", help="The radius of the filter to apply to the centre of the Fourier image.",
default=20, type=int)
parser.add_argument("-t", "--threads", help="The number of threads to be used for the Fourier transform.",
default=4, type=int)
args = parser.parse_args()
return args
def do_fftw(image, threads=2):
"""
Calculate the Fourier transform of the input 2 dimensional image using the
pyFFTW library.
:param image: The square float64 image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
:return: The fourier transform.
"""
image_in = pyfftw.empty_aligned(image.shape, dtype='float64')
image_in[:] = image
fft_object = pyfftw.builders.fft2(image_in, axes=(0, 1), threads=threads)
image_out = fft_object()
return image_out
def do_ifftw(image, threads=2):
"""
Calculate the inverse Fourier transform of the input 2 dimensional Fourier image using the
pyFFTW library.
:param image: The square complex128 image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
:return: The fourier transform.
"""
image_in = pyfftw.empty_aligned(image.shape, dtype='complex128')
image_in[:] = image
fft_object = pyfftw.builders.ifft2(image_in, axes=(0, 1), threads=threads)
image_out = fft_object()
return image_out
def fft_image(image, threads=4):
"""
Produce a processed Fourier transform of the input image. The image must be
square and of type float64 and real only. The Fourier transform will be
shifted to have the zero-frequency component in the centre of the image.
:param image: The square image to be transformed.
:param threads: The number of threads to be used by pyFFTW.
:return: The centred complex Fourier transform.
"""
#ft_img = np.fft.fft2(image)
ft_img = do_fftw(image, threads)
#print(ft_img.shape)
ft_shift = np.fft.fftshift(ft_img)
return ft_shift
def ifft_image(ft_shift, threads=4):
"""
Invert a Fourier transform of an image. The resulting image will be
square and of type complex128. The real aspect of this image will represent the image.
The Fourier transform will be unshifted to move the zero-frequency component away from the centre of the image.
:param ft_shift: The centred complex Fourier transform.
:param threads: The number of threads to be used by pyFFTW.
:return: The complex inverse Fourier transformed image.
"""
unshifted = np.fft.ifftshift(ft_shift)
#inverted = np.fft.ifft2(unshifted)
inverted = do_ifftw(unshifted, threads=threads)
return inverted
def filter_plane(plane, radius=20, threads=4):
# Prepare the spatial slice for fft
start = time.time()
flipped = np.concatenate((plane, np.fliplr(plane)), axis=1)
mirrored = np.concatenate((flipped, np.flipud(flipped)), axis=0)
x_pad = (mirrored.shape[0] - mirrored.shape[1]) // 2
padded = np.lib.pad(mirrored, ((0, 0), (x_pad, x_pad)), 'constant')
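    # Note (hedged): mirroring the plane before the FFT makes it periodic at the
    # edges and reduces ringing; the zero-padding to a square assumes the
    # mirrored array is at least as tall as it is wide (x_pad >= 0).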
prep_end = time.time()
print(' Prep for plane took %.02f s' % (prep_end - start))
sys.stdout.flush()
# Do the fft
ft_img = fft_image(padded, threads)
ft_end = time.time()
print(' FFT for plane took %.02f s' % (ft_end - prep_end))
sys.stdout.flush()
    # Filter out the large-scale emission
centre_y = ft_img.shape[0] // 2
centre_x = ft_img.shape[1] // 2
ft_img[centre_y - radius:centre_y + radius, centre_x - radius:centre_x + radius] = 0
# Invert the fft to get back the image
inverted = ifft_image(ft_img, threads)
ift_end = time.time()
print(' iFFT for plane took %.02f s' % (ift_end - ft_end))
sys.stdout.flush()
post_psd_2d = inverted.real
centre_y = post_psd_2d.shape[0] // 2
centre_x = post_psd_2d.shape[1] // 2
post_plane = post_psd_2d[:centre_y, x_pad:centre_x].astype(np.float32)
return post_plane
def filter_image(image, radius=40, threads=4):
#pyfftw.interfaces.cache.enable()
filtered = np.zeros(image.shape, dtype=np.float32)
for idx in range(image.shape[0]):
print("Processing plane", idx)
sys.stdout.flush()
plane = image[idx, :, :]
post_plane = filter_plane(plane, radius, threads)
filtered[idx, :, :] = post_plane
return filtered
def load_image(filename):
hdulist = fits.open(filename, memmap=True)
image = hdulist[0].data
print("Image shape is", image.shape)
header = hdulist[0].header
return image, header
def save_image(filename, image, header, radius):
header['history'] = "Emission filtered with radius {} Fourier filter.".format(radius)
hdu = fits.PrimaryHDU(image, header)
hdu.writeto(filename, overwrite=True)
def main():
"""
Main script for filter_cube
:return: The exit code
"""
args = parseargs()
start = time.time()
print("#### Started filtering of cube {} at {} ####".format(args.input,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
# Filter the image
orig_image, header = load_image(args.input)
filtered = filter_image(orig_image, radius=args.radius, threads=args.threads)
save_image(args.output, filtered, header, args.radius)
# Report
end = time.time()
print('#### Filtering completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Filtering took %.02f s' %
(end - start))
return 0
# Run the script if it is called from the command line
if __name__ == "__main__":
exit(main())
| 32.52451 | 119 | 0.677468 | 958 | 6,635 | 4.585595 | 0.235908 | 0.043706 | 0.02595 | 0.020487 | 0.288869 | 0.213522 | 0.190758 | 0.151605 | 0.151605 | 0.131118 | 0 | 0.015577 | 0.216277 | 6,635 | 203 | 120 | 32.684729 | 0.829231 | 0.312283 | 0 | 0.121212 | 0 | 0 | 0.150137 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10101 | false | 0 | 0.070707 | 0 | 0.262626 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7384092723174901fdb2fe1b0d1247ce21ab53 | 2,383 | py | Python | niscv_v2/experiments/simulation/resampling_ratio.py | IanFla/Importance-Sampling | f2dd2164e95377d2cf025fcddd19b2592394e4d7 | [
"Apache-2.0"
] | null | null | null | niscv_v2/experiments/simulation/resampling_ratio.py | IanFla/Importance-Sampling | f2dd2164e95377d2cf025fcddd19b2592394e4d7 | [
"Apache-2.0"
] | null | null | null | niscv_v2/experiments/simulation/resampling_ratio.py | IanFla/Importance-Sampling | f2dd2164e95377d2cf025fcddd19b2592394e4d7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import scipy.stats as st
from niscv_v2.basics.exp import Exp
from niscv_v2.basics import utils
import multiprocessing
import os
from functools import partial
from datetime import datetime as dt
import pickle
def experiment(dim, fun, size_est, sn, show, size_kn, ratio, bootstrap):
mean = np.zeros(dim)
target = lambda x: st.multivariate_normal(mean=mean).pdf(x)
proposal = st.multivariate_normal(mean=mean + 0.5, cov=4)
grid_x = np.linspace(-5, 5, 200)
exp = Exp(dim, target, fun, proposal, size_est, sn=sn, adjust=False, show=show)
exp.initial_estimation()
exp.resampling(size_kn, ratio, bootstrap=bootstrap)
if exp.show:
exp.draw(grid_x, name='initial')
exp.density_estimation(mode=1, local=False, gamma=0.3, bdwth=1.0, alpha0=0.1)
exp.nonparametric_estimation(mode=0)
exp.nonparametric_estimation(mode=1)
exp.nonparametric_estimation(mode=2)
if exp.show:
exp.draw(grid_x, name='nonparametric')
exp.control_calculation()
exp.regression_estimation()
if exp.show:
exp.draw(grid_x, name='regression')
return exp.result, exp.params
def run(it, dim, bootstrap):
settings = [1, 2, 3, 4, -1, -2]
ratios = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
Results = []
Params = []
for setting in settings:
results = []
params = []
for ratio in ratios:
np.random.seed(19971107 + it)
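            # Seed offset by the replication index: each run `it` is
            # reproducible while drawing a distinct random stream.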
print(dim, bootstrap, it, setting, ratio)
res, par = experiment(dim=dim, fun=utils.integrand(setting), size_est=10000, sn=True,
show=False, size_kn=300, ratio=ratio, bootstrap=bootstrap)
results.append(res)
params.append(par)
Results.append(results)
Params.append(params)
return [Results, Params]
def main(dim, bootstrap):
os.environ['OMP_NUM_THREADS'] = '1'
with multiprocessing.Pool(processes=60) as pool:
begin = dt.now()
its = np.arange(1000)
R = pool.map(partial(run, dim=dim, bootstrap=bootstrap), its)
end = dt.now()
print((end - begin).seconds)
with open('../../data/simulation/resampling_ratio_{}D_{}'.format(dim, bootstrap), 'wb') as file:
pickle.dump(R, file)
if __name__ == '__main__':
main(4, 'st')
main(6, 'st')
main(4, 'sp')
main(6, 'sp')
| 30.164557 | 100 | 0.632396 | 332 | 2,383 | 4.436747 | 0.376506 | 0.040733 | 0.01833 | 0.02444 | 0.131025 | 0.050917 | 0.050917 | 0.050917 | 0 | 0 | 0 | 0.04057 | 0.234578 | 2,383 | 78 | 101 | 30.551282 | 0.766996 | 0 | 0 | 0.047619 | 0 | 0 | 0.045741 | 0.018884 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.222222 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7b7ed1542c5197c3dec6914240d11b49526c97 | 4,649 | py | Python | engraving/engraving.py | catt0/lostarkthings | 72d8756d899c01e19884eebd3978d965f9073bba | [
"MIT"
] | 1 | 2022-03-24T12:29:54.000Z | 2022-03-24T12:29:54.000Z | engraving/engraving.py | catt0/lostarkthings | 72d8756d899c01e19884eebd3978d965f9073bba | [
"MIT"
] | null | null | null | engraving/engraving.py | catt0/lostarkthings | 72d8756d899c01e19884eebd3978d965f9073bba | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# MIT License
# Copyright (c) 2022 catt0
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
PoC for an engraving calculator.
For now you need to edit the data in this file directly.
Check the comments for what to change.
"""
# Disclaimer notes:
# The simple algorithm has its limits: it only distinguishes builds that work 100% from impossible ones.
# It also prefers +3 accessories over possibly cheaper splits, but you see the accessory list, so you can tweak that on your own.
# It also always spends the books on the first one or two engravings, even though it could be better to use them
# on the lower-priority ones, but I guess that is how it is usually done anyway.
from enum import Enum, auto
from pprint import pprint
class Engraving(Enum):
def __repr__(self):
return '<%s.%s>' % (self.__class__.__name__, self.name)
# add new engravings below, names must not contain spaces
Grudge = auto()
CursedDoll = auto()
KeenBlunt = auto()
Firepower = auto()
BlessedAura = auto()
# your target points
# order determines the priority
# it will first use books to fulfill these
target = [
(Engraving.Grudge, 15),
(Engraving.CursedDoll, 15),
(Engraving.KeenBlunt, 15),
(Engraving.Firepower, 15),
]
# the points you get from books
books = {
Engraving.Grudge: 12,
Engraving.Firepower: 12,
}
# the points you get from your stone
stone = {
Engraving.KeenBlunt: 8,
Engraving.Grudge: 5,
}
MAX_ACCS = 5
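# Worked example with the config above: Grudge needs 15 points; the book gives
# 12 and the stone gives 5, so 5 + 12 = 17 >= 15 is reached without spending any
# accessory slots on Grudge.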
def find_acc_slots(accs, engraving, needed_levels):
    if needed_levels <= 0:
        return True
    # First create new accessories (up to MAX_ACCS), putting up to +3 of the
    # engraving on each one's primary slot.
    while needed_levels > 0 and len(accs) < MAX_ACCS:
        level_to_add = min(needed_levels, 3)
        accs.append(((engraving, level_to_add), None))
        # print('Added acc with {}: {}'.format(engraving, level_to_add))
        needed_levels -= level_to_add
    if needed_levels <= 0:
        return True
    # All accessory slots exist; fill their free secondary engraving slots.
    for i, acc in enumerate(accs):
        if acc[1] is None:
            level_to_add = min(needed_levels, 3)
            accs[i] = (acc[0], (engraving, level_to_add))
            needed_levels -= level_to_add
            # print('Added acc with {}: {}'.format(engraving, level_to_add))
            if needed_levels <= 0:
                return True
    return False
accs = []
books_equipped = []
success = True
for engraving, target_level in target:
print("Trying to reach {} on {}".format(target_level, engraving))
current_level = 0
if engraving in stone:
current_level += stone[engraving]
# print('Used {} from stone'.format(engraving))
if current_level >= target_level:
print('{} reached target {}'.format(engraving, target_level))
continue
if engraving in books and len(books_equipped) < 2:
current_level += books[engraving]
books_equipped.append(engraving)
# print('Used {} from book'.format(engraving))
if current_level >= target_level:
print('{} reached target {}'.format(engraving, target_level))
continue
# print('{} at {} after books and stone'.format(engraving, current_level))
if not find_acc_slots(accs, engraving, target_level - current_level):
print('Unable to reach target {} for {}'.format(target_level, engraving))
success = False
break
print('{} reached target {}'.format(engraving, target_level))
if not success:
print('Impossible')
else:
print('Books:')
pprint(books_equipped)
print('Accessories:')
pprint(accs)
| 33.446043 | 178 | 0.670897 | 632 | 4,649 | 4.829114 | 0.371835 | 0.032438 | 0.026212 | 0.024902 | 0.201507 | 0.172674 | 0.172674 | 0.135321 | 0.135321 | 0.087156 | 0 | 0.009923 | 0.241342 | 4,649 | 138 | 179 | 33.688406 | 0.855401 | 0.449989 | 0 | 0.24 | 0 | 0 | 0.059992 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.026667 | 0.013333 | 0.2 | 0.146667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7cc6ba3cb5669cb102ecb6c5ae731e244b2f55 | 4,447 | py | Python | tests/topo_test.py | dlgeorge/geoclaw | 2b4ce9b1ba2532fe3ac38ee7c05297eb61e45bd1 | [
"BSD-3-Clause"
] | null | null | null | tests/topo_test.py | dlgeorge/geoclaw | 2b4ce9b1ba2532fe3ac38ee7c05297eb61e45bd1 | [
"BSD-3-Clause"
] | 2 | 2018-11-05T19:41:12.000Z | 2019-03-19T00:03:38.000Z | tests/topo_test.py | BrisaDavis/geoclaw | ccab58669bc2950de13cf0f35c10b3cd1cb1cda6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import os
import glob
import tempfile
import matplotlib.pyplot as plt
from clawpack.geoclaw import topotools
import clawpack.geoclaw.topo as topo
import numpy as np
def test1():
"""
Make two topo files and then read them in and plot them.
The second file is a finer grid over a smaller region.
"""
fname = 'bowl.tt2'
maketopo1a(fname)
tpd = topotools.TopoPlotData(fname)
tpd.imshow = True
tpd.cmin = -1000.
tpd.cmax = 2000.
tpd.addcolorbar = True
tpd.plot()
fname = 'hill.tt2'
maketopo1b(fname)
tpd = topotools.TopoPlotData(fname)
tpd.imshow = True
tpd.cmin = -1000.
tpd.cmax = 2000.
tpd.addcolorbar = False
tpd.plot()
plt.title('Bathymetry / topography')
fname = 'topotest1.png'
plt.savefig(fname)
# print 'Created ',fname
def topo1(x,y):
"""
Sample topography
"""
# Parabolic bowl
z = 1000.*(x**2 + y**2 - 1.)
# Add a Gaussian hill
z = z + 1000.*np.exp(-100*((x-0.7)**2 + (y-0.8)**2))
return z
def maketopo1a(path):
"""
Output topography file for the entire domain
"""
nxpoints = 101
nypoints = 76
xlower = -1.5
xupper = 2.5
ylower = -1.
yupper = 2.
topotools.topo2writer(path,topo1,xlower,xupper,ylower,yupper,\
nxpoints,nypoints)
def maketopo1b(path):
"""
Output topography file for the entire domain
"""
nxpoints = 101
nypoints = 71
xlower = 0.0
xupper = 1.0
ylower = 0.5
yupper = 1.2
topotools.topo2writer(path,topo1,xlower,xupper,ylower,yupper,\
nxpoints,nypoints)
def test_topography_object(plot=False):
"""
Test the Topography object's functionality
"""
try:
base_path = tempfile.mkdtemp()
# Create initial test bathymetry
maketopo1a(os.path.join(base_path, 'bowl.tt2'))
maketopo1b(os.path.join(base_path, 'hill.tt2'))
hill_topo = []
bowl_topo = []
topo_types = [2,3,1,2]
for (n, topo_type) in enumerate(topo_types):
bowl_path = os.path.join(base_path, 'bowl.tt%s' % topo_type)
hill_path = os.path.join(base_path, 'hill.tt%s' % topo_type)
bowl_topo.append(topo.Topography(bowl_path))
hill_topo.append(topo.Topography(hill_path))
if plot:
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
hill_topo[-1].plot(axes=axes, limits=[-2000,0])
bowl_topo[-1].plot(axes=axes, limits=[-2000,0])
fig.suptitle('Bathymetry / topography, topo type = %s' % topo_type)
plt.savefig('topotest%s.png' % (n + 2))
print(n, topo_type)
if n + 1 != len(topo_types):
bowl_path = os.path.join(base_path, 'bowl.tt%s' % topo_types[n+1])
hill_path = os.path.join(base_path, 'hill.tt%s' % topo_types[n+1])
bowl_topo[-1].write(bowl_path)
hill_topo[-1].write(hill_path)
# Check data
for (n,topo_type) in enumerate(topo_types):
for (m,topo_type) in enumerate(topo_types):
assert np.all(bowl_topo[n].X == bowl_topo[m].X), \
"bowl[%s].X != bowl[%s].X" % (n,m)
assert np.all(bowl_topo[n].Y == bowl_topo[m].Y), \
"bowl[%s].Y != bowl[%s].Y" % (n,m)
assert np.all(bowl_topo[n].Z == bowl_topo[m].Z), \
"bowl[%s].Z != bowl[%s].Z" % (n,m)
assert np.all(hill_topo[n].X == hill_topo[m].X), \
"hill[%s].X != hill[%s].X" % (n,m)
assert np.all(hill_topo[n].Y == hill_topo[m].Y), \
"hill[%s].Y != hill[%s].Y" % (n,m)
assert np.all(hill_topo[n].Z == hill_topo[m].Z), \
"hill[%s].Z != hill[%s].Z" % (n,m)
finally:
paths = glob.glob(os.path.join(base_path,"*"))
for path in paths:
os.remove(path)
os.rmdir(base_path)
if __name__=='__main__':
print("Starting procedural test...")
test1()
print("Done performing procedural test.")
print("Starting object test...")
test_topography_object(plot=True)
print("Done performing object test...") | 28.690323 | 83 | 0.555431 | 600 | 4,447 | 3.993333 | 0.238333 | 0.033389 | 0.029215 | 0.040902 | 0.397746 | 0.390234 | 0.345993 | 0.342654 | 0.250417 | 0.250417 | 0 | 0.033473 | 0.301327 | 4,447 | 155 | 84 | 28.690323 | 0.737689 | 0.089723 | 0 | 0.174757 | 0 | 0 | 0.106485 | 0 | 0 | 0 | 0 | 0 | 0.058252 | 1 | 0.048544 | false | 0 | 0.087379 | 0 | 0.145631 | 0.058252 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7cddd851735817010a31afc1a8841a61a5dfc8 | 3,443 | py | Python | moveit/moveit.py | pavel-paulau/moveit | 9572f3b4330b058c4cb324c774fff443a1ccb2f9 | [
"Apache-2.0"
] | 2 | 2015-09-28T13:54:43.000Z | 2015-09-28T16:59:43.000Z | moveit/moveit.py | pavel-paulau/moveit | 9572f3b4330b058c4cb324c774fff443a1ccb2f9 | [
"Apache-2.0"
] | null | null | null | moveit/moveit.py | pavel-paulau/moveit | 9572f3b4330b058c4cb324c774fff443a1ccb2f9 | [
"Apache-2.0"
] | 1 | 2020-03-10T20:17:28.000Z | 2020-03-10T20:17:28.000Z | #!/usr/bin/env python
import argparse
import json

from collections import defaultdict


def read_data(fname):
    raw_data = defaultdict(list)
    with open(fname) as fh:
        for line in fh.readlines():
            event = json.loads(line.strip())
            if event['type'] == 'rebalanceStart':  # only last rebalance events
                raw_data = defaultdict(list)
            if event.get('bucket') not in (None, 'undefined'):
                raw_data[event['bucket']].append(event)
    return raw_data


def parse_events(data):
    _data = defaultdict(dict)
    for bucket, events in data.items():
        _data[bucket] = defaultdict(list)
        for event in sorted(events, key=lambda event: event['ts']):
            vbucket = event.get('vbucket')
            if vbucket:
                _data[bucket][vbucket].append((event['type'], event['ts']))
    return _data


def calc_total_time(events):
    done = start = None
    for event, ts in events:
        if event == 'vbucketMoveDone':
            done = ts
        elif event == 'dcpAddStream' and start is None:
            start = ts
    if done and start:
        return done - start


def find_hot_spots(events, total_time, threshold):
    prev = prev_event = None
    for event, ts in events:
        if event in ('updateFastForwardMap', 'vbucketStateChange'):
            continue
        if prev:
            delta = 100 * (ts - prev) / total_time
            if delta > threshold:
                yield (prev_event, event, delta)
        prev = ts
        prev_event = event


def analyze_events(data, threshold):
    for bucket, vbuckets in data.items():
        timings = []
        hotspots = defaultdict(list)
        for vbucket, events in vbuckets.items():
            total_time = calc_total_time(events)
            if not total_time:
                continue
            timings.append((vbucket, total_time))
            for hotspot in find_hot_spots(events, total_time, threshold):
                hotspots[vbucket].append(hotspot)
        try:
            report(bucket, timings, hotspots)
        except IOError:
            pass


def report(bucket, timings, hotspots):
    mean = sum(total for vbucket, total in timings) / len(timings)
    _max = max(total for vbucket, total in timings)
    _min = min(total for vbucket, total in timings)

    summary = '{{:>{}}}: {{}} movements, ' \
              'mean: {{:.1f}}s, max: {{:.1f}}s, min: {{:.1f}}s'.format(len(bucket))
    vb_summary = '{{:>{}}}: {{:.1f}}s'.format(len(bucket))
    hotspot = '{}{{}} -> {{}}: {{:.1f}}%'.format(''.rjust(len(bucket) + 2))

    print(summary.format(bucket, len(timings), mean, _max, _min))
    for vbucket, total_time in timings:
        print(vb_summary.format(vbucket, total_time))
        for prev_event, event, delta in hotspots[vbucket]:
            print(hotspot.format(prev_event, event, delta))
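
# Note on the doubled braces in report() above: '{{:>{}}}'.format(5) first
# renders to the literal template '{:>5}', which is then applied as a normal
# format string. A minimal illustration (not part of the original tool):
#
#     template = '{{:>{}}}'.format(5)   # -> '{:>5}'
#     template.format('ab')             # -> '   ab'
#
# This lets the column width adapt to len(bucket).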


def main():
    parser = argparse.ArgumentParser(prog='moveit')
    parser.add_argument('-t', dest='threshold', type=float, default=0,
                        help='hotspot threshold in %%')
    parser.add_argument('filename', type=str, help='path to master events log')
    args = parser.parse_args()

    if not 0 <= args.threshold <= 100:
        parser.error('threshold must be in 0 to 100 range')

    raw_data = read_data(fname=args.filename)
    data = parse_events(data=raw_data)
    analyze_events(data, args.threshold)


if __name__ == '__main__':
    main()
| 32.481132 | 79 | 0.598606 | 415 | 3,443 | 4.836145 | 0.286747 | 0.044843 | 0.027902 | 0.028401 | 0.126059 | 0.108122 | 0.064773 | 0.028899 | 0 | 0 | 0 | 0.007197 | 0.273599 | 3,443 | 105 | 80 | 32.790476 | 0.795282 | 0.013651 | 0 | 0.072289 | 0 | 0 | 0.103712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0.012048 | 0.036145 | 0 | 0.156627 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7e960044cdd21ef1c64e33f14c9a00d918de92 | 516 | py | Python | print_spelling_suggestions.py | tmills/econsult | a702948278bd9995623026702bcd4c4c9bd47159 | [
"Apache-2.0"
] | null | null | null | print_spelling_suggestions.py | tmills/econsult | a702948278bd9995623026702bcd4c4c9bd47159 | [
"Apache-2.0"
] | 3 | 2021-03-25T22:10:23.000Z | 2021-06-01T22:48:12.000Z | print_spelling_suggestions.py | tmills/econsult | a702948278bd9995623026702bcd4c4c9bd47159 | [
"Apache-2.0"
] | 1 | 2019-04-12T19:02:12.000Z | 2019-04-12T19:02:12.000Z | #!/usr/bin/env python
from pattern.en import spelling
import sys


def main(args):
    if len(args) < 1:
        sys.stderr.write("1 required argument: <input file>\n")
        return

    with open(args[0], 'r') as f:
        for line in f.readlines():
            word = line.rstrip()
            try:
                suggestions = spelling.suggest(word)
            except Exception:
                suggestions = "No suggestions"
            print('%s %s' % (word, str(suggestions)))
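
# Assumed output format (noted for readers unfamiliar with the library):
# pattern.en's spelling.suggest typically returns a list of
# (candidate, confidence) pairs, e.g. [('hello', 0.98), ...].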
if __name__ == '__main__':
main(sys.argv[1:]) | 24.571429 | 61 | 0.542636 | 63 | 516 | 4.31746 | 0.698413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011429 | 0.321705 | 516 | 21 | 62 | 24.571429 | 0.765714 | 0.03876 | 0 | 0 | 0 | 0 | 0.122984 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7f23ac39d6f288d7f8df3e114d47ba4ac7749a | 391 | py | Python | setup.py | opalmer/vpsutil | 3d589f0c6184a9ca79f0a96af1181bc4466b7095 | [
"MIT"
] | null | null | null | setup.py | opalmer/vpsutil | 3d589f0c6184a9ca79f0a96af1181bc4466b7095 | [
"MIT"
] | null | null | null | setup.py | opalmer/vpsutil | 3d589f0c6184a9ca79f0a96af1181bc4466b7095 | [
"MIT"
] | null | null | null | from distutils.core import setup
requires = ["requests", "paramiko"]

# configparser is in the standard library on Python 3; on Python 2 it is
# available as a backport package, so only require it when the import fails.
try:
    import configparser
except ImportError:
    requires.append("configparser")

setup(
    name="vpsutil",
    version="0.0.0",
    license="MIT",
    packages=["vpsutil"],
    install_requires=requires,
    entry_points={
        "console_scripts": [
            "ocean = vpsutil.command:ocean"
        ]
    }
)
| 17.772727 | 43 | 0.618926 | 38 | 391 | 6.289474 | 0.710526 | 0.016736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010169 | 0.245524 | 391 | 21 | 44 | 18.619048 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0.240409 | 0.053708 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b7fffb88d3d8522751df917d4f9168bc42ba45e | 4,117 | py | Python | aitlas/datasets/sat6.py | biasvariancelabs/aitlas | e36913c44d5a8393566b7271607ba839f9be0df3 | [
"MIT"
] | 32 | 2020-12-04T19:48:19.000Z | 2022-03-16T18:18:05.000Z | aitlas/datasets/sat6.py | biasvariancelabs/aitlas | e36913c44d5a8393566b7271607ba839f9be0df3 | [
"MIT"
] | 2 | 2021-04-11T17:09:14.000Z | 2021-05-14T13:22:41.000Z | aitlas/datasets/sat6.py | biasvariancelabs/aitlas | e36913c44d5a8393566b7271607ba839f9be0df3 | [
"MIT"
] | 8 | 2021-04-06T22:06:27.000Z | 2022-01-30T06:01:39.000Z | import csv
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.io
import numpy as np
import random

from ..base import BaseDataset
from .schemas import MatDatasetSchema

"""
The format of the mat dataset is:

train_x    28x28x4x400000  uint8  (containing 400000 training samples of 28x28 images, each with 4 channels)
train_y    400000x6        uint8  (containing 6x1 vectors holding labels for the 400000 training samples)
test_x     28x28x4x100000  uint8  (containing 100000 test samples of 28x28 images, each with 4 channels)
test_y     100000x6        uint8  (containing 6x1 vectors holding labels for the 100000 test samples)
"""

LABELS = ["barren land", "trees", "grassland", "roads", "buildings", "water bodies"]
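
# Illustrative sketch (not part of the original class): the shapes described
# above can be verified directly with scipy. The file name below is a
# hypothetical example value.
#
#     mat_data = scipy.io.loadmat("sat-6-full.mat")
#     mat_data["train_x"].shape  # -> (28, 28, 4, 400000)
#     mat_data["train_y"].shape  # -> (400000, 6)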


class SAT6(BaseDataset):

    schema = MatDatasetSchema

    url = "http://csc.lsu.edu/~saikat/deepsat/"
    labels = LABELS
    name = "SAT-6 dataset"

    def __init__(self, config):
        # now call the constructor to validate the schema
        BaseDataset.__init__(self, config)

        # load the data
        self.mode = self.config.mode
        self.data = self.load_dataset(self.config.mat_file_path)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        # load image
        img = self.data[index][0]

        # apply transformations
        if self.transform:
            img = self.transform(img)

        target = self.data[index][1]
        if self.target_transform:
            target = self.target_transform(self.data[index][1])

        return img, target

    def __len__(self):
        return len(self.data)

    def get_labels(self):
        return self.labels

    def data_distribution_table(self):
        mat_data = scipy.io.loadmat(self.config.mat_file_path)
        img_labels = mat_data[f'{self.mode}_y'].transpose()
        data = list(np.where(img_labels == 1)[1])
        res_list = [[i, self.labels[index]] for i, index in enumerate(data)]
        df = pd.DataFrame(res_list, columns=['id', 'Label'])
        label_count = df.groupby("Label").count().reset_index()
        label_count.columns = ['Label', 'Count']
        return label_count

    def data_distribution_barchart(self):
        label_count = self.data_distribution_table()
        fig, ax = plt.subplots(figsize=(12, 10))
        sns.barplot(y="Label", x="Count", data=label_count, ax=ax)
        return fig

    def show_image(self, index):
        label = self.labels[self[index][1]]
        fig = plt.figure(figsize=(8, 6))
        plt.title(f"Image with index {index} from the dataset {self.get_name()}, with label {label}\n",
                  fontsize=14)
        plt.axis('off')
        plt.imshow(self[index][0])
        return fig

    def show_batch(self, size):
        if size % 3:
            raise ValueError(
                "The provided size should be divisible by 3!"
            )

        image_indices = random.sample(range(0, len(self.data)), size)
        figure_height = int(size / 3) * 4
        figure, ax = plt.subplots(int(size / 3), 3, figsize=(20, figure_height))
        figure.suptitle("Example images with labels from {}".format(self.get_name()), fontsize=32)

        for axes, image_index in zip(ax.flatten(), image_indices):
            axes.imshow(self[image_index][0])
            axes.set_title(self.labels[self[image_index][1]], fontsize=18)
            axes.set_xticks([])
            axes.set_yticks([])

        figure.tight_layout()
        # figure.subplots_adjust(top=1.0)
        return figure

    def load_dataset(self, file_path):
        if not self.labels:
            raise ValueError(
                "You need to provide the list of labels for the dataset"
            )

        data = []
        if file_path:
            mat_data = scipy.io.loadmat(file_path)
            images = mat_data[f'{self.mode}_x'].transpose(3, 0, 1, 2)
            img_labels = mat_data[f'{self.mode}_y'].transpose()
            data = list(zip(images[:, :, :, 0:3], np.where(img_labels == 1)[1]))
        return data
| 35.188034 | 103 | 0.621326 | 545 | 4,117 | 4.557798 | 0.321101 | 0.022544 | 0.014493 | 0.014493 | 0.153784 | 0.113527 | 0.099034 | 0.099034 | 0.034622 | 0.034622 | 0 | 0.039643 | 0.264756 | 4,117 | 116 | 104 | 35.491379 | 0.780971 | 0.057323 | 0 | 0.074074 | 0 | 0 | 0.112581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0.024691 | 0.382716 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b8206189230c459d344d826bd836366d971ebcf | 6,768 | py | Python | src/config_gui.py | blaubachn/slideshow | 8f87db77502ee89a689a884ac2455d4019b20363 | [
"MIT"
] | null | null | null | src/config_gui.py | blaubachn/slideshow | 8f87db77502ee89a689a884ac2455d4019b20363 | [
"MIT"
] | null | null | null | src/config_gui.py | blaubachn/slideshow | 8f87db77502ee89a689a884ac2455d4019b20363 | [
"MIT"
] | null | null | null | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

from config import Configuration_Manager

about_text = '''Welcome to Slideshow!

Photo Directory: The remote will display a slideshow of the photos in this folder (excluding photos in its subfolders)

Installation Type: flatpak commands have an extra prefix, so select the method you used for installation
If Eye of Gnome was pre-installed on the system, the correct installation type is likely the standard/snap option.

Once the Save and Start button is pressed, the web service will start on this machine. You will not be able to change
the settings unless the web service is stopped. This can be done using ctrl+c in the terminal window running this app'''


class ConfigurationManagerGui(Gtk.Window):
    def __init__(self, configuration_manager, server_should_run):
        self.configuration_manager = configuration_manager
        self.server_should_run = server_should_run
        current_configuration = self.configuration_manager.get_config()

        Gtk.Window.__init__(self, title="Slideshow")

        grid = Gtk.Grid()
        self.add(grid)

        spacing = 30
        self.set_border_width(spacing)
        grid.set_column_spacing(spacing)
        grid.set_row_spacing(spacing)

        # About label
        self.about_label = Gtk.Label(about_text)

        # Directory label
        self.directory_label = Gtk.Label("Photo Directory")

        # Directory entry
        self.directory_entry = Gtk.Entry()
        self.directory_entry.set_text(current_configuration['directory'])
        self.directory_entry.set_width_chars(80)

        # Directory button
        self.directory_button = Gtk.Button("Choose...")
        self.directory_button.connect("clicked", self.on_directory_button_clicked)

        # Installation label
        self.installation_label = Gtk.Label("Installation Type")

        # Installation combo
        installation_store = Gtk.ListStore(str)
        installation_types = configuration_manager.get_installation_types()
        for installation_type in installation_types:
            installation_store.append([installation_type])
        self.installation_combo = Gtk.ComboBox.new_with_model(installation_store)
        renderer_text = Gtk.CellRendererText()
        self.installation_combo.pack_start(renderer_text, True)
        self.installation_combo.add_attribute(renderer_text, "text", 0)
        self.installation_combo.set_active(installation_types.index(current_configuration['install']))

        # Save button
        self.save_button = Gtk.Button("Save")
        self.save_button.connect("clicked", self.on_save_button_clicked)

        # Cancel button
        self.cancel_button = Gtk.Button("Cancel")
        self.cancel_button.connect("clicked", self.on_cancel_button_clicked)

        # Save and Continue button
        self.save_and_start_button = Gtk.Button("Save and Continue")
        self.save_and_start_button.connect("clicked", self.on_save_and_start_button_clicked)

        # Layout
        grid.attach(self.about_label, 0, 0, 3, 1)
        grid.attach(self.directory_label, 0, 1, 1, 1)
        grid.attach(self.directory_entry, 1, 1, 1, 1)
        grid.attach(self.directory_button, 2, 1, 1, 1)
        grid.attach(self.installation_label, 0, 2, 1, 1)
        grid.attach(self.installation_combo, 1, 2, 1, 1)
        grid.attach(self.save_button, 2, 2, 1, 1)
        grid.attach(self.cancel_button, 0, 3, 1, 1)
        grid.attach(self.save_and_start_button, 2, 3, 1, 1)

    def run(self):
        self.connect("destroy", Gtk.main_quit)
        self.show_all()
        Gtk.main()

    def on_directory_button_clicked(self, widget):
        dialog = Gtk.FileChooserDialog(
            "Please choose a folder",
            self,
            Gtk.FileChooserAction.SELECT_FOLDER,
            (
                Gtk.STOCK_CANCEL,
                Gtk.ResponseType.CANCEL,
                "Select",
                Gtk.ResponseType.OK
            )
        )
        dialog.set_default_size(800, 400)

        if dialog.run() == Gtk.ResponseType.OK:
            self.directory_entry.set_text(dialog.get_filename())

        dialog.destroy()

    def on_save_button_clicked(self, button):
        active_iter = self.installation_combo.get_active_iter()
        self.configuration_manager.set_config(
            self.directory_entry.get_text(),
            self.installation_combo.get_model()[active_iter][0]
        )

    def on_cancel_button_clicked(self, button):
        self.server_should_run.prevent()
        self.destroy()

    def on_save_and_start_button_clicked(self, button):
        active_iter = self.installation_combo.get_active_iter()
        self.configuration_manager.set_config(
            self.directory_entry.get_text(),
            self.installation_combo.get_model()[active_iter][0]
        )
        self.server_should_run.allow()
        self.destroy()
| 45.12 | 120 | 0.494385 | 620 | 6,768 | 5.154839 | 0.254839 | 0.048811 | 0.059136 | 0.037547 | 0.241239 | 0.185544 | 0.119524 | 0.103254 | 0.103254 | 0.103254 | 0 | 0.009506 | 0.207299 | 6,768 | 149 | 121 | 45.422819 | 0.586207 | 0.253694 | 0 | 0.10989 | 0 | 0.043956 | 0.152161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0 | 0.032967 | 0 | 0.10989 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b834a7d97ab58b448634df72525ae3e7aa49bda | 391 | py | Python | PythonExercicios/ex033.py | Lucas-ns/Python-3-Curso-Em-Video | f6d338fffd7a4606d34fab09634eea0fe4b3dfb3 | [
"MIT"
] | null | null | null | PythonExercicios/ex033.py | Lucas-ns/Python-3-Curso-Em-Video | f6d338fffd7a4606d34fab09634eea0fe4b3dfb3 | [
"MIT"
] | null | null | null | PythonExercicios/ex033.py | Lucas-ns/Python-3-Curso-Em-Video | f6d338fffd7a4606d34fab09634eea0fe4b3dfb3 | [
"MIT"
] | null | null | null | n1 = int(input('Primeiro Valor: '))
n2 = int(input('Segundo Valor: '))
n3 = int(input('Terceiro Valor: '))

maior = n1
menor = n1

if n2 < n3 and n2 < n1:
    menor = n2
if n3 < n2 and n3 < n1:
    menor = n3
print('O menor valor digitado foi {}'.format(menor))

if n2 > n3 and n2 > n1:
    maior = n2
if n3 > n2 and n3 > n1:
    maior = n3
print('O maior valor digitado foi {}'.format(maior))
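
# Illustrative alternative (not part of the original exercise): Python's
# built-ins give the same result in one step.
#
#     menor = min(n1, n2, n3)
#     maior = max(n1, n2, n3)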
| 24.4375 | 52 | 0.608696 | 67 | 391 | 3.552239 | 0.268657 | 0.10084 | 0.05042 | 0.07563 | 0.235294 | 0.235294 | 0.12605 | 0 | 0 | 0 | 0 | 0.083893 | 0.237852 | 391 | 15 | 53 | 26.066667 | 0.714765 | 0 | 0 | 0 | 0 | 0 | 0.268542 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b85c3711ce945544b892923a3692a030d199f9f | 2,705 | py | Python | processing/data_collection/gazette/spiders/rj_rio_de_janeiro.py | gabubellon/querido-diario | b783dac359b86121173286869a8e0bbd31cf22af | [
"MIT"
] | null | null | null | processing/data_collection/gazette/spiders/rj_rio_de_janeiro.py | gabubellon/querido-diario | b783dac359b86121173286869a8e0bbd31cf22af | [
"MIT"
] | null | null | null | processing/data_collection/gazette/spiders/rj_rio_de_janeiro.py | gabubellon/querido-diario | b783dac359b86121173286869a8e0bbd31cf22af | [
"MIT"
] | null | null | null | from gazette.items import Gazette
import datetime as dt
import re

import scrapy

from gazette.spiders.base import BaseGazetteSpider


class RjRioDeJaneiroSpider(BaseGazetteSpider):
    TERRITORY_ID = "3304557"

    name = "rj_rio_de_janeiro"
    allowed_domains = ["doweb.rio.rj.gov.br"]
    start_urls = ["http://doweb.rio.rj.gov.br"]

    search_gazette_url = "http://doweb.rio.rj.gov.br/?buscar_diario=ok&tipo=1&data_busca={}"  # format 20/04/2018
    download_gazette_url = "http://doweb.rio.rj.gov.br/ler_pdf.php?download=ok&edi_id={}"  # 20/04/2018 has edi_id = 3734

    def parse(self, response):
        parsing_date = dt.date.today()
        end_date = dt.date(2015, 1, 1)

        while parsing_date >= end_date:
            url = self.search_gazette_url.format(parsing_date.strftime("%d/%m/%Y"))
            yield scrapy.Request(
                url, self.parse_search_by_date, meta={"gazette_date": parsing_date}
            )
            parsing_date = parsing_date - dt.timedelta(days=1)

    def parse_search_by_date(self, response):
        gazette_date = response.meta.get("gazette_date")

        no_gazettes = response.css("#dialog-message").extract_first()
        if no_gazettes and "Não existe publicação para esta data" in no_gazettes:
            return

        items = []

        one_gazette = response.css(
            "#conteudo_principal > #conteudo_home > #conteudo_erro script"
        ).extract_first()
        if one_gazette:
            match = re.search(".*edi_id=([0-9]+).*", one_gazette)
            if match:
                url = self.download_gazette_url.format(match.group(1))
                items.append(self.create_gazette(gazette_date, url))

        multiple_gazettes = response.css("#dialog-message").extract_first()
        if (
            multiple_gazettes
            and "Existe mais de uma publicação para esta data" in multiple_gazettes
        ):
            editions = response.css("#dialog-message a").extract()
            for ed in editions:
                match = re.search(".*edi_id=([0-9]+).*", ed)
                if match:
                    url = self.download_gazette_url.format(match.group(1))
                    is_extra_edition = "suplemento" in ed.lower()
                    items.append(
                        self.create_gazette(gazette_date, url, is_extra_edition)
                    )

        return items

    def create_gazette(self, date, url, is_extra_edition=False):
        return Gazette(
            date=date,
            file_urls=[url],
            is_extra_edition=is_extra_edition,
            territory_id=self.TERRITORY_ID,
            power="executive",
            scraped_at=dt.datetime.utcnow(),
        )
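
# To try this spider locally (illustrative; assumes a standard Scrapy project
# layout around this module):
#
#     scrapy crawl rj_rio_de_janeiro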
| 38.642857 | 121 | 0.607024 | 332 | 2,705 | 4.722892 | 0.355422 | 0.042092 | 0.044643 | 0.033163 | 0.335459 | 0.249362 | 0.237245 | 0.211735 | 0.0625 | 0.0625 | 0 | 0.021069 | 0.280592 | 2,705 | 69 | 122 | 39.202899 | 0.784687 | 0.017006 | 0 | 0.067797 | 0 | 0.033898 | 0.176958 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.084746 | 0.016949 | 0.305085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b86ad80d46cd8819ce8434a7d769597836cdf0f | 3,502 | py | Python | 2c-AutopilotByColorSegmentation.py | Branyac/tello-innovation-challenge | 173087677bf2f964785ab76fae146b12e352ee0b | [
"MIT"
] | 1 | 2022-01-30T19:34:45.000Z | 2022-01-30T19:34:45.000Z | 2c-AutopilotByColorSegmentation.py | Branyac/tello-innovation-challenge | 173087677bf2f964785ab76fae146b12e352ee0b | [
"MIT"
] | null | null | null | 2c-AutopilotByColorSegmentation.py | Branyac/tello-innovation-challenge | 173087677bf2f964785ab76fae146b12e352ee0b | [
"MIT"
] | 1 | 2021-06-23T04:13:43.000Z | 2021-06-23T04:13:43.000Z | from djitellopy import Tello
import cv2
import numpy as np
import time

# Values for color segmentation
# It matches an orange battery
LOWER = np.array([0, 239, 180])
UPPER = np.array([30, 255, 255])

DESIRED_OBJECT_SIZE = 100
MAX_SPEED_FORWARDBACK = 50
MAX_SPEED_UPDOWN = 50
MAX_SPEED_YAW = 100
MIN_MOV_TIME = 0.15


def calculate_velocity(frame_size, center_of_object, max_speed):
    center_of_frame = int(frame_size / 2)
    distance = center_of_object - center_of_frame
    return int(max_speed * (distance / frame_size)) * 2
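
# Worked example (illustrative, not in the original script): for a frame
# 720 px wide with the object centered at x = 540 and max_speed = 100,
# distance = 540 - 360 = 180, so the returned velocity is
# int(100 * 180 / 720) * 2 = 50 -- i.e. speed scales with how far the
# object sits from the frame center.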


def main():
    tello = Tello()
    tello.connect()
    tello.streamon()
    frame_read = tello.get_frame_read()

    tello.takeoff()
    tello.move_up(40)

    try:
        while True:
            # Get frame
            frame = frame_read.frame

            # Get battery contours
            imgHsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(imgHsv, LOWER, UPPER)
            # res = cv2.bitwise_and(frame, frame, mask=mask)
            battery_contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

            # If battery is on image, detect contour
            xg = yg = wg = hg = None
            if len(battery_contours) > 0:
                battery_area = max(battery_contours, key=cv2.contourArea)
                (xg, yg, wg, hg) = cv2.boundingRect(battery_area)
                if max(xg + wg, yg + hg) > 3:  # arbitrary threshold to prevent false positives
                    cv2.rectangle(frame, (xg, yg), (xg + wg, yg + hg), (0, 255, 0), 2)
                    cv2.drawContours(frame, battery_contours, -1, (0, 255, 0), 3)

            # Show images
            cv2.imshow('Webcam', frame)
            # cv2.imshow('Mask', mask)
            # cv2.imshow('Segmented Image', res)

            # Exit when the user presses the ESC key
            k = cv2.waitKey(3) & 0xFF
            if k == 27:  # ESC key
                break

            velocity_fb = velocity_lr = velocity_ud = velocity_yaw = 0
            if xg is not None:
                # Move the drone
                object_center_x = int(xg + (wg / 2))
                object_center_y = int(yg + (hg / 2))
                object_size = ((wg ** 2) + (hg ** 2)) ** 0.5  # Fast sqrt
                object_distance = DESIRED_OBJECT_SIZE - object_size
                if not object_distance == 0:
                    velocity_fb = int(MAX_SPEED_FORWARDBACK * (object_distance / DESIRED_OBJECT_SIZE))

                frame_shape = frame.shape
                # 'object_center_y + 200' compensates for the Tello camera
                # pointing slightly downward, which otherwise makes the drone
                # fly too high
                velocity_ud = calculate_velocity(frame_shape[1], object_center_y + 200, MAX_SPEED_UPDOWN * -1)
                velocity_yaw = calculate_velocity(frame_shape[0], object_center_x, MAX_SPEED_YAW)

            # First rotate, then go forward
            if not velocity_yaw == 0:
                tello.send_rc_control(0, 0, 0, velocity_yaw)
                time.sleep(MIN_MOV_TIME)
            if not velocity_lr == velocity_fb == velocity_ud == 0:
                tello.send_rc_control(velocity_lr, velocity_fb, velocity_ud, 0)
                time.sleep(MIN_MOV_TIME)
            tello.send_rc_control(0, 0, 0, 0)
    finally:
        tello.land()
        tello.streamoff()
        tello.end()
        # When everything done, release the capture
        cv2.destroyAllWindows()


if __name__ == '__main__':
main() | 35.734694 | 153 | 0.58538 | 454 | 3,502 | 4.288546 | 0.354626 | 0.032871 | 0.026194 | 0.027735 | 0.114535 | 0.053416 | 0.053416 | 0 | 0 | 0 | 0 | 0.040842 | 0.321816 | 3,502 | 98 | 154 | 35.734694 | 0.778947 | 0.161622 | 0 | 0.03125 | 0 | 0 | 0.004796 | 0 | 0 | 0 | 0.00137 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b8a00b506487aff2b0ea1fefe241cb675cb897d | 4,169 | py | Python | gcloud/commons/tastypie/resources.py | springborland/bk-sops | a9057672c10efb5f2414a805a30ead4092429c76 | [
"Apache-2.0"
] | 1 | 2021-05-19T04:31:34.000Z | 2021-05-19T04:31:34.000Z | gcloud/commons/tastypie/resources.py | sighttviewliu/bk-sops | 6bf2f38bd93990f20f7c3a4decafc310e09e679c | [
"Apache-2.0"
] | null | null | null | gcloud/commons/tastypie/resources.py | sighttviewliu/bk-sops | 6bf2f38bd93990f20f7c3a4decafc310e09e679c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from tastypie.http import HttpForbidden
from tastypie.resources import ModelResource
from tastypie.exceptions import ImmediateHttpResponse, NotFound
from django.db.models import Q
from haystack.query import SearchQuerySet
from iam.contrib.tastypie.resource import IAMResourceMixin
from .serializers import AppSerializer
class GCloudModelResource(IAMResourceMixin, ModelResource):
login_exempt = False
def wrap_view(self, view):
"""
@summary: 统一处理函数装饰逻辑
"""
view = super(GCloudModelResource, self).wrap_view(view)
setattr(view, "login_exempt", self.login_exempt)
return view
def determine_format(self, request):
"""
@summary: 强制指定返回数据格式为json
"""
return "application/json"
def unauthorized_result(self, exception):
"""
@summary: return 403 if operation is forbidden, while default of tastypie is 401
@return:
"""
raise ImmediateHttpResponse(response=HttpForbidden())
def build_filters(self, filters=None, ignore_bad_filters=False):
"""
@summary:
"""
if filters is None:
filters = {}
orm_filters = super(GCloudModelResource, self).build_filters(filters, ignore_bad_filters)
if filters.get("q", "").strip():
if getattr(self.Meta, "q_fields", []):
queries = [Q(**{"%s__contains" % field: filters["q"]}) for field in self.Meta.q_fields]
query = queries.pop()
for item in queries:
query |= item
orm_filters["q"] = query
else:
sqs = (
SearchQuerySet()
.models(self._meta.object_class)
.auto_query(filters["q"])
.query_facet(self.Meta.q_fields)
)
# 创建自定义定过滤条件
orm_filters["pk__in"] = [i.pk for i in sqs]
return orm_filters
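
    # Illustrative note (not part of the original class): the OR-combined Q
    # objects above behave like
    #
    #     Q(name__contains="foo") | Q(creator__contains="foo")
    #
    # i.e. a row matches if any field in Meta.q_fields contains the search
    # term. The field names "name"/"creator" here are hypothetical examples.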

    def apply_filters(self, request, applicable_filters):
        """
        @summary: apply the custom "q" filter on top of the default ones
        """
        if "q" in applicable_filters:
            query = applicable_filters.pop("q")
        else:
            query = None

        queryset = super(GCloudModelResource, self).apply_filters(request, applicable_filters)

        return queryset.filter(query) if query else queryset

    def obj_delete(self, bundle, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete``.

        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        if not hasattr(bundle.obj, "delete"):
            try:
                bundle.obj = self.obj_get(bundle=bundle, **kwargs)
            except self.Meta.object_class.DoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found")

        self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
        if "is_deleted" in bundle.obj.__dict__:
            bundle.obj.__dict__.update({"is_deleted": True})
            bundle.obj.save()
        else:
            bundle.obj.delete()

    class Meta:
        serializer = AppSerializer()
        always_return_data = True
        # Controls how many results the Resource shows at a time. Defaults to
        # the API_LIMIT_PER_PAGE setting (if set) or 20 (if not set).
        limit = 0
        # Controls the maximum number of results the Resource shows at a time.
        # If the user-specified limit exceeds max_limit, it is clamped to
        # max_limit.
        max_limit = 0
| 35.939655 | 115 | 0.633245 | 478 | 4,169 | 5.39749 | 0.449791 | 0.023256 | 0.032558 | 0.017442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006981 | 0.278484 | 4,169 | 115 | 116 | 36.252174 | 0.850731 | 0.277285 | 0 | 0.04918 | 0 | 0 | 0.053873 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0 | 0.114754 | 0 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b8ba5e38a11991c07975fe01f68ff669ab459c0 | 811 | py | Python | badx12/common/click.py | agaddis02/badX12 | 7362a4d9629e570be8cd3b42af5210cda39e0efc | [
"MIT"
] | null | null | null | badx12/common/click.py | agaddis02/badX12 | 7362a4d9629e570be8cd3b42af5210cda39e0efc | [
"MIT"
] | null | null | null | badx12/common/click.py | agaddis02/badX12 | 7362a4d9629e570be8cd3b42af5210cda39e0efc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections.abc import Iterable

import click


def add_commands(click_group, commands):
    if not isinstance(click_group, click.core.Group):
        raise TypeError(
            f"add_commands() expects click.core.Group for click_group, got {type(click_group)}"
        )
    if not isinstance(commands, Iterable):
        raise TypeError(
            f"add_commands() expects an Iterable type for commands, got {type(commands)}"
        )

    for command in commands:
        if not isinstance(command, click.core.Command) and not isinstance(
            command, click.core.Group
        ):
            raise TypeError(
                f"commands must be of type click.core.Command or click.core.Group, got {type(command)}"
            )
        click_group.add_command(command)
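
# Minimal usage sketch (illustrative, not part of the original module):
#
#     @click.group()
#     def cli():
#         pass
#
#     @click.command()
#     def hello():
#         click.echo("hello")
#
#     add_commands(cli, [hello])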
| 30.037037 | 103 | 0.633785 | 99 | 811 | 5.10101 | 0.313131 | 0.106931 | 0.110891 | 0.091089 | 0.312871 | 0.215842 | 0 | 0 | 0 | 0 | 0 | 0.001698 | 0.273736 | 811 | 26 | 104 | 31.192308 | 0.855688 | 0.025894 | 0 | 0.157895 | 0 | 0.052632 | 0.30203 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b8d70280f0cb0d3625c06ba1c69384706890a4a | 2,602 | py | Python | sequence/fastx_translate.py | shenwei356/bio_scripts | 703cec8d21903516346e2aae4d77d23385c30905 | [
"MIT"
] | 94 | 2015-03-26T04:32:29.000Z | 2022-03-22T13:44:11.000Z | sequence/fastx_translate.py | xinwang-bio/bio_scripts | 64fda3a72ba14edf87952a809c3d52871f155cca | [
"MIT"
] | null | null | null | sequence/fastx_translate.py | xinwang-bio/bio_scripts | 64fda3a72ba14edf87952a809c3d52871f155cca | [
"MIT"
] | 70 | 2015-04-01T10:27:05.000Z | 2021-11-08T01:46:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/shenwei356/bio_scripts
from __future__ import print_function

import argparse
import gzip
import logging
import os
import re
import sys

from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord


def parse_args():
    parser = argparse.ArgumentParser(description="Translate DNA to peptide")

    parser.add_argument("-v", "--verbose", help='verbosely print information',
                        action="count", default=0)

    group = parser.add_mutually_exclusive_group()
    group.add_argument("--stdin", action="store_true",
                       help='read from stdin, one sequence per line')
    group.add_argument('-i', '--infile', type=str,
                       help='file name should look like this: infile.[fasta|fa|fastq|fq][.gz]')

    parser.add_argument('-f', '--format', type=str,  # default='fasta',
                        help='sequence format: fasta | fastq [fasta]')
    parser.add_argument('-t', '--table', type=int, default=1,
                        help='genetic code table (detail: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi ) [1]')

    args = parser.parse_args()

    if not (args.stdin or args.infile):
        sys.stderr.write("option --stdin or -i should be given\n")
        sys.exit(1)

    if args.format and not args.format in ['fasta', 'fastq']:
        sys.stderr.write("option -f | --format should be 'fasta' or 'fastq'\n")
        sys.exit(1)

    if args.stdin and not args.format:
        sys.stderr.write("option -f | --format should be given when --stdin is set.\n")
        sys.exit(1)

    return args
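
# Illustrative note (not in the original script): the suffix regex below,
# r'(?i)(fasta|fa|fastq|fq)(.gz)?$', matches e.g.
#
#     'reads.fq.gz'   -> groups ('fq', '.gz')
#     'seqs.FASTA'    -> groups ('FASTA', None)
#
# which is how the sequence format is inferred from the file name.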

if __name__ == '__main__':
    args = parse_args()

    file, seq_format, fh = args.infile, args.format, None

    if file:
        if not seq_format:
            found = re.search(r'(?i)(fasta|fa|fastq|fq)(.gz)?$', file)
            if not found:
                print("invalid file name suffix.\nfile name should look like this: infile.[fasta|fa|fastq|fq][.gz]",
                      file=sys.stderr)
                sys.exit(1)

            seq_format, is_gz = found.groups()
            seq_format = seq_format.lower()  # SeqIO expects lower-case format names
            if seq_format == 'fa':
                seq_format = 'fasta'
            if seq_format == 'fq':
                seq_format = 'fastq'

        fh = gzip.open(file, 'rt') if file.endswith('.gz') else open(file, 'r')
    else:
        fh = sys.stdin
        seq_format = args.format

    for seq in SeqIO.parse(fh, seq_format):
        SeqIO.write([SeqRecord(seq.seq.translate(table=args.table), id=seq.id, description=seq.description)], sys.stdout, 'fasta')

    fh.close()
| 35.162162 | 130 | 0.601076 | 350 | 2,602 | 4.365714 | 0.365714 | 0.05301 | 0.020942 | 0.021597 | 0.130236 | 0.065445 | 0.045812 | 0.045812 | 0 | 0 | 0 | 0.005691 | 0.25711 | 2,602 | 73 | 131 | 35.643836 | 0.78479 | 0.038816 | 0 | 0.071429 | 0 | 0.035714 | 0.257509 | 0.036844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.178571 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b90cf31e44e1e7e27466082bc57a29d138e03cf | 7,345 | py | Python | pyfilter/test/test_filter.py | zkscpqm/pyfilter | 39c284681ec6f377059907b75346028d99cbdd4c | [
"MIT"
] | null | null | null | pyfilter/test/test_filter.py | zkscpqm/pyfilter | 39c284681ec6f377059907b75346028d99cbdd4c | [
"MIT"
] | 1 | 2021-04-28T18:40:13.000Z | 2021-04-28T18:40:13.000Z | pyfilter/test/test_filter.py | zkscpqm/pyfilter | 39c284681ec6f377059907b75346028d99cbdd4c | [
"MIT"
] | null | null | null | import unittest
import os
from typing import Any, Text, NoReturn, Set, Union

from parameterized import parameterized

from pyfilter import FilterContext
from pyfilter import TextFilter


class TestFilter(unittest.TestCase):
    def setUp(self) -> Any:
        self.any_inclusion_keywords: Set[Text] = {'dog', 'cat'}
        self.all_inclusion_keywords: Set[Text] = {'plane', 'car'}
        self.exclusion_keywords: Set[Text] = {'red', 'grassy'}
        self.regex_string: Text = '^[A-Za-z]'
        self.filter: TextFilter = TextFilter.new_filter(
            any_inclusion_keywords=self.any_inclusion_keywords,
            all_inclusion_keywords=self.all_inclusion_keywords,
            exclusion_keywords=self.exclusion_keywords,
            regex_string=self.regex_string
        )
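
    # Summary of the fixture above (added for readability; derived from the
    # keyword sets): a passing string must contain 'dog' or 'cat', contain
    # both 'plane' and 'car', contain neither 'red' nor 'grassy', and start
    # with a letter to satisfy the '^[A-Za-z]' regex.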

    def test_init(self) -> NoReturn:
        self.assertEqual(self.filter.any_inclusion_filter.keywords, list(self.any_inclusion_keywords),
                         'The any_inclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.all_inclusion_filter.keywords, list(self.all_inclusion_keywords),
                         'The all_inclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.exclusion_filter.keywords, list(self.exclusion_keywords),
                         'The exclusion_keywords are different than the expected LIST of STRINGS of input data')
        self.assertEqual(self.filter.regex_filter.regex.pattern, self.regex_string,
                         'The regex pattern is different than expected')

        expected_default_context = FilterContext(casefold=True)
        self.assertEqual(self.filter.default_context, expected_default_context,
                         'The default context is different from the expected (casefold=True)')

    def test_update_keywords(self) -> NoReturn:
        new_any_inclusion_keywords = []
        new_all_inclusion_keywords = []
        new_exclusion_keywords = []
        self.filter.update_keywords(
            any_inclusion_keywords=new_any_inclusion_keywords,
            all_inclusion_keywords=new_all_inclusion_keywords,
            exclusion_keywords=new_exclusion_keywords
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         list(self.any_inclusion_keywords) + list(new_any_inclusion_keywords),
                         'Incorrect any_inclusion_keywords after keyword update')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         list(self.all_inclusion_keywords) + list(new_all_inclusion_keywords),
                         'Incorrect all_inclusion_keywords after keyword update')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         list(self.exclusion_keywords) + list(new_exclusion_keywords),
                         'Incorrect exclusion_keywords after keyword update')

    @parameterized.expand([(['new_exclusion', 'kw'],),
                           (None,)])
    def test_set_keywords(self, new_exclusion_keywords: Union[Text, None]):
        new_any_inclusion_keywords = ['new', 'keywords']
        new_all_inclusion_keywords = []
        new_regex_str = r'[A-Za-z0-9]'
        self.filter.set_keywords(
            any_inclusion_keywords=new_any_inclusion_keywords,
            all_inclusion_keywords=new_all_inclusion_keywords,
            exclusion_keywords=new_exclusion_keywords,
            regex_string=new_regex_str
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         new_any_inclusion_keywords,
                         'Incorrect any_inclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         [],
                         'Incorrect all_inclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         new_exclusion_keywords or list(self.exclusion_keywords),
                         'Incorrect exclusion_keywords after replacing keywords')
        self.assertEqual(self.filter.regex_filter.regex.pattern, new_regex_str,
                         'Failed to set new regex pattern')

    def test_delete_keywords(self) -> NoReturn:
        any_inclusion_keywords_to_delete = ['dog']
        all_inclusion_keywords_to_delete = ['nonexistent']
        self.filter.delete_keywords(
            any_inclusion_keywords=any_inclusion_keywords_to_delete,
            all_inclusion_keywords=all_inclusion_keywords_to_delete,
            clear_regex=True
        )
        self.assertEqual(self.filter.any_inclusion_filter.keywords,
                         ['cat'],
                         'Incorrect any_inclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.all_inclusion_filter.keywords,
                         list(self.all_inclusion_keywords),
                         'Incorrect all_inclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.exclusion_filter.keywords,
                         list(self.exclusion_keywords),
                         'Incorrect exclusion_keywords after deleting keywords')
        self.assertEqual(self.filter.regex_filter.regex, None,
                         'Failed to delete regex pattern')

    @parameterized.expand([("Planes and cars don't allow dogs", True, False),
                           ("Dogs and cats but not the other keywords", False, False),
                           ("Well we have a cat in the car but on the red plane", False, False),
                           ("The plane carries cats and cars", True, True),
                           ("Just a car and a plane but no pets", False, False),
                           ('123regex fail filter plane cats cars', False, False)])
    def test_singular_filter(self, input_string: Text,
                             expected_with_casefold: bool, expected_without_casefold: bool):
        self.assertEqual(self.filter.filter(input_string, casefold=True), expected_with_casefold)
        self.assertEqual(self.filter.filter(input_string, casefold=False), expected_without_casefold)

    def test_multi_filter(self):
        input_list = ['cat plane car', 'dog cat', 'cat plane car grassy', '']
        result = self.filter.multi_filter(input_list)
        expected_result = ['cat plane car']
        self.assertEqual(result, expected_result)

    @parameterized.expand([('passing_file.txt', True, True),
                           ('casefold_passing_file.txt', True, False),
                           ('failing_file_1.txt', False, False),
                           ('failing_file_2.txt', False, False),
                           ('failing_file_3.txt', False, False)])
    def test_file_filter(self, filename: Text,
                         expected_with_casefold: bool, expected_without_casefold: bool):
        fp = os.path.join('test_files', filename)
        for casefold in (True, False):
            for safe in (True, False):
                result = self.filter.file_filter(fp, safe=safe, casefold=casefold)
                expected = expected_with_casefold if casefold else expected_without_casefold
                self.assertEqual(result, expected)
| 53.224638 | 116 | 0.644384 | 798 | 7,345 | 5.659148 | 0.150376 | 0.150576 | 0.088574 | 0.099646 | 0.568202 | 0.499114 | 0.45992 | 0.45992 | 0.348981 | 0.242693 | 0 | 0.001503 | 0.275425 | 7,345 | 137 | 117 | 53.613139 | 0.84705 | 0 | 0 | 0.144068 | 0 | 0 | 0.188913 | 0.027377 | 0 | 0 | 0 | 0 | 0.169492 | 1 | 0.067797 | false | 0.016949 | 0.050847 | 0 | 0.127119 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b934399f266b4f9c4b7a0180b20fcdaa634e13b | 4,263 | py | Python | tests/conftest.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 13 | 2021-12-11T11:52:32.000Z | 2022-03-11T12:58:56.000Z | tests/conftest.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 1 | 2021-12-19T19:15:29.000Z | 2021-12-26T14:09:16.000Z | tests/conftest.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 1 | 2022-01-10T15:01:04.000Z | 2022-01-10T15:01:04.000Z | from pathlib import Path
import sys
import time

# add parent dir of imxpy
sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from easydict import EasyDict as edict
import pytest

from imx_client import IMXClient
from imx_objects import *


def random_number():
    import random

    return random.randint(0, 100000000000000000000000000000000000)


@pytest.fixture
def random_str():
    return str(random_number())


@pytest.fixture
def acc1():
    acc = edict()
    acc.pk = "4c4b2554e43b374f4cafdd5adaeea5e9aff9b3be54d329bc939752bb747294b9"
    acc.addr = "0x77406103701907051070fc029e0a90d5be82f76c"
    return acc


@pytest.fixture
def acc2():
    acc = edict()
    acc.pk = "ac5d52cc7f75e293ecf2a95f3fafef23c9f5345b4a434ed5bacffccbdbe944fd"
    acc.addr = "0xea047d1919b732a4b9b12337a60876536f4f2659"
    return acc


@pytest.fixture
def acc3():
    acc = edict()
    acc.pk = "bfde975ea5aa3779c7e2f2aade7c2a594b53e32ee23a2ae395927ec5fce4aa4b"
    acc.addr = "0xd5f5ad7968147c2e198ddbc40868cb1c6f059c6d"
    return acc


@pytest.fixture
def one_eth():
    return 1_000_000_000_000_000_000


@pytest.fixture
def half_eth(one_eth):
    return one_eth // 2


@pytest.fixture(scope="function")
def client(acc1):
    return IMXClient("test", pk=acc1.pk)


@pytest.fixture(scope="function")
def mainnet_client():
    return IMXClient("main")


@pytest.fixture(scope="function")
def client2(acc2):
    return IMXClient("test", pk=acc2.pk)


@pytest.fixture(scope="function")
def project_id(client, acc1):
    params = CreateProjectParams(
        name="test_proj", company_name="test_company", contact_email="test@test.com"
    )
    res = client.create_project(params)
    res = res.result()
    return res["result"]["id"]


@pytest.fixture(scope="function")
def random_addr():
    import random

    allowed = "abcdef0123456789"
    addr = f"0x{''.join(random.choice(allowed) for _ in range(40))}"
    return addr


@pytest.fixture
def contract_addr():
    return "0xb72d1aa092cf5b3b50dabb55bdab0f33dfab37b7"


@pytest.fixture
def unregistered_contract_addr():
    return "0xb55016be31047c16c951612f3b0f7c5f92f1faf5"


@pytest.fixture(scope="function")
def token_id(client2, acc1, acc2, contract_addr):
    _token_id = 0
    yield _token_id

    # teardown: transfer the token back from acc2 to acc1
    params = TransferParams(
        sender=acc2.addr,
        receiver=acc1.addr,
        token=ERC721(token_id=_token_id, contract_addr=contract_addr),
    )
    client2.transfer(params)


def mint_params(contract_addr, id_, addr):
    params = MintParams(
        contract_addr=contract_addr,
        targets=[
            MintTarget(
                addr=addr,
                tokens=[
                    MintableToken(
                        id=id_,
                        blueprint=str(id_),
                    ),
                ],
            ),
        ],
    )
    return params
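
# Illustrative note (not part of the original fixtures): mint_params builds
# the nested IMX mint payload -- one MintTarget (the receiving address)
# holding one MintableToken whose blueprint is just the stringified id.
# A hypothetical call:
#
#     params = mint_params("0xb72d1aa092cf5b3b50dabb55bdab0f33dfab37b7", 42, acc1.addr)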


@pytest.fixture(scope="function")
def minted_nft_id(client, acc1, contract_addr):
    token_id = random_number()
    params = mint_params(contract_addr, token_id, acc1.addr)
    res = client.mint(params)
    res = res.result()

    # wait until the database has applied the state
    time.sleep(2)

    return token_id


@pytest.fixture(scope="function")
def valid_order_params(client, client2, acc2, contract_addr):
    # client1 is in control of the sc therefore he mints to acc2
    token_id = random_number()
    params = mint_params(contract_addr, token_id, acc2.addr)
    res = client.mint(params)
    time.sleep(2)

    # client2 now has the nft and can create the order which client1 will buy
    params = CreateOrderParams(
        sender=acc2.addr,
        token_sell=ERC721(token_id=token_id, contract_addr=contract_addr),
        token_buy=ETH(quantity="0.000001"),
    )
    res = client2.create_order(params)
    res = res.result()
    time.sleep(2)

    return (res["result"]["order_id"], token_id)


@pytest.fixture
def unregistered_addr():
    return "0xd2Bf8229D98716abEA9D22453C5C5613078B2c46"


@pytest.fixture
def erc20_contract_addr():
    return "0x4c04c39fb6d2b356ae8b06c47843576e32a1963e"


@pytest.fixture
def gods_unchained_addr():
    return "0xacb3c6a43d15b907e8433077b6d38ae40936fe2c"


@pytest.fixture
def gods_addr():
    return "0xccc8cb5229b0ac8069c51fd58367fd1e622afd97"
| 21.861538 | 84 | 0.697162 | 475 | 4,263 | 6.086316 | 0.277895 | 0.089934 | 0.066413 | 0.071947 | 0.198893 | 0.110688 | 0.067797 | 0.067797 | 0.067797 | 0.037357 | 0 | 0.132842 | 0.203612 | 4,263 | 194 | 85 | 21.974227 | 0.718704 | 0.046915 | 0 | 0.34375 | 0 | 0 | 0.192213 | 0.148595 | 0 | 0 | 0.093149 | 0 | 0 | 1 | 0.171875 | false | 0 | 0.070313 | 0.09375 | 0.40625 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b93c4cb81d74f0acde8f0cc82aaa2e1626a119a | 1,088 | py | Python | off_policy_rl/off_policy_rl/utils/epoch.py | dti-research/ur-learning-shifting-for-grasping | 2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801 | [
"BSD-3-Clause"
] | 1 | 2021-04-12T07:04:26.000Z | 2021-04-12T07:04:26.000Z | off_policy_rl/off_policy_rl/utils/epoch.py | dti-research/ur-learning-shifting-for-grasping | 2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801 | [
"BSD-3-Clause"
] | 1 | 2021-11-10T15:51:15.000Z | 2021-11-10T15:51:15.000Z | off_policy_rl/off_policy_rl/utils/epoch.py | dti-research/ur-learning-shifting-for-grasping | 2dfecf6b2dbe67b65af00fc0ae5f73be2cb8a801 | [
"BSD-3-Clause"
] | null | null | null | from typing import List
import numpy as np

from off_policy_rl.utils.selection_method import SelectionMethod


class Epoch:
    def __init__(
            self,
            number_episodes: int,
            selection_methods: List[SelectionMethod],
            probabilities: List[float] = None
    ):
        self.number_episodes = number_episodes

        if len(selection_methods) > 1 and (probabilities is None
                                           or len(selection_methods) != len(probabilities)):
            raise AssertionError("The number of Selection Methods must match the number of probabilities")
        self.selection_methods = selection_methods

        if probabilities is not None:
            if sum(probabilities) != 1.0:
                raise AssertionError("The list of probabilities must add to 1.0"
                                     " (current sum: {})".format(sum(probabilities)))
        self.probabilities = probabilities

    def get_selection_method(self) -> SelectionMethod:
        # Choose among the configured methods. (The original checked
        # len(SelectionMethod), i.e. the size of the enum itself, which
        # appears unintended.)
        if len(self.selection_methods) > 1:
            return np.random.choice(self.selection_methods, p=self.probabilities)
        return self.selection_methods[-1]
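
# Minimal usage sketch (illustrative; assumes SelectionMethod exposes members
# named RANDOM and MAX, which may differ in the real enum):
#
#     epoch = Epoch(
#         number_episodes=100,
#         selection_methods=[SelectionMethod.RANDOM, SelectionMethod.MAX],
#         probabilities=[0.2, 0.8],
#     )
#     method = epoch.get_selection_method()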
| 35.096774 | 106 | 0.661765 | 121 | 1,088 | 5.793388 | 0.404959 | 0.182596 | 0.085592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008728 | 0.262868 | 1,088 | 30 | 107 | 36.266667 | 0.865337 | 0 | 0 | 0 | 0 | 0 | 0.118566 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b947dc36dc1850a2e02965bc2a02ae7eeca3cad | 11,761 | py | Python | model/deterministic_decoder.py | illc-uva/deep-generative-lm | c65bdf9d72e7d9d4e02576b1e84bce623725a0cd | [
"MIT"
] | 26 | 2019-04-18T13:07:34.000Z | 2021-03-24T11:55:26.000Z | model/deterministic_decoder.py | illc-uva/deep-generative-lm | c65bdf9d72e7d9d4e02576b1e84bce623725a0cd | [
"MIT"
] | null | null | null | model/deterministic_decoder.py | illc-uva/deep-generative-lm | c65bdf9d72e7d9d4e02576b1e84bce623725a0cd | [
"MIT"
] | 9 | 2019-04-18T23:00:46.000Z | 2021-09-23T15:34:56.000Z | """
A deterministic decoder.
"""
import numpy as np
import sys
import os.path as osp
from collections import defaultdict
from warnings import warn
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# We include the path of the toplevel package in the system path so we can always use absolute imports within the package.
toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), '..'))
if toplevel_path not in sys.path:
sys.path.insert(1, toplevel_path)
from model.base_decoder import BaseDecoder # noqa: E402
from util.error import InvalidArgumentError # noqa: E402
__author__ = "Tom Pelsmaeker"
__copyright__ = "Copyright 2018"
class DeterministicDecoder(BaseDecoder):
"""A deterministic decoder, i.e. a RNN with next-word prediction objective.
Args:
device(torch.device): the device (cpu/gpu) on which the model resides.
seq_len(int): maximum length of sequences passed to the model.
kl_step(int): step size of linear kl weight increment during training of the model.
word_p(float): probability of dropping a word, i.e. mapping it to <unk>, before decoding.
parameter_p(float): probability of dropping a row in the weight layers, using Gal's dropout on non-rec layers.
var_mask(boolean): whether to use a different parameter dropout mask at every timestep.
unk_index(int): index of the <unk> token in a one-hot representation.
css(boolean): whether to use CSS softmax approximation.
N(int): number of sequences in the dataset, for the regularization weight.
rnn_type(str): which RNN to use. [GRU, LSTM] are supported.
v_dim(int): size of the vocabulary.
x_dim(int): size of input embeddings.
h_dim(int): size of hidden layers of the RNN.
l_dim(int): number of layers of the RNN.
"""
def __init__(self, device, seq_len, word_p, parameter_p, drop_type, unk_index, css, sparse, N, rnn_type,
tie_in_out, v_dim, x_dim, h_dim, s_dim, l_dim):
super(DeterministicDecoder, self).__init__(device, seq_len, word_p, parameter_p, drop_type, unk_index, css, N,
rnn_type, v_dim, x_dim, h_dim, s_dim, l_dim)
self.tie_in_out = tie_in_out
# The model embeds words and passes them through the RNN to get a probability of next words.
self.emb = nn.Embedding(v_dim, x_dim, sparse=bool(sparse))
# We currently support GRU and LSTM type RNNs
if rnn_type == "GRU":
if self.drop_type in ["varied", "shared"]:
# Varied and shared dropout modes only drop input and output layer. Shared shares between timesteps.
self.grnn = nn.GRU(x_dim, h_dim, l_dim, batch_first=True)
else:
self.grnn = nn.ModuleList([nn.GRUCell(x_dim, h_dim, 1)])
self.grnn.extend([nn.GRUCell(h_dim, h_dim, 1)
for _ in range(l_dim - 1)])
elif rnn_type == "LSTM":
if self.drop_type in ["varied", "shared"]:
self.grnn = nn.LSTM(x_dim, h_dim, l_dim, batch_first=True)
else:
self.grnn = nn.ModuleList([nn.LSTMCell(x_dim, h_dim, 1)])
self.grnn.extend([nn.LSTMCell(h_dim, h_dim, 1)
for _ in range(l_dim - 1)])
self.linear = nn.Linear(h_dim, v_dim)
@property
def linear(self):
return self._linear
@linear.setter
def linear(self, val):
self._linear = val
if self.tie_in_out:
if self.h_dim != self.x_dim:
raise InvalidArgumentError("h_dim should match x_dim when tying weights.")
self._linear.weight = self.emb.weight
def forward(self, data, log_likelihood=False, extensive=False):
"""Forward pass through the decoder which returns a loss and prediction.
Args:
data(list of torch.Tensor): a batch of datapoints, containing at least a tensor of sequences and optionally
tensors with length information and a mask as well, given variable length sequences.
Returns:
losses(dict of torch.FloatTensor): computed losses, averaged over the batch, summed over the sequence.
pred(torch.LongTensor): most probable sequences given the data, as predicted by the model.
"""
x_in, x_len, x_mask = self._unpack_data(data, 3)
losses = defaultdict(lambda: torch.tensor(0., device=self.device))
# Before decoding, we map a fraction of words to <UNK>, weakening the Decoder
self.word_dropout.sample_mask(self.word_p, x_in.shape)
x_dropped = x_in.clone()
x_dropped[self.word_dropout._mask == 0] = self.unk_index
x = self.emb(x_dropped[:, :-1])
scores = self._rnn_forward(x, x_len)
# Compute loss, averaged over the batch, but summed over the sequence
if self.css and self.training:
loss = self._css(scores, x_in[:, 1:])
else:
loss = self.reconstruction_loss(scores.contiguous().view(
[-1, scores.shape[2]]), x_in[:, 1:].contiguous().view([-1])).view(scores.shape[0], scores.shape[1])
if x_len is not None:
# If we had padded sequences as input, we need to mask the padding from the loss
losses["NLL"] = torch.sum(torch.mean(loss * x_mask[:, 1:], 0))
else:
losses["NLL"] = torch.sum(torch.mean(loss, 0))
# We also return the predictions, i.e. the most probable token per position in the sequences
pred = torch.max(scores.detach(), dim=2)[1]
# We use L2-regularization scaled by dropout on the network layers (Gal, 2015)
losses["L2"] = self._l2_regularization()
if log_likelihood:
losses["NLL"] = losses["NLL"].unsqueeze(0)
if extensive:
return losses, pred, x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), x.new_tensor([[1, 1]]), \
x.new_tensor([[1, 1]]), x.new_tensor([[1]]), x.new_tensor([[1]])
else:
return losses, pred
def _rnn_forward(self, x, x_len):
"""Recurrent part of the forward pass. Decides between fast or slow based on the dropout type."""
# Drop rows of the input
shape = torch.Size(x.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.x_dim])
h = self.parameter_dropout_in(x, self.parameter_p, shape=shape)
# We have to run a (slow) for loop to use recurrent dropout
if self.drop_type == "recurrent":
# Sample fixed dropout masks for every timestep
shape = torch.Size([x.shape[0], int(self.h_dim/self.l_dim)])
for i in range(self.l_dim):
self.parameter_dropout_hidden[i].sample_mask(self.parameter_p, shape)
self.parameter_dropout_out[i].sample_mask(self.parameter_p, shape)
if self.rnn_type == "LSTM":
self.parameter_dropout_context[i].sample_mask(self.parameter_p, shape)
# Forward passing with application of dropout
scores = []
if self.rnn_type == "GRU":
h_p = list(torch.unbind(self._init_hidden(x.shape[0])))
else:
h_p = list(torch.unbind(self._init_hidden(x.shape[0])))
c_p = list(torch.unbind(self._init_hidden(x.shape[0])))
for j in range(x.shape[1]):
h_j = h[:, j, :]
for i, grnn in enumerate(self.grnn):
if self.rnn_type == "GRU":
h_j = grnn(h_j, h_p[i])
h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j)
else:
h_j, c_j = grnn(h_j, (h_p[i], c_p[i]))
h_p[i] = self.parameter_dropout_hidden[i].apply_mask(h_j)
c_p[i] = self.parameter_dropout_context[i].apply_mask(c_j)
h_j = self.parameter_dropout_out[i].apply_mask(h_j)
scores.append(self.linear(h_j))
scores = torch.stack(scores, 1)
# For the input/output dropout we can use fast CUDA RNNs
else:
# To h: [batch_size, seq_len, h_dim] we apply the same mask: [batch_size, 1, h_dim] at every timestep
shape = torch.Size(h.shape) if self.var_mask else torch.Size([x.shape[0], 1, self.h_dim])
if x_len is not None:
h = pack_padded_sequence(h, x_len - 1, batch_first=True)
h, _ = self.grnn(h)
if x_len is not None:
h = pad_packed_sequence(h, batch_first=True, total_length=x.shape[1])[0]
# We also apply the same dropout mask to every timestep in the output hidden states
h = self.parameter_dropout_out(h, self.parameter_p, shape=shape)
scores = self.linear(h)
return scores
    def sample_sequences(self, x_i, seq_len, eos_token, pad_token, sample_softmax=False):
        """'Sample' sequences from the (learned) decoder given a prefix of tokens.

        Args:
            x_i(torch.Tensor): initial token or sequence of tokens to start generating from.
            seq_len(int): length of the sampled sequences after the prefix. Defaults to the preset seq_len when None.
            eos_token(int): the end-of-sentence indicator.
            pad_token(int): the token used for padding sentences shorter than seq_len.
            sample_softmax(bool): if True, sample each token from the softmax distribution;
                otherwise take the argmax, with stochasticity coming from the sampled hidden state.

        Returns:
            list: a list of sampled sequences of pre-defined length.
        """
        if seq_len is not None:
            self.seq_len = seq_len
        else:
            warn("No sequence length provided, preset seq_len will be used.")
        with torch.no_grad():
            if sample_softmax:
                h_i = None
                c_i = None
            else:
                h_i = self._sample_hidden(x_i.shape[0])
                c_i = self._sample_hidden(x_i.shape[0])
            samples = []
            # Sampling pass through the sequential decoder
            # The prefix is automatically consumed by the first step through the RNN
            for i in range(x_i.shape[1]):
                samples.append(x_i[:, i].squeeze().tolist())
            for i in range(self.seq_len):
                x_i = self.emb(x_i)
                if self.rnn_type == "GRU":
                    h, h_i = self.grnn(x_i, h_i)
                else:
                    # An LSTM expects (and returns) the hidden and cell states as a tuple;
                    # pass None on the first step when no state has been sampled
                    hx = None if h_i is None else (h_i, c_i)
                    h, (h_i, c_i) = self.grnn(x_i, hx)
                # scores: [batch_size, vocab_size]
                scores = self.linear(h[:, -1])
                # x_i: [batch_size, 1]
                if sample_softmax:
                    # Sample from the categorical distribution given by the softmax outputs
                    x_i = torch.multinomial(F.softmax(scores, 1), 1)
                else:
                    # Argmax, with stochasticity coming from the sampled hidden state
                    x_i = torch.max(scores, dim=1, keepdim=True)[1]
                samples.append(x_i.squeeze().tolist())
            # Pad samples after the first <eos> token
            samples = np.array(samples).T
            eos_spot = np.argwhere(samples == eos_token)
            prev_row = -1
            for spot in eos_spot:
                if spot[0] != prev_row:
                    try:
                        samples[spot[0], spot[1] + 1:] = pad_token
                    except IndexError:
                        pass
                prev_row = spot[0]
            return list(samples)
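
    # Usage sketch (illustrative; `model`, `bos`, `eos` and `pad` are assumed token ids,
    # not defined in this file):
    #   prefix = torch.full((batch, 1), bos, dtype=torch.long, device=model.device)
    #   seqs = model.sample_sequences(prefix, seq_len=20, eos_token=eos,
    #                                 pad_token=pad, sample_softmax=True)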
    def _sample_hidden(self, batch_size):
        """Sample an initial RNN hidden (or cell) state from a standard normal."""
        return torch.normal(mean=torch.zeros((self.l_dim, batch_size, self.h_dim), device=self.device))
| 45.762646 | 122 | 0.596803 | 1,677 | 11,761 | 4.013119 | 0.206917 | 0.011887 | 0.026746 | 0.007132 | 0.218276 | 0.168053 | 0.150074 | 0.108172 | 0.100743 | 0.093314 | 0 | 0.009768 | 0.303631 | 11,761 | 256 | 123 | 45.941406 | 0.811966 | 0.310348 | 0 | 0.2 | 0 | 0 | 0.025168 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045161 | false | 0.012903 | 0.070968 | 0.006452 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b96bddabe2ed1e571fa0fb67e8fe24ad3b42daf | 2,316 | py | Python | trove/tests/scenario/runners/instance_force_delete_runners.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 244 | 2015-01-01T12:04:44.000Z | 2022-03-25T23:38:39.000Z | trove/tests/scenario/runners/instance_force_delete_runners.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 6 | 2015-08-18T08:19:10.000Z | 2022-03-05T02:32:36.000Z | trove/tests/scenario/runners/instance_force_delete_runners.py | sapcc/trove | c03ec0827687fba202f72f4d264ab70158604857 | [
"Apache-2.0"
] | 178 | 2015-01-02T15:16:58.000Z | 2022-03-23T03:30:20.000Z | # Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.tests.scenario import runners
from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
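
# Scenario summary (inferred from the runner methods below): create an instance,
# force-delete it while it is still in BUILD state, and, once the known bug is
# fixed, wait for it to be gone.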
class InstanceForceDeleteRunner(TestRunner):

    def __init__(self):
        super(InstanceForceDeleteRunner, self).__init__(sleep_time=1)
        self.build_inst_id = None

    def run_create_build_instance(self, expected_states=['NEW', 'BUILD'],
                                  expected_http_code=200):
        if self.is_using_existing_instance:
            raise SkipTest("Using an existing instance.")

        name = self.instance_info.name + '_build'
        flavor = self.get_instance_flavor()

        client = self.auth_client
        inst = client.instances.create(
            name,
            self.get_flavor_href(flavor),
            self.instance_info.volume,
            nics=self.instance_info.nics,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)
        self.assert_client_code(client, expected_http_code)
        self.assert_instance_action([inst.id], expected_states)
        self.build_inst_id = inst.id

    def run_delete_build_instance(self, expected_http_code=202):
        if self.build_inst_id:
            client = self.admin_client
            client.instances.force_delete(self.build_inst_id)
            self.assert_client_code(client, expected_http_code)

    def run_wait_for_force_delete(self):
        raise SkipKnownBug(runners.BUG_FORCE_DELETE_FAILS)
        # if self.build_inst_id:
        #     self.assert_all_gone([self.build_inst_id], ['SHUTDOWN'])
| 38.6 | 78 | 0.702936 | 298 | 2,316 | 5.214765 | 0.419463 | 0.030888 | 0.050193 | 0.057915 | 0.189833 | 0.138996 | 0.113256 | 0.113256 | 0 | 0 | 0 | 0.008319 | 0.221503 | 2,316 | 59 | 79 | 39.254237 | 0.853577 | 0.294041 | 0 | 0.0625 | 0 | 0 | 0.02534 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b976f533f13bffac48ebcfedaef5b78d985ab9b | 489 | py | Python | tests/providers/test_twilio.py | yaakov-github/notifiers | ae204bc08fd9efa06597e5e2cf30ad0a305c94bb | [
"MIT"
] | 2 | 2019-10-06T01:53:42.000Z | 2019-11-19T07:52:17.000Z | tests/providers/test_twilio.py | Delgan/notifiers | 8dd2a8aaa81a9433034a8f347d984c8aa80be9af | [
"MIT"
] | null | null | null | tests/providers/test_twilio.py | Delgan/notifiers | 8dd2a8aaa81a9433034a8f347d984c8aa80be9af | [
"MIT"
] | null | null | null | import pytest
provider = 'twilio'
class TestTwilio:

    def test_twilio_metadata(self, provider):
        assert provider.metadata == {
            'base_url': 'https://api.twilio.com/2010-04-01/Accounts/{}/Messages.json',
            'name': 'twilio',
            'site_url': 'https://www.twilio.com/'
        }

    @pytest.mark.online
    def test_sanity(self, provider):
        data = {
            'message': 'foo'
        }
        provider.notify(**data, raise_on_errors=True)
| 23.285714 | 86 | 0.570552 | 53 | 489 | 5.132075 | 0.679245 | 0.051471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022792 | 0.282209 | 489 | 20 | 87 | 24.45 | 0.752137 | 0 | 0 | 0 | 0 | 0 | 0.253579 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |