seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43732887683 | from collections import deque
import sys
input = sys.stdin.readline
R = []
for _ in range(int(input())):
REV = False
ERR = False
F = input().strip()
N = input()
L = list(input().replace(
"[", "").replace("]", "").strip().split(","))
if L == [""]:
L = []
D = deque(L)
for f in F:
if f == "R":
REV = True if REV == False else False
else:
if D:
if REV:
D.pop()
else:
D.popleft()
else:
ERR = True
break
if D:
if REV:
D.reverse()
R.append(list(D))
else:
if ERR:
R.append("error")
else:
R.append([])
for r in R:
if r == "error":
print("error")
else:
print("[", end="")
print(",".join(r), end="")
print("]")
| pokycookie/BAEKJOON | 5430.py | 5430.py | py | 927 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
1498550659 | #!/usr/bin/env python3
# dpw@plaza.localdomain
# 2023-09-19 19:17:00
import json
import sys
from dataclasses import dataclass
from pathlib import Path
from rich import inspect, print
class TrieNode:
def __init__(self, char):
self.char = char
self.is_end = False
self.children = {}
def __repr__(self):
return f"char:{self.char}, end: {self.is_end}, children: {self.children}"
@dataclass
class Trie:
"""A container for holding Trie nodes. Will store and word, name, email, phone number, etc."""
root: TrieNode = TrieNode("")
word_count: int = 0
def insert(self, word: str) -> None:
word = word.lower()
node = self.root
for char in word:
if char in node.children:
node = node.children[char]
else:
new_node = TrieNode(char)
node.children[char] = new_node
node = new_node
node.is_end = True
self.word_count += 1
def depth_first_search(self, node: TrieNode, prefix: str):
"""depth first search"""
if node.is_end:
self.output.append((prefix + node.char))
for child in node.children.values():
self.depth_first_search(child, prefix + node.char)
def search(self, word: str) -> list:
"""search for words"""
word = word.lower()
node = self.root
for char in word:
if char in node.children:
node = node.children[char]
else:
return []
self.output = []
self.depth_first_search(node, word[:-1])
return self.output
def read_proper_names() -> Trie:
trie = Trie()
path = Path("/usr/share/dict/propernames")
for name in path.read_text().split():
trie.insert(name)
return trie
def read_data():
with open("data/emails.json") as f:
data = json.load(f)
return data
def insert_data(trie: Trie, data):
for v in data:
trie.insert(v)
trie = Trie()
data = read_data()
def main(args: list) -> None:
# print(f'{args}')
insert_data(trie, data)
if __name__ == "__main__":
main(sys.argv[1:])
| darrylwest/python-play | algorithms/trie.py | trie.py | py | 2,205 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_num... |
6445310024 | from PyQt5 import QtWidgets, QtGui, uic
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QFileDialog, QTableWidgetItem
from PyQt5.QtSql import QSqlTableModel
from datetime import datetime
from . import utils
class ImportDialog(QtWidgets.QDialog):
def __init__(self, parent):
super(ImportDialog, self).__init__(parent)
uic.loadUi('src/importView.ui', self)
self.thumbTicket = 0
self.mute = 0
self.listing.setColumnHidden(5, True)
self.listing.setColumnHidden(6, True)
self.listing.selectionModel().selectionChanged.connect(
lambda _, __: self.updatePreview())
self.listing.setItemDelegateForColumn(
2, utils.DateEditDelegate(self))
self.listing.setItemDelegateForColumn(
3, utils.CatalogEditDelegate(self))
self.listing.setItemDelegateForColumn(
4, utils.NonEditableDelegate(self))
self.listing.cellChanged.connect(lambda x, y: self.updatePreview())
self.openBtn.clicked.connect(self.openAction)
self.openBtn.dragEnterEvent = self.dragEnterFileEvent
self.openBtn.dropEvent = self.dropFileEvent
self.delBtn.clicked.connect(self.deleteAction)
self.buttonBox.accepted.connect(self.importAction)
self.katalogmodel = QSqlTableModel(self)
self.katalogmodel.setTable('catalogs')
self.katalogmodel.select()
self.boxCatalog.setModel(self.katalogmodel)
self.boxTitle.textChanged.connect(lambda x: self.updateFromBox())
self.boxIndex.textChanged.connect(lambda x: self.updateFromBox())
self.boxDate.dateChanged.connect(lambda x: self.updateFromBox())
self.boxCatalog.currentIndexChanged.connect(
lambda x: self.updateFromBox())
def openAction(self):
filePath = QFileDialog.getOpenFileNames(self, 'OpenFile')[0]
[self.addPath(path) for path in filePath]
def addPath(self, filePath):
self.thumbTicket += 1
fileThumb = utils.retrieveTempThumb(
filePath, "import_%d" % self.thumbTicket)
date = datetime.strftime(utils.getModifiedDate(filePath), '%Y-%m-%d')
title = utils.getFileNameNoExt(filePath)
index = ""
category = ""
items = [index, title, str(date), category,
filePath, fileThumb]
row = self.listing.rowCount()
self.listing.insertRow(row)
for i, item in enumerate(items):
self.listing.setItem(row, i, QTableWidgetItem(item))
def updatePreview(self):
rows = self.listing.selectedItems()
if (len(rows) > 0):
datasheet = [self.listing.item(rows[0].row(), x)
.text() for x in range(6)]
index, title, date, catalog, path, thumb = datasheet
date = datetime.strptime(date, '%Y-%m-%d')
self.mute += 1
self.fileLabel.setPixmap(QtGui.QPixmap(thumb))
if not self.boxTitle.hasFocus():
self.boxTitle.setText(title)
if not self.boxIndex.hasFocus():
self.boxIndex.setText(index)
if not self.boxDate.hasFocus():
self.boxDate.setDate(date)
i = self.boxCatalog.findText(catalog)
if i >= 0:
self.boxCatalog.setCurrentIndex(i)
self.mute -= 1
def importAction(self):
for row in range(self.listing.rowCount()):
datasheet = [self.listing.item(row, x)
.text() for x in range(6)]
index, title, date, catalog, path, thumb = datasheet
utils.importDocument(path, title, index, date, catalog)
def dragEnterFileEvent(self, e):
if e.mimeData().hasUrls():
e.setDropAction(Qt.CopyAction)
e.accept()
else:
e.ignore()
def dropFileEvent(self, e):
files = [u.toLocalFile() for u in e.mimeData().urls()]
im = ImportDialog(self)
for f in files:
im.addPath(f)
im.exec()
self.updateListing()
def updateFromBox(self):
rows = self.listing.selectedItems()
if (len(rows) > 0 and self.mute == 0):
datasheet = [self.listing.item(rows[0].row(), x)
.text() for x in range(6)]
datasheet[0] = self.boxIndex.text()
datasheet[1] = self.boxTitle.text()
datasheet[2] = datetime.strftime(
self.boxDate.date().toPyDate(), '%Y-%m-%d')
datasheet[3] = self.boxCatalog.currentText()
[self.listing.item(rows[0].row(), x)
.setText(datasheet[x]) for x in range(6)]
def deleteAction(self):
rows = [x.row() for x in self.listing.selectedItems()]
rows = reversed(list(set(rows)))
for r in rows:
self.listing.removeRow(r)
| willnode/Arsipin | src/importDialog.py | importDialog.py | py | 4,887 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic... |
1538930426 | import logging
import numpy as np
__author__ = 'frank.ma'
logger = logging.getLogger(__name__)
class RdmBivariate(object):
@staticmethod
def __check_rho(rho: float):
if abs(rho) >= 1.0:
raise ValueError('rho (%.4f) should be smaller than 1' % rho)
@staticmethod
def draw_std(rho: float, size: int):
RdmBivariate.__check_rho(rho)
x1 = np.random.random(size=size)
x2 = np.random.random(size=size)
return x1, rho * x1 + np.sqrt(1.0 - rho * rho) * x2
@staticmethod
def pdf(x_1: np.array, x_2: np.array, rho: float, mu_1: float = 0.0, mu_2: float = 0.0,
sig_1: float = 1.0, sig_2: float = 1.0):
RdmBivariate.__check_rho(rho)
x_1_norm = (x_1 - mu_1) / sig_1
x_2_norm = (x_2 - mu_2) / sig_2
xx1, xx2 = np.meshgrid(x_1_norm, x_2_norm)
z = (xx1 ** 2) - 2.0 * rho * xx1 * xx2 + (xx2 ** 2)
return np.exp(-z / (2.0 * np.sqrt(1.0 - rho ** 2))) / (2.0 * np.pi * sig_1 * sig_2 * np.sqrt(1.0 - rho ** 2))
| frankma/Finance | src/Utils/Sequence/RdmBivariate.py | RdmBivariate.py | py | 1,032 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.r... |
34185397192 | # FastApi einbinden für REST-Services
from fastapi import FastAPI, APIRouter
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
# JSON Serialisierung
import orjson
# Pangas zur Daten-Anaylse
import pandas as pd
# GeoPandas für geometrische Funktionen
import geopandas
# Für geometrische Berechnungen
from shapely.geometry import Point
import math
####
## Rückgabe für Anfrage, wird automatisch in JSON serialisiert
###
class ORJSONResponse(JSONResponse):
media_type = "application/json"
###
## Wird aufgerufen, wenn die Antwort zurück gegeben wird
###
def render(self, content) -> bytes:
return orjson.dumps(content)
# Einen Router erzeugen, damit man zwei bereiche der Anwendung (Frontend, Backend) im Pfad trennen kann
router = APIRouter()
# Definition des Backends
api_app = FastAPI(title="die Schnittstelle", default_response_class=ORJSONResponse)
api_app.include_router(router)
# Definition der Oberfläche
app = FastAPI(title="die Oberfläche")
app.mount('/api', api_app)
app.mount('/', StaticFiles(directory="./static", html=True), name="static")
###
## Diese Funktion nimmt Longitude und Latitude entgegen und findet Bewegungsdaten, die in der Nähe liegen.
## Es werden 30 Einträge zurück gegeben.
###
@api_app.get("/entfernung")
async def read_root(longitude: float, latitude: float):
stationsdaten = pd.read_csv (r'./daten/2022-06-11-stations.csv')
geodaten = geopandas.GeoDataFrame(stationsdaten, geometry=geopandas.points_from_xy(stationsdaten.longitude, stationsdaten.latitude, crs='epsg:4326'))
mcs = geopandas.GeoDataFrame(geometry=[Point(float(longitude), float(latitude))], crs='epsg:4326')
mcs = mcs.to_crs('EPSG:31469')
umgewandelt = geodaten.to_crs('EPSG:31469')
umgewandelt['entfernung'] = umgewandelt.geometry.apply(lambda g: math.floor(mcs.distance(g)))
preisinformationen = pd.read_csv(r'./daten/2022-06-11-prices.csv')
preisinformationen = preisinformationen.groupby('station_uuid').mean()
bewegungsdaten = pd.merge(preisinformationen, umgewandelt, left_on='station_uuid', right_on='uuid')
bewegungsdaten = bewegungsdaten.sort_values(by=['entfernung'], ascending=True)
ergebnis = bewegungsdaten.head(30)
print(ergebnis)
# Ergebnis als Index Dictonary zurückgeben
return ergebnis.to_dict('records')
| veberle/MCS_Praktikum_Aufgaben | src/challenges/challenge4.py | challenge4.py | py | 2,364 | python | de | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.responses.JSONResponse",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "orjson.dumps",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "fastapi... |
34694646391 |
import asyncio
import xml.etree.ElementTree as ET
from os import listdir, path
import json
import requests
import pynetbox
import json
from multiprocessing.dummy import Pool
from netaddr import IPAddress
import logging
import os
import filecmp
import re
import sys
import shutil
import time
from netmiko import ConnectHandler
import threading
import xml.dom.minidom
from collections import OrderedDict
import urllib3
from pyzabbix import ZabbixAPI
from pkg_resources import parse_version
urllib3.disable_warnings()
start_time = time.time()
"""
Variables
"""
type_de_log = 'debug' # defaut = 'info', en cas de problème changez en "debug" et executez à nouveau (faire le ménage dans le fichier de log après)
token_netbox = ''
url_netbox = 'url' # bien mettre le / a la fin
url_zabbix = 'url'
token_zabbix = 'token'
username = 'compte admin'
password = 'mdp admin'
"""
"""
if type_de_log == 'info':
logging.basicConfig(filename='./log/netbox_log.log', encoding='utf-8', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("paramiko").setLevel(logging.INFO)
else:
logging.basicConfig(filename='./log/netbox_log.log', encoding='utf-8', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("paramiko").setLevel(logging.WARNING)
#print("\n" + "DEBUT DE L'EXPORT ZABBIX")
try:
def remove_none(obj):
"""
Retire les valeurs None
Comme expliqué sur : https://stackoverflow.com/a/20558778/6753144
"""
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)(
(remove_none(k), remove_none(v))
for k, v in obj.items()
if k is not None and v is not None
)
else:
return obj
def get_zabbix_connection():
"""
Retourne l'object zbx_pyzabbix, puis on essaie de ce connecter à l'API de zabbix
"""
# pyzabbix library, with user\password in login method. It's GOOD library
logging.debug("Try connect to Zabbix by pyzabbix...")
try:
zbx_pyzabbix = ZabbixAPI(url_zabbix)
zbx_pyzabbix.session.verify = False
zbx_pyzabbix.login(api_token=token_zabbix)
return zbx_pyzabbix
except Exception as e:
logging.exception(e)
# py-zabbix library, with user\password in ZabbixAPI
raise Exception("Some error in pyzabbix or py_zabbix module, see logs")
"""
Organise les fichiers exportés
"""
def order_data(data):
if isinstance(data, dict):
for key, value in data.items():
data[key] = order_data(value)
return OrderedDict(sorted(data.items()))
elif isinstance(data, list):
data.sort(key=lambda x: str(x))
return [order_data(x) for x in data]
else:
return data
def dumps_json(object, data, directory, key="name", save_yaml=False, drop_keys=[]):
"""
Create JSON or yaml file in folder
"""
subfolder = os.path.join(directory, object.lower())
if not os.path.exists(subfolder):
os.makedirs(subfolder)
data = order_data(data)
for item in data:
if drop_keys:
for drop_key in drop_keys:
if drop_key in item:
item.pop(drop_key, None)
txt = json.dumps(item, indent=4)
# Remove bad characters from name
if isinstance(key, tuple):
name = "_".join(map(lambda x: item[x], key))
else:
name = item[key]
name = re.sub(r'[\\/:"*?<>|]+', " ", name)
filename = "{}/{}.{}".format(subfolder, name, "yaml" if save_yaml else "json")
filename = os.path.abspath(filename)
with open(filename, mode="w", encoding="utf-8", newline="\n") as file:
file.write(txt)
def dump_xml(object, txt, name, directory, save_yaml=False):
"""
Create XML or YAML in folder
"""
folder = os.path.join(directory, object.lower())
if not os.path.exists(folder):
os.makedirs(folder)
# Remove bad characters from name
name = re.sub(r'[\\/:"*?<>|]+', " ", name)
filename = "{}/{}.{}".format(folder, name, "yaml" if save_yaml else "xml")
filename = os.path.abspath(filename)
# Remove bad lines from content
# date
txt = re.sub(r"<date>.*<\/date>", "", txt)
# zabbix.version
# txt = re.sub(r'<version>.*<\/version>', '', txt)
# ppretty xml
xml_ = xml.dom.minidom.parseString(
txt
) # or xml.dom.minidom.parseString(xml_string)
txt = xml_.toprettyxml(indent=" ", encoding="UTF-8")
txt = txt.decode()
# replace xml quot to normal readable "
txt = txt.replace(""", '"')
with open(filename, mode="w", encoding="utf-8", newline="\n") as file:
file.write(txt)
#print('-', end="", flush=True)
def main(zabbix_, save_yaml, directory, only="all"):
# XML
# Standart zabbix xml export via API
def export(zabbix_api, type, itemid, name):
"""
Export one type: hosts, template, screen or other
https://www.zabbix.com/documentation/4.0/manual/api/reference/configuration/export
"""
logging.info("Debut de l'export")
items = zabbix_api.get()
logging.debug("Processing...")
for item in items:
try:
txt = zabbix_.configuration.export(
format="xml", options={type: [item[itemid]]}
)
dump_xml(
object=type,
txt=txt,
name=item[name],
save_yaml=save_yaml,
directory=directory,
)
except Exception as e:
logging.error(
"Exception during export of template: {}".format(item[name])
)
logging.error(e)
#logging.debug("Source Zabbix server version")
export(zabbix_.host, "hosts", "hostid", "name")
# JSON
# not support `export` method
# Read more in https://www.zabbix.com/documentation/4.0/manual/api/reference/configuration/export
#logging.info("Start export JSON part...")
# logging.info("Processing services...")
# services = zabbix_.service.get(selectParent=['name'], selectTimes='extend')
# dumps_json(object='services', data=services, key=('name', 'serviceid'), save_yaml=save_yaml, directory=directory, drop_keys=["status"])
def environ_or_required(key):
"Argparse environment vars helper"
if os.environ.get(key):
return {"default": os.environ.get(key)}
else:
return {"required": True}
if __name__ == "__main__":
args = "hosts"
zabbix_ = get_zabbix_connection(
)
#logging.info("All files will be save in {}".format(os.path.abspath(args.directory)))
main(
zabbix_=zabbix_,
save_yaml=False,
directory="./",
only="hosts"
)
logging.info("Execution de l'export terminée")
except:
logging.error("Erreur critique pendant l'exportation Zabbix, verifiez le token et l'url du serveur Zabbix", exc_info=True)
#print("\n" + "FIN DE L'EXPORT")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
FIN DE L'EXPORT ZABBIX DÉBUT DU L'IMPORT DANS NETBOX
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#print("\nIMPORT EN COURS SUR NETBOX")
logging.info("Debut de l'importation sur Netbox")
nb = pynetbox.api(url_netbox, token = token_netbox, threading=True)
nb.http_session.verify = False # désactive la verification du certificat pour netbox
try:
nb.status() # verification du fonctionnement de la connexion
except:
logging.critical('Connexion impossible au serveur Netbox')
mypath = './hosts' # chemin vers les fichiers exporter de zabbix
files=[path.join(mypath, f) for f in listdir(mypath) if f.endswith('.xml')] # récupération des fichiers dans le chemin 'mypath'
def postdevice(devname, newos, tenid, siteid, modid, serial, ip): # création d'une fonction permettant l'ajout d'un device sur netbox
try :
url = url_netbox + "api/dcim/devices/" # url de netbox (à changer en cas de migration) suivi de api/dcim/devices/
payload = json.dumps({
"name": devname,
"device_type": modid,
"device_role": "1",
"tenant": tenid,
"serial": serial,
"site": siteid,
"custom_fields":{
"OS": newos
}
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("POST", url, headers=headers, data=payload, verify=False)
decid = nb.dcim.devices.get(name=devname)
decid = decid.id
ips.append(ip + ',' + str(decid))
logging.info(devname + " ajouté")
except: logging.warning("Échec de la création de " + devname)
def compten(slug):
x = nb.tenancy.tenants.get(slug = slug) # utilisation de netboxapi pour trouver le tenant correspondant à celui exporter de zabbix
tenid = x.id # pour récuperer son ID de tenant
return tenid
def compsite(slug): # utilisation de netboxapi pour trouver le site correspondant à celui exporter de zabbix
s = nb.dcim.sites.get(slug = slug) # pour récuperer son ID de site
siteid = s.id
return siteid
def comptype(modele):
#typeslug = nb.dcim.device_types.get(model = modele)
t = nb.dcim.device_types.get(model = modele)
modid = t.id
return modid
def compvend(vendor):
v = nb.dcim.manufacturers.get(name = vendor)
vendid = v.id
return vendid
def createnant(names): # fonction de création de vendeur (manufacturer)
tenslug = ''.join(names.split()).lower()
tenslug = tenslug.translate(str.maketrans('','', '"!#$%&()*+_,./:;<=>?@[\]^`{|}~'))
url = url_netbox + "api/tenancy/tenants/"
payload = json.dumps({
"name": names,
"slug": tenslug
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("POST", url, headers=headers, data=payload, verify=False)
x = nb.tenancy.tenants.get(slug = tenslug) # utilisation de netboxapi pour trouver le tenant correspondant à celui exporter de zabbix
tenid = x.id
url = url_netbox + "api/dcim/sites/"
payload = json.dumps({
"name": names,
"slug": tenslug,
"tenant": tenid
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("POST", url, headers=headers, data=payload, verify=False)
def creavend(vendor): # fonction de création de vendeur (manufacturer)
vendslug = ''.join(vendor.split()).lower()
vendslug = vendslug.translate(str.maketrans('','', '"!#$%&()*+_,./:;<=>?@[\]^`{|}~'))
url = url_netbox + "api/dcim/manufacturers/"
payload = json.dumps({
"name": vendor,
"slug": vendslug
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("POST", url, headers=headers, data=payload, verify=False)
def creatype(modele, vendid): # fonction de création de modèle (device-type) au travers de l'API de netbox
typeslug = ''.join(modele.split()).lower()
typeslug = typeslug.translate(str.maketrans('','', '"!#$%&()*+_,./:;<=>?@[\]^`{|}~'))
url = url_netbox + "api/dcim/device-types/"
payload = json.dumps({
"manufacturer": vendid,
"model": modele,
"slug": typeslug
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("POST", url, headers=headers, data=payload, verify=False)
def upos(newos, decid): # fonction de création de vendeur (manufacturer)
url = url_netbox + "api/dcim/devices/"+str(decid)+"/"
payload = json.dumps({
"custom_fields": {
"OS": newos
}
})
headers = {
'accept': 'application/json',
'Authorization': 'Token ' + token_netbox,
'Content-Type': 'application/json'
}
requests.request("PATCH", url, headers=headers, data=payload, verify=False)
devices=list(nb.dcim.devices.all())
ldevice=' '.join([str(item) for item in devices])
tenants=list(nb.tenancy.tenants.all())
ltenant=' '.join([str(item) for item in tenants])
vendors=list(nb.dcim.manufacturers.all())
ldevtype=list(nb.dcim.device_types.all())
ips= []
for file in files: # On regarde les fichiers exportés de zabbix
devname = ''
newos = ''
tenid = ''
siteid = ''
modid = ''
serial = ''
oldserial = ''
checker = 0
tree = ET.parse(file)
root = tree.getroot()
with open(file, 'r') as f:
try:
if 'Switchs' not in f.read():
continue
for name in root.findall('./hosts/host/name'): # On trouve le noms du host
devname = name.text
for ip in root.iter('ip'): # Les IP serviront a la connexion sur les switchs pour les informations supplémentaires
if ip != '':
ip = ip.text
else:
continue
if devname in ldevice: # On le compare a notre export netbox pour voir
dec = nb.dcim.devices.get(name=devname)
decid = dec.id
for ip in root.iter('ip'): # Les IP serviront a la connexion sur les switchs pour les informations supplémentaires
ip = ip.text
ips.append(ip + ',' + str(decid))
logging.debug(devname + " deja dans Netbox : " + ip)
break
for newos in root.iter('os'): # On récupere l'os dans le fichier d'hote zabbix
newos = newos.text
if dec.custom_fields.get('OS') == newos:
break
else:
upos(newos, decid)
break # il n'existe pas déjà
else:
for name in root.findall('./hosts/host/groups/group/name'): # On trouve les groupes
if "Switchs" in name.text: # On verifie que le host exporter de zabbix est un switch
sep = "/"
names = name
names = (names.text.split(sep, 1)[0]) # On récupere le nom du groupe d'hote de l'hote
if names not in ltenant: # Si il est dans le fichier de tenant exporter depuis netbox
tenants=list(nb.tenancy.tenants.all())
ltenant=' '.join([str(item) for item in tenants])
if names not in ltenant: # Si il est dans le fichier de tenant exporter depuis netbox
createnant(names)
slug = nb.tenancy.tenants.get(name=names)
tenid = ""
tenid = (compten(slug)) # on appelle la fonction de récuperation de tenant id
siteid = ""
siteid = (compsite(slug)) # on appelle la fonction de récuperation de site id
for type_full in root.iter('type_full'): # on récupere le modèle du switch dans le fichier d'hote zabbix
modele = type_full.text
for vendor in root.iter('vendor'): # On récupere le vendeur dans le fichier d'hote zabbix
vendeur = vendor.text
checker = 1
if vendeur == 'Hewlett-Packard' or vendeur == 'Hewlett Packard' or modele.startswith('HP ') or modele.startswith('Aruba JL'):
vendeur = 'HP'
if vendeur in vendors: # on regarde si il est déjà dans netbox
vendid = (compvend(vendeur)) # si oui on récupere son manufacturer ID
else:
vendors=list(nb.dcim.manufacturers.all())
if vendeur in vendors:
vendid = (compvend(vendeur)) # si oui on récupere son manufacturer ID
else:
creavend(vendeur) # sinon on le créer
vendid = (compvend(vendeur))
if checker == 0:
break
for type_full in root.iter('type_full'): # on récupere le modèle du switch dans le fichier d'hote zabbix
modele = type_full.text
if vendeur == 'Inconnu':
modele = 'Inconnu'
if modele in ldevtype: # si il est deja dans netbox on récupere son ID
modid = (comptype(modele))
else: # sinon on le créer
ldevtype=list(nb.dcim.device_types.all())
if modele in ldevtype: # si il est deja dans netbox on récupere son ID
modid = (comptype(modele))
else:
creatype(modele, vendid)
modid = (comptype(modele))
for newos in root.iter('os'): # On récupere l'os dans le fichier d'hote zabbix
newos = newos.text
for serial in root.iter('serialno_a'):
serial = serial.text
break
for ip in root.iter('ip'): # Les IP serviront a la connexion sur les switchs pour les informations supplémentaires
ip = ip.text
postdevice(devname, newos, tenid, siteid, modid, serial,ip) # appelle de la fonction de création de device
except: logging.warning('Traitement du fichier ' + devname + ' impossible.')
#shutil.rmtree('./hosts')
class procurve:
def sshvlan(ssh, expect_string, tenid, siteid):
lvlan=[] #création d'une liste pour les vlans
commande = "show vlan"
output = ssh.send_command(commande, expect_string=expect_string) #envoie de la commande show vlan
with open('./res/vlan.txt', 'w') as v:
v.write(output)
v.close
with open('./res/vlan.txt', 'r') as r:
for line in r: #pour chaque ligne dans le fichier
vlan = line.split(' ')[0].replace(' ', '')
l = line.split(' ')
while '' in l: l.remove('')
try:
int(l[0])
vlaname = l[1] # si l est un nombre (vlan id) on l'ajoute a la variable
except:continue
if nb.ipam.vlans.get(vid=vlan, tenant_id= tenid) == None: #si le vlan n'existe pas on le créer
nb.ipam.vlans.create(name=vlaname, vid=vlan, status='active', tenant=tenid, site=siteid)
lvlan.append(vlan) # on l'ajoute a la liste
r.close
procurve.sshint(lvlan, ssh, expect_string, decid, tenid)
def sshint(lvlan, ssh, expect_string, decid, tenid):
commande = "show ip"
print(commande)
output = ssh.send_command(commande, expect_string=expect_string)
with open('./res/ip.txt', 'w') as i:
i.write(output)
with open('./res/ip.txt') as r:
i = 0
for line in r:
if "--------" in line:
i = 1
continue
if i == 1:
try:
x = line.split(' ')
x = list(filter(None, x))
ip = x[3]
submask = x [4]
except: continue
try:
intid = nb.dcim.interfaces.create(device=decid, name='management', type='virtual').id # si l'interface vlan303 existe pas on la créer
except:
intid = nb.dcim.interfaces.get(device_id=decid, name='management', type='virtual').id # sinon on l'a créer
maskcidr=IPAddress(submask).netmask_bits() # conversion du format x.x.x.x en CIDR
ip = ip+'/'+str(maskcidr) # on ajoute le masque a l'IP
if nb.ipam.ip_addresses.get(address=ip,tenant_id=tenid) == None: # si l'ip n'est pas dans netbox
ipid = nb.ipam.ip_addresses.create(address=ip, tenant=tenid, assigned_object_type='dcim.interface' ,assigned_object_id=intid).id # on créer l'ip
else:ipid = nb.ipam.ip_addresses.get(address=ip, tenant_id=tenid).id # sinon on l'a récupere
dec = nb.dcim.devices.get(id=decid) # on récupere l'objet
dec.primary_ip4 = ipid # on ajoute l'ip a l'objet
dec.save() # on met a jour l'objet dans netbox
r.close
lint=[] # creation d'une liste pour les interfaces
commande = "show interface brief" # on fait des show vlans pour tout les vlans
output = ssh.send_command(commande, expect_string=expect_string)
with open('./res/int.txt', 'w') as v:
v.write(output)
v.close
i = 0
with open('./res/int.txt', 'r') as r:
for line in r: # pour chaque ligne dans le show vlan x
if '--------' in line: # tries dans les lignes pour ne récuperer que les interfaces
i = 1
continue
if i==0 or line == '': continue
line = line.split(' ')
line = list(filter(None, line))
inte = line[0]
print('inte = ' + inte)
if inte == '\n': continue
try:
if inte not in lint: # si l'interface n'est pas encore dans la liste des vlans
lint.append(inte) # on l'ajoute
if 'Up' in line: status = 1 # si l'interface est up on change la valeur de status
else: status = 0
typ='1000base-t' # a changer, par défaut les interfaces sont en 1Ge
try:
nb.dcim.interfaces.create(device=decid, name=str(inte), type=typ, enabled=status) # on créer l'interface
logging.debug('Interface ' + inte + ' sur le device ' + decid + ' créé')
except: pass
except: pass
#else: print('non')
r.close
print(lint)
try:
print('interface : ' + inte)
inter=nb.dcim.interfaces.get(name=str(inte), device_id=decid) # on récupere l'objet netbox de l'interface
commande = 'show vlan ports ' + str(inte) + ' detail'
print(commande)
output = ssh.send_command(commande, expect_string=expect_string)
with open('./res/tempint.txt', 'w') as t:
t.write(output)
with open('./res/tempint.txt', 'r') as t:
taggvlan=[]
for line in t: # pour chaque ligne dans le fichier
if line == '\n': continue # on skip les lignes vide
vlanid = line.split(' ')[0].replace(' ', '')
try:
int(vlanid) # on verife que le vid est bien un numero
tem=nb.ipam.vlans.get(vid=vlanid,tenant_id=tenid).id
if 'Tagged' in line: # si tagged dans la ligne, l'interface est taggé
vlanid2=nb.ipam.vlans.get(vid=vlanid, tenant_id=tenid).id # on récupere les vlans déjà sur l'interface
if vlanid2 not in taggvlan: # si l'interface n'est pas encore dans la liste des vlans tag
taggvlan.append(vlanid2) # on l'ajoute
elif 'Untagged' in line: # si untagged dans la ligne
untagid=nb.ipam.vlans.get(vid=vlanid,tenant_id=tenid).id # on récupere l'objet vlan dans netbox
inter.update({"untagged_vlan" : untagid}) # on update l'interface avec les vlans untagged
else: continue
tint=nb.dcim.interfaces.get(device_id=decid, name=str(inte))
except:
continue
if taggvlan==[]:
inter.update({"mode" : "access", "untagged_vlan" : untagid}) # si il n'y a pas d'interface tagged, on ajoute le tag 'untagged' sur l'interface
else:
inter.update({"mode" : "tagged", "tagged_vlans" : taggvlan}) # sinon on ajoute les vlans tag
except:
logging.error("L'interface " + inte + "n'a pas pu être récuperé")
continue
r.close
class aruba:
    # NOTE(review): methods are declared without `self`/`@staticmethod` and rely on
    # module globals (`nb`, `logging`, and in sship also `ssh`/`expect_string`);
    # they are only ever called as aruba.<method>(...) from the module-level loop.
    def sshvlan(ssh, expect_string, tenid, siteid):
        """Read "show vlan" from an Aruba switch, create missing VLANs in Netbox.

        Returns the list of VLAN ids seen on the switch, or None on error.
        """
        try:
            lvlan=[]
            commande = "show vlan"
            output = ssh.send_command(commande, expect_string=expect_string)
            with open('./res/vlan.txt', 'w') as v:
                v.write(output)
                v.close
            with open('./res/vlan.txt', 'r') as r:
                i = 0
                for line in r:
                    if i != 2:  # wait until both '----' separator lines have passed
                        if '---------------------------------------------------------------------------------------------------' in line:
                            i = i+1
                            continue
                        else: continue
                    spl = line.split(' ')  # split the line
                    spl = list(filter(None, spl))
                    vid = spl[0]  # VLAN id
                    try: spl[1]
                    except: continue
                    vlaname = spl[1]  # VLAN name
                    try:
                        int(vid)  # make sure vid really is a numeric VLAN id
                    except: continue
                    if nb.ipam.vlans.get(vid=vid, tenant_id= tenid) == None:  # VLAN missing from Netbox
                        try:
                            nb.ipam.vlans.create(name=vlaname, vid=vid, status='active', tenant=tenid, site=siteid)  # create it
                        except: logging.error('Création du vlan ' + vid + 'dans ' + tenid)
                    lvlan.append(vid)  # remember this VLAN id
                r.close
                return(lvlan)
        except:
            r.close
            logging.error('Erreur pendant aruba.sshvlan')
    def sshint(lvlan, ssh, expect_string, decid, tenid):
        """Discover interfaces via "show interface brief", create them in Netbox,
        then read "show vlan port <if>" to set tagged/untagged VLAN membership.

        NOTE(review): `lvlan` is accepted but never read in the visible body.
        """
        try:
            lint=[]
            commande = "show interface brief"
            output = ssh.send_command(commande, expect_string=expect_string)
            with open('./res/int.txt', 'w') as v:
                v.write(output)
                v.close
            with open('./res/int.txt', 'r') as r:
                i = 0
                for line in r:
                    if i != 2:  # wait until both '----' separator lines have passed
                        if '------------------------------------------------------------' in line:
                            i = i+1
                            continue
                        else: continue
                    spl = line.split(' ')  # split the line
                    spl = list(filter(None, spl))  # drop empty tokens
                    inte = spl[0]  # interface name
                    print('int' + str(inte))
                    if 'up' in line: status = 1  # interface is up
                    else: status = 0  # otherwise it is down
                    try:
                        if inte not in lint:  # first time we see this interface
                            lint.append(inte)  # add it to the list
                            if 'up' in line: status = 1  # up
                            else: status = 0  # down
                            if spl[1] == '--' and spl[2]:  # '--' marks a virtual interface
                                continue
                            else: typ='1000base-t'  # TODO: hard-coded, defaults every port to 1GbE
                            try:
                                nb.dcim.interfaces.create(device=decid, name=str(inte), type=typ, enabled=status)  # try to create the interface
                            except: pass
                    except: continue
                    #else: print('non')
                    r.close
                    print('interface : ' + inte)
                    inter=nb.dcim.interfaces.get(name=str(inte), device_id=decid)  # fetch the Netbox object
                    commande = 'show vlan port ' + str(inte)
                    try:
                        output = ssh.send_command(commande, expect_string=expect_string)
                    except:
                        continue
                    #print(output)
                    with open('./res/intconf.txt', 'w') as v:
                        v.write(output)
                        v.close
                    with open('./res/intconf.txt', 'r') as r:
                        taggvlan=[]
                        i = 0
                        for line in r:
                            if i != 2:  # wait until both '----' separator lines have passed
                                if '------------------------------------------------' in line:
                                    i = i+1
                                    continue
                                else: continue
                            vlanid = line.split(' ')[0]
                            if 'trunk' in line:  # 'trunk' means a tagged/trunk port
                                vlanid2=nb.ipam.vlans.get(vid=vlanid, tenant_id=tenid).id
                                if vlanid2 not in taggvlan:
                                    taggvlan.append(vlanid2)  # add if not already listed
                            elif 'access' in line:  # access port
                                untagid=nb.ipam.vlans.get(vid=vlanid,tenant_id=tenid).id
                                inter.update({"untagged_vlan" : untagid})  # update the interface
                            elif 'native-untagged' in line:  # native untagged
                                untagid=nb.ipam.vlans.get(vid=vlanid,tenant_id=tenid).id
                                inter.update({"untagged_vlan" : untagid})  # update the interface
                            else: continue
                        if taggvlan==[]:
                            inter.update({"mode" : "access", "untagged_vlan" : untagid})  # no tagged VLANs: mark the port as access/untagged
                        else:
                            inter.update({"mode" : "tagged", "tagged_vlans" : taggvlan})  # otherwise attach the tagged VLANs
                        r.close
            aruba.sship(ip,decid,tenid)
        except:logging.error('Erreur pendant aruba.sshint')
    def sship(ip,decid,tenid):
        """Read "show ip interface brief" and set the device's primary IPv4 in Netbox.

        NOTE(review): uses the module-level `ssh`/`expect_string` globals, and the
        `ip` parameter is immediately shadowed by the address parsed from output.
        """
        try:
            commande = "show ip interface brief"
            output = ssh.send_command(commande, expect_string=expect_string)
            with open('./res/ip.txt', 'w') as i:
                i.write(output)
            with open('./res/ip.txt', 'r') as r:
                for line in r:
                    if 'Address' in line:
                        continue
                    else:
                        if '.' in line:  # only lines that carry a dotted-quad address
                            try:
                                intid = nb.dcim.interfaces.create(device=decid, name='management', type='virtual').id
                            except:
                                intid = nb.dcim.interfaces.get(device_id=decid, name='management', type='virtual').id
                            lip = line.split(' ')
                            lip = list(filter(None, lip))
                            ip = lip[1]
                            if nb.ipam.ip_addresses.get(address=ip,tenant_id=tenid) == None:
                                ipid = nb.ipam.ip_addresses.create(address=ip, tenant=tenid, assigned_object_type='dcim.interface' ,assigned_object_id=intid).id
                            else:ipid = nb.ipam.ip_addresses.get(address=ip, tenant_id=tenid).id
                            dec = nb.dcim.devices.get(id=decid)
                            dec.primary_ip4 = ipid
                            dec.save()
            r.close
        except: logging.error('Erreur pendant aruba.sship')
#ips = ['172.30.205.136,49645','172.30.205.140,49843']
# Main loop: for every "ip,device_id" pair, SSH to the switch, archive its running
# config under ./run and ./old, and re-sync VLANs/interfaces into Netbox only when
# the config changed since the last run.
for item in ips:
    try:
        ip = item.split(',', 1)[0]
        decid=item.split(',', 1)[1]
        print(ip)
        print(decid)
        dec = nb.dcim.devices.get(id=decid)
        tenid=dec.tenant.id
        siteid=dec.site.id
        # NOTE(review): decid comes from str.split so it is never None here; this
        # guard is effectively dead — confirm the intended check (maybe `dec`?).
        if decid == None:
            continue
        if dec.device_type.manufacturer == nb.dcim.manufacturers.get(name='HP'):
            try:
                ssh = ConnectHandler(device_type='hp_procurve', ip=ip, username=username, password=password)
            except:
                logging.error('Connexion a ' + ip + ' impossible.')
            expect_string = ssh.find_prompt()
            ssh.send_command("terminal length 500", expect_string=expect_string)
            commande="show run"
            output = ssh.send_command(commande, expect_string=expect_string)
            with open('./run/'+decid+'.txt', 'w') as newid:
                newid.write(output)
            with open('./run/'+decid+'.txt', 'r') as newid:
                try:
                    # skip the device entirely when the config is unchanged
                    with open('./old/'+decid+'.txt', 'r') as oldid:
                        if filecmp.cmp('./old/'+decid+'.txt', './run/'+decid+'.txt') == True:
                            continue
                    with open('./old/'+decid+'.txt', 'w') as newid:
                        newid.write(output)
                except:
                    # no previous snapshot: create it
                    with open('./old/'+decid+'.txt', 'w') as newid:
                        newid.write(output)
            logging.info('Modification du device : ' + str(dec))
            lvlan = procurve.sshvlan(ssh, expect_string, tenid, siteid)
        if dec.device_type.manufacturer == nb.dcim.manufacturers.get(name='Aruba'):
            try:
                ssh = ConnectHandler(device_type='aruba_os', ip=ip, username=username, password=password)
                expect_string = ssh.find_prompt()
                ssh.send_command("no page", expect_string=expect_string)
                commande="show run"
                output = ssh.send_command(commande, expect_string=expect_string)
            except:
                logging.error('Connexion sur ' + ip + ' impossible.')
            with open('./run/'+decid+'.txt', 'w') as newid:
                newid.write(output)
            with open('./run/'+decid+'.txt', 'r') as newid:
                try:
                    with open('./old/'+decid+'.txt', 'r') as oldid:
                        if filecmp.cmp('./old/'+decid+'.txt', './run/'+decid+'.txt') == True:
                            print("yup")
                            continue
                        else: pass
                    with open('./old/'+decid+'.txt', 'w') as newid:
                        newid.write(output)
                except:
                    with open('./old/'+decid+'.txt', 'w') as newid:
                        newid.write(output)
            logging.info('Modification du device : ' + str(dec))
            lvlan = aruba.sshvlan(ssh, expect_string, tenid, siteid)
            aruba.sshint(lvlan, ssh, expect_string, decid, tenid)
        #else: print("faux")
    except:
        logging.error('Erreur pendant le traitement de ' + item)
        continue
logging.info('Mise a jour de Netbox terminé \n')
print("\n" + "--- %s secondes ---" % (time.time() - start_time))
| AlexandrePoix/Projet_Netbox | Script_Netbox.py | Script_Netbox.py | py | 39,789 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.INFO",... |
28905884657 | import csv
from datetime import datetime, date
from dateutil import parser
import os
import pytz
from app import db
from app.utils.editdiff import EditDiff, ChangedValue, ChangedRow
import logging
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import class_mapper, relationship, validates
class DataMixin(object):
    """Mixin adding a ``to_dict`` serializer for SQLAlchemy models."""

    def to_dict(self):
        """Serialize non-null column values plus hybrid-property fields."""
        result = {}

        # Column attributes: skip nulls, and apply the optional "repr"
        # formatter declared in the column's info dict.
        for col in self.__table__.columns:
            value = getattr(self, col.name)
            if value is None:
                continue
            formatter = col.info.get("repr")
            result[col.name] = formatter(value) if formatter is not None else value

        # Derived fields declared as hybrid_property descriptors.
        descriptors = inspect(self.__class__).all_orm_descriptors
        for name, descriptor in descriptors.items():
            if isinstance(descriptor, hybrid_property):
                result[name] = getattr(self, name)

        return result
class Batch(db.Model, DataMixin):
    """One upload/publish batch; owns the CoreData rows it brought in."""
    __tablename__ = 'batches'
    # primary key
    batchId = db.Column(db.Integer, primary_key=True)
    createdAt = db.Column(db.DateTime(timezone=True), nullable=False)
    publishedAt = db.Column(db.DateTime(timezone=True))
    shiftLead = db.Column(db.String(100))
    batchNote = db.Column(db.String)
    dataEntryType = db.Column(db.String)
    logCategory = db.Column(db.String)
    link = db.Column(db.String)
    user = db.Column(db.String)
    # these fields are only relevant for an edit batch
    changedFields = db.Column(db.String)
    numRowsEdited = db.Column(db.Integer)
    # false if preview state, true if live
    isPublished = db.Column(db.Boolean, nullable=False)
    # false if part of a regular data push, true if came in through an edit API endpoint
    isRevision = db.Column(db.Boolean, nullable=False)
    coreData = relationship('CoreData', backref='batch')
    @hybrid_property
    def changedDatesMin(self):
        # Earliest CoreData date touched by this batch, as a string (None if empty).
        if not self.coreData:
            return None
        d = min(d.date for d in self.coreData)
        return str(d)
    @hybrid_property
    def changedDatesMax(self):
        # Latest CoreData date touched by this batch, as a string (None if empty).
        if not self.coreData:
            return None
        d = max(d.date for d in self.coreData)
        return str(d)
    # This method isn't used when the object is read from the DB; only when a new one is being
    # created, as from a POST JSON payload.
    def __init__(self, **kwargs):
        # parse datetime fields
        # if there is no createdAt field, set it to datetime now
        if 'createdAt' not in kwargs:
            kwargs['createdAt'] = pytz.utc.localize(datetime.now())
        else:
            logging.info(
                'New batch came in with existing createdAt: %s' % kwargs['createdAt'])
        # setting default values for isPublished, isRevision: mimics preview state (if not set)
        if 'isPublished' not in kwargs:
            kwargs['isPublished'] = False
        if 'isRevision' not in kwargs:
            kwargs['isRevision'] = False
        # silently drop any kwargs that are not mapped Batch attributes
        mapper = class_mapper(Batch)
        relevant_kwargs = {k: v for k, v in kwargs.items() if k in mapper.attrs.keys()}
        super(Batch, self).__init__(**relevant_kwargs)
    def to_dict(self):
        # extend the mixin serialization with the nested CoreData rows
        d = super(Batch, self).to_dict()
        d['coreData'] = [coreData.to_dict() for coreData in self.coreData]
        return d
_FIPS_MAP = None


def fips_lookup(state):
    """Return the FIPS code for *state*, loading the CSV lookup lazily once."""
    global _FIPS_MAP
    if _FIPS_MAP is None:
        # One-time load; the dict is cached at module level for later calls.
        csv_path = os.path.join(os.path.dirname(__file__), 'fips-lookup.csv')
        with open(csv_path) as handle:
            _FIPS_MAP = {row['state']: row['fips'] for row in csv.DictReader(handle)}
    return _FIPS_MAP[state]
_POPULATION_MAP = None


def population_lookup(state):
    """Return the population for *state*, loading the CSV lookup lazily once."""
    global _POPULATION_MAP
    if _POPULATION_MAP is None:
        # One-time load; values are stored as ints, cached at module level.
        csv_path = os.path.join(os.path.dirname(__file__), 'population-lookup.csv')
        with open(csv_path) as handle:
            _POPULATION_MAP = {
                row['state']: int(row['population'])
                for row in csv.DictReader(handle)
            }
    return _POPULATION_MAP[state]
class State(db.Model, DataMixin):
    """Reference row for a US state/territory and its data-source metadata."""
    __tablename__ = 'states'
    state = db.Column(db.String, primary_key=True, nullable=False)
    name = db.Column(db.String)
    covid19Site = db.Column(db.String)
    covid19SiteOld = db.Column(db.String)
    covid19SiteSecondary = db.Column(db.String)
    covid19SiteTertiary = db.Column(db.String)
    covid19SiteQuaternary = db.Column(db.String)
    covid19SiteQuinary = db.Column(db.String)
    twitter = db.Column(db.String)
    notes = db.Column(db.String)
    pui = db.Column(db.String)
    covidTrackingProjectPreferredTotalTestUnits = db.Column(db.String)
    covidTrackingProjectPreferredTotalTestField = db.Column(db.String)
    totalTestResultsField = db.Column(db.String)
    totalTestResultsFieldDbColumn = db.Column(db.String, nullable=False)
    # here for parity with public API, deprecated field
    @hybrid_property
    def pum(self):
        return False
    @hybrid_property
    def fips(self):
        # derived from the state code via the module-level CSV lookup
        return fips_lookup(self.state)
    @hybrid_property
    def population(self):
        # derived from the state code via the module-level CSV lookup
        return population_lookup(self.state)
    @validates('totalTestResultsFieldDbColumn')
    def validate_totalTestResultsFieldDbColumn(self, key, value):
        """Validate the totalTestResultsFieldDbColumn value, used to calculate totalTestResults.
        Acceptable values are either a valid CoreData column name or a known special keyword like 'posNeg'.
        """
        ttr_special_keywords = ['posNeg']
        is_valid = value in ttr_special_keywords or value in [column.name for column in CoreData.__table__.columns]
        assert is_valid, "invalid value for totalTestResultsFieldDbColumn"
        return value
    def __init__(self, **kwargs):
        # silently drop any kwargs that are not mapped State attributes
        mapper = class_mapper(State)
        relevant_kwargs = {k: v for k, v in kwargs.items() if k in mapper.attrs.keys()}
        super(State, self).__init__(**relevant_kwargs)
class CoreData(db.Model, DataMixin):
    """One day's worth of reported COVID data for one state, tied to its batch."""
    __tablename__ = 'coreData'
    # composite PK: state_name, batch_id, date
    state = db.Column(db.String, db.ForeignKey('states.state'),
        nullable=False, primary_key=True)
    state_obj = relationship("State", lazy="selectin")
    batchId = db.Column(db.Integer, db.ForeignKey('batches.batchId'),
        nullable=False, primary_key=True)
    # the day we mean to report this data for; meant for "states daily" extraction
    date = db.Column(db.Date, nullable=False, primary_key=True,
        info={'repr': lambda x: x.strftime('%Y-%m-%d')})
    # data columns; "includeInUSDaily" marks fields summed into the US rollup
    positive = db.Column(db.Integer, info={"includeInUSDaily": True})
    negative = db.Column(db.Integer, info={"includeInUSDaily": True})
    pending = db.Column(db.Integer, info={"includeInUSDaily": True})
    hospitalizedCurrently = db.Column(db.Integer, info={"includeInUSDaily": True})
    hospitalizedCumulative = db.Column(db.Integer, info={"includeInUSDaily": True})
    hospitalizedDischarged = db.Column(db.Integer, info={"includeInUSDaily": True})
    inIcuCurrently = db.Column(db.Integer, info={"includeInUSDaily": True})
    inIcuCumulative = db.Column(db.Integer, info={"includeInUSDaily": True})
    onVentilatorCurrently = db.Column(db.Integer, info={"includeInUSDaily": True})
    onVentilatorCumulative = db.Column(db.Integer, info={"includeInUSDaily": True})
    recovered = db.Column(db.Integer, info={"includeInUSDaily": True})
    death = db.Column(db.Integer, info={"includeInUSDaily": True})
    deathConfirmed = db.Column(db.Integer, info={"includeInUSDaily": True})
    deathProbable = db.Column(db.Integer, info={"includeInUSDaily": True})
    probableCases = db.Column(db.Integer, info={"includeInUSDaily": True})
    # PCR/viral fields
    totalTestsViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveTestsViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    negativeTestsViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveCasesViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    totalTestEncountersViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    totalTestsPeopleViral = db.Column(db.Integer, info={"includeInUSDaily": True})
    # Antibody fields
    totalTestsAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveTestsAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    negativeTestsAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveTestsPeopleAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    negativeTestsPeopleAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    totalTestsPeopleAntibody = db.Column(db.Integer, info={"includeInUSDaily": True})
    # Antigen testing
    totalTestsPeopleAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveTestsPeopleAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    negativeTestsPeopleAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    totalTestsAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    positiveTestsAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    negativeTestsAntigen = db.Column(db.Integer, info={"includeInUSDaily": True})
    # from worksheet, "Notes" column (made by checker or doublechecker)
    privateNotes = db.Column(db.String)
    # Public Notes related to state
    notes = db.Column(db.String)
    # these are the source-of-truth time columns in UTC/GMT. String representations are in UTC.
    lastUpdateTime = db.Column(db.DateTime(timezone=True),
        info={'repr': lambda x: x.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")})
    dateChecked = db.Column(db.DateTime(timezone=True),
        info={'repr': lambda x: x.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")})
    checker = db.Column(db.String(100))
    doubleChecker = db.Column(db.String(100))
    publicNotes = db.Column(db.String)
    dataQualityGrade = db.Column(db.String)
    # TODO: which columns from state matrix and states? In general, what metadata?
    # What other columns are we missing?
    sourceNotes = db.Column(db.String)
    # Returns a list of CoreData column names representing numerical data that needs to be summed
    # and served in States Daily.
    @classmethod
    def numeric_fields(cls):
        colnames = []
        for column in cls.__table__.columns:
            if column.info.get("includeInUSDaily") == True:
                colnames.append(column.name)
        return colnames
    @staticmethod
    def stringify(timestamp):
        # canonical UTC string form used by the public API
        return timestamp.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    @hybrid_property
    def lastUpdateEt(self):
        # convert lastUpdateTime (UTC) to ET, return a string that matches how we're outputting
        # in the public API
        if self.lastUpdateTime is not None:
            return self.lastUpdateTime.astimezone(pytz.timezone('US/Eastern')).strftime(
                "%-m/%-d/%Y %H:%M")
        else:
            return None
    @hybrid_property
    def totalTestResultsSource(self):
        """The source column used to calculate totalTestResults, equal to the state's totalTestResultsFieldDbColumn"""
        return self.state_obj.totalTestResultsFieldDbColumn
    @hybrid_property
    def totalTestResults(self):
        """Calculated value of total test results.
        This value is determined based on the state's totalTestResultsFieldDbColumn.
        If a CoreData column name is specified, that column's value is returned as-is
        (None stays None). Alternatively, the 'posNeg' keyword can be used to indicate
        totalTestResults = (positive+negative), where a missing side counts as 0."""
        column = self.totalTestResultsSource
        if column == 'posNeg':  # posNeg: calculated value (positive + negative) of total test results.
            if self.negative is None:
                return self.positive or 0
            if self.positive is None:
                return self.negative or 0
            return self.positive + self.negative
        else:  # column is a coreData column; NOTE(review): None is returned unconverted here
            value = getattr(self, column)
            return value
    # Converts the input to a string and returns parsed datetime.date object
    @staticmethod
    def parse_str_to_date(date_input):
        return parser.parse(str(date_input), ignoretz=True).date()
    @staticmethod
    def valid_fields_checker(candidates):
        '''
        dict[string] -> ([string], [string])
        Gets a list of field names and returns a tuple of (valid_fields, unknown_fields).
        If valid_fields is empty, then this list contains no data for this object.
        If unknown_fields is not-empty, then this list contains extra fields
        that mean nothing for this object, but we might want to alert on this.
        In the valid fields, we exclude state, date and batchId because these are the
        primary keys for the record, and all keys without values make it a dull record
        '''
        mapper = class_mapper(CoreData)
        keys = mapper.attrs.keys()
        candidate_set = set(candidates)
        key_set = set(keys)
        unknowns = candidate_set - key_set
        valid = candidate_set & key_set
        # drop the primary-key columns from the "valid" set
        valid = valid - {x.name for x in mapper.primary_key}
        return (valid, unknowns)
    def field_diffs(self, dict_other):
        ''' Return the list of fields that dict_other would modify if applied
        on this row.
        Some business logic is applied, and some fields are skipped from comparison, field
        aliases get special treatment.
        Return ChangedRow if there are changes, or None if no changes
        '''
        diffs = []
        if not dict_other:
            return None
        # we want to compare after all parsing is done
        other = CoreData(**dict_other)
        # special casing for date aliases
        # TODO: define the ordering of expected date fields, and expectations
        # if multiple aliases for the same field exist
        if 'lastUpdateIsoUtc' in dict_other and not 'lastUpdateTime' in dict_other:
            # if both fields exist this is not ideal, but the object prefers 'lastUpdateTime'.
            # for now, 'lastUpdateTime' wins
            dict_other['lastUpdateTime'] = dict_other['lastUpdateIsoUtc']
        for field in CoreData.__table__.columns.keys():
            # we expect batch IDs to be different, skip comparing those
            if field == 'batchId':
                continue
            # for any other field, compare away
            if field in dict_other and getattr(other, field) != getattr(self, field):
                old = getattr(self, field)
                new = getattr(other, field)
                diffs.append(ChangedValue(field=field, old=old, new=new))
        if diffs:
            changes = ChangedRow(date=self.date, state=self.state, changed_values=diffs)
            return changes
        return None
    @staticmethod
    def _cleanup_date_kwargs(kwargs):
        """Normalize the incoming date/time kwargs in place and return them.

        Raises ValueError if a timestamp arrives without timezone information.
        """
        # accept either lastUpdateTime or lastUpdateIsoUtc as an input
        last_update_time = kwargs.get('lastUpdateTime') or kwargs.get('lastUpdateIsoUtc')
        if last_update_time:
            if isinstance(last_update_time, str):
                last_update_time = parser.parse(last_update_time)
            if last_update_time.tzinfo is None:
                raise ValueError(
                    'Expected a timezone with last update time: %s' % last_update_time)
            kwargs['lastUpdateTime'] = last_update_time
        date_checked = kwargs.get('dateChecked')
        if date_checked:
            if isinstance(date_checked, str):
                date_checked = parser.parse(date_checked)
            if date_checked.tzinfo is None:
                raise ValueError(
                    'Expected a timezone with dateChecked: %s' % kwargs['dateChecked'])
            kwargs['dateChecked'] = date_checked
        # "date" is expected to be a date string, no times or timezones
        if 'date' in kwargs:
            kwargs['date'] = CoreData.parse_str_to_date(kwargs['date'])
        else:
            kwargs['date'] = date.today()
        return kwargs
    def copy_with_updates(self, **kwargs):
        # shallow copy of this row with kwargs overriding individual fields
        kwargs = self._cleanup_date_kwargs(kwargs)
        self_props = self.to_dict()
        self_props.update(kwargs)
        return CoreData(**self_props)
    def __init__(self, **kwargs):
        # strip any empty string fields from kwargs
        kwargs = {k: v for k, v in kwargs.items() if v is not None and v != ""}
        kwargs = self._cleanup_date_kwargs(kwargs)
        # silently drop any kwargs that are not mapped CoreData attributes
        mapper = class_mapper(CoreData)
        relevant_kwargs = {k: v for k, v in kwargs.items() if k in mapper.attrs.keys()}
        super(CoreData, self).__init__(**relevant_kwargs)
| COVID19Tracking/covid-publishing-api | app/models/data.py | data.py | py | 17,043 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "sqlalchemy.inspection.inspect",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.hybrid.hybrid_property",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "app.db.Model",
"line_number": 36,
"usage_type": "attribute"
},
... |
43047803864 | # !/usr/bin/env python
# coding: utf-8
import json
import elasticsearch
from elasticsearch.exceptions import NotFoundError
import uuid
from wildzh.utils.config import ConfigLoader
__author__ = 'zhouhenglc'
class ExamEs(object):
    """Thin Elasticsearch wrapper for indexing and searching exam questions."""
    def __init__(self, es_conf):
        # read the ES endpoint from the [es] section of the given config file
        cl = ConfigLoader(es_conf)
        host = cl.get('es', 'host')
        port = cl.get('es', 'port')
        ent = {"host": host, "port": port}
        self._es_man = None
        self.es_endpoint = ent
        self.index = 'exam_v3'
        self.index_type = 'exam'
        # fields analyzed for full-text search vs. stored-only fields
        self.index_fields = ['desc', 'options', 'answer']
        self.no_index_fields = ['exam_no']
    @property
    def es_man(self):
        # Lazily build the client and make sure the index exists on first use.
        if self._es_man is None:
            hosts = [self.es_endpoint]
            self._es_man = elasticsearch.Elasticsearch(hosts=hosts)
            self.create_index()
        return self._es_man
    def delete_index(self):
        """Drop the whole index (mapping and documents)."""
        return self.es_man.indices.delete(self.index)
    def create_index(self):
        """Create the index with ik_max_word analysis on the searchable fields."""
        if not self.es_man:
            return
        if self.es_man.indices.exists(self.index):
            return
        properties = {}
        # searchable fields use the ik_max_word Chinese analyzer
        index_p = {'index': True,
                   'type': 'text',
                   'analyzer': 'ik_max_word',
                   'search_analyzer': 'ik_max_word'}
        for field in self.index_fields:
            properties[field] = index_p
        # stored-only fields keep the default (no custom analyzer)
        no_index_p = {'type': 'text'}
        for field in self.no_index_fields:
            properties[field] = no_index_p
        body = {'mappings': {'properties': properties}}
        self.es_man.indices.create(self.index, body=body)
    def re_create_index(self):
        """Drop and recreate the index, losing all documents."""
        self.delete_index()
        self.create_index()
    def add_one(self, doc_id, body):
        """Index (create or overwrite) one document under doc_id."""
        res = self.es_man.index(index=self.index, id=doc_id, body=body)
        return res
    def update_one(self, doc_id, body):
        """Partially update an existing document; raises NotFoundError if absent."""
        doc = {'doc': body}
        res = self.es_man.update(self.index, doc_id, doc)
        return res
    def update_one_item(self, doc_id, exam_no, desc, options, answer,
                        select_mode=None):
        """Update a question, falling back to insert when it does not exist yet."""
        body = {'desc': desc, 'options': options, 'answer': answer,
                'sm': select_mode}
        try:
            res = self.update_one(doc_id, body)
        except NotFoundError:
            # not indexed yet: include exam_no and create it instead
            body['exam_no'] = exam_no
            res = self.add_one(doc_id, body)
        return res
    def add_one_item(self, doc_id, exam_no, desc, options, answer,
                     select_mode=None):
        """Insert a question document for the given exam."""
        body = {'exam_no': exam_no, 'desc': desc, 'options': options,
                'answer': answer, 'sm': select_mode}
        return self.add_one(doc_id, body)
    def exists(self, doc_id):
        """Return True when a document with doc_id is indexed."""
        r = self.es_man.exists(self.index, doc_id)
        return r
    def get_one(self, doc_id):
        """Fetch one document by id (raises NotFoundError when missing)."""
        r = self.es_man.get(self.index, doc_id)
        return r
    def clear_index(self):
        """Delete every document while keeping the index/mapping."""
        body = {"query": {"match_all": {}}}
        r = self.es_man.delete_by_query(self.index, body=body)
        return r
    def clear_exam(self, exam_no):
        """Delete all documents belonging to one exam."""
        query = {"constant_score" :
                     {"filter" :
                          {"term" : { "exam_no" : exam_no} } } }
        body = {'query': query}
        r = self.es_man.delete_by_query(self.index, body=body)
        return r
    def search(self, s, field=None):
        """Debug helper: match *s* against one field and print the hits."""
        if field not in self.index_fields:
            field = self.index_fields[0]
        res = self.es_man.search(index=self.index,
                                 body={"query": {"match": {field: s}}})
        print("Got %d Hits:" % res['hits']['total']['value'])
        for hit in res['hits']['hits']:
            print(hit)
            print("%(desc)s %(options)s: %(answer)s" % hit["_source"])
    def count_exam(self, exam_no):
        """Return the number of indexed questions for one exam."""
        query = {"constant_score" :
                     {"filter" :
                          {"term" : { "exam_no" : exam_no} } } }
        body = {'query': query}
        res = self.es_man.count(index=self.index, body=body)
        return res['count']
    def search_multi(self, s, fields=None):
        """Multi-field match for *s*; returns hits as dicts with _id and score."""
        if fields is None:
            fields = self.index_fields
        else:
            # restrict to known searchable fields; bail out if none remain
            fields = list(set(self.index_fields) & set(fields))
            if len(fields) <= 0:
                return []
        res = self.es_man.search(index=self.index,
                                 body={
                                     "query": {
                                         "multi_match": {
                                             'query': s,
                                             'fields': fields}}})
        q_items = []
        for hit in res['hits']['hits']:
            q_item = {'_id': hit['_id'], 'score': hit['_score']}
            q_item.update(hit["_source"])
            q_items.append(q_item)
        return q_items
if __name__ == "__main__":
ee = ExamEs('../../etc/es.conf')
# ee.re_create_index()
# doc_id = uuid.uuid4().hex
# ee.clear_index()
# ee.add_one_item(doc_id, '网络操作', '编辑网络', '创建网络,vlan网络类型不可编辑,如果多个网络类型,默认选择第一个')
print(ee.count_exam('1567228509'))
print(ee.clear_exam('1567228509'))
# ee.update_one_item('1111111', '网络操作2', '更新网络', '网络都是vlan的')
# ee.get_one(doc_id)
# ee.get_one('1111111111')
| meisanggou/wildzh | wildzh/classes/exam_es.py | exam_es.py | py | 5,345 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wildzh.utils.config.ConfigLoader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "elasticsearch.exceptions.NotFoundError",
"line_number": 75,
"usage_type": "n... |
653126704 | from flask import Blueprint
from flask import render_template,request,redirect,url_for
from models import product as pd
from .forms import ProductForm
from app import db
products = Blueprint('products', __name__, template_folder='templates')
@products.route('/', methods=['GET','POST'])
def index():
    """Product landing page; a POST forwards the search term to customer search."""
    if request.method != 'POST':
        return render_template('products/index.html')
    lname = request.form['search']
    # NOTE(review): 'customers.searsh' looks like a typo for 'search', but it must
    # match the endpoint registered on the customers blueprint — confirm before renaming.
    return redirect(url_for('customers.searsh' ,lname=lname))
@products.route('/addProduct', methods = ['GET','POST'])
def addProduct():
    """Render the creation form on GET; persist a new product on POST."""
    if request.method != 'POST':
        return render_template('products/addProduct.html', form=ProductForm())
    newProduct = pd(
        title=request.form['title'],
        description=request.form['description'],
        # article_number=request.form['article_number'],
        quantity=request.form['quantity'],
    )
    db.session.add(newProduct)
    db.session.commit()
    return redirect(url_for('products.index'))
@products.route('/listOfProducts')
def listOfProducts():
    """List every product in the catalog."""
    all_products = pd.query.all()  # local name avoids shadowing the blueprint
    return render_template('products/listOfProducts.html', products=all_products)
| SVLozovskoy/flask-crm | products/blueprint.py | blueprint.py | py | 1,279 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.... |
16414347802 | #!/usr/bin/python
from optparse import OptionParser
import logging
from time import sleep
import random
import sys
from formats import formats
from messages import messages
# Command-line options controlling the fake-log generator's output.
parser = OptionParser()
parser.add_option("-m","--mode", dest="mode")
parser.add_option("-f", "--format", dest="format",
                  help="logging format", metavar="FILE")
parser.add_option("-s", "--speed", dest="speed",
                  help="logging speed", metavar="FILE")
parser.add_option("-r", "--random_speed", dest="random_speed",
                  help="logging speed", metavar="FILE")
parser.add_option("-t", "--rotate", dest="rotate", default=True,
                  help="roTate between messages", action="store_true")
parser.add_option("-a", "--ascii", dest="ascii", default=False,
                  help="add ascii art")

(options, args) = parser.parse_args()

# Resolve options into module-level settings, with fallback defaults.
# NOTE(review): `format` shadows the builtin; and with default=True plus
# store_true, `rotate` can never be turned off from the CLI — confirm intent.
format = options.format or 'default'
mode = options.mode or 'default'
speed = options.speed or 'medium'
is_random_speed = options.random_speed or False
rotate_messages = options.rotate or False
use_ascii_art = options.ascii or False

# Seconds to sleep per tick for each named speed.
speed_map = { 'slow':.5,
              'medium': .1,
              'fast':.01,
              'lurch':1,
              'glacial': 2,
            }
chug_speed = speed_map[speed]

logging.basicConfig(format=formats[format])
logger = logging.getLogger('tcpserver')

# Choice pools used when --random_speed is enabled.
RANDOM_RANGES = {
    'dot_widths' : [3, 5, 7, 9, 13, 20,],
    'sleep_counts' : [.001, .01, .5, 1]
}
def get_random_num(choices=()):
    """Return a uniformly random element of *choices*.

    Fixes two defects in the original: the shared mutable default argument
    (``choices=[]``) and the in-place ``random.shuffle`` that silently reordered
    the caller's list. ``random.choice`` over a copied list keeps the uniform
    selection, accepts any iterable (e.g. ``dict.keys()`` views), and still
    raises IndexError on empty input.
    """
    return random.choice(list(choices))
def nap(nap_type=None, is_random=is_random_speed):
    """Sleep either a random duration from RANDOM_RANGES[nap_type] or chug_speed.

    NOTE(review): the ``is_random`` default is bound to the module-level
    ``is_random_speed`` at definition time, not per call.
    """
    if is_random:
        sleep(get_random_num(choices=RANDOM_RANGES[nap_type]))
    else:
        sleep(chug_speed)
    return
def write_dots(is_random=True, dot_width=40):
    """Print an animated row of dots, napping between each dot.

    When ``is_random`` is true the width is drawn from RANDOM_RANGES
    ('dot_widths') instead of the ``dot_width`` argument.
    """
    if is_random: dot_width = get_random_num(RANDOM_RANGES['dot_widths'])
    # `range` instead of the Python-2-only `xrange`: identical iteration on
    # Python 2 and keeps the script runnable on Python 3.
    for x in range(0, dot_width):
        sys.stdout.write('. ')
        sys.stdout.flush()
        nap(nap_type='sleep_counts')
    sys.stdout.write("\n")
def render_ascii(ascii):
    """Type out ASCII-art lines character by character, napping between chars."""
    for row in ascii:
        for ch in list(row):
            sys.stdout.write(ch)
            sys.stdout.flush()
            nap(nap_type='sleep_counts')
        sys.stdout.write("\n")
def run():
    """Loop forever emitting fake log lines according to the CLI settings."""
    if use_ascii_art:
        import os
        ascii_dir = os.listdir('./ascii')
    while True:
        nap(nap_type='sleep_counts')
        if rotate_messages and not use_ascii_art:
            # pick a random canned message, then animate a dot row
            logger.warning(messages[get_random_num(messages.keys())])
            write_dots()
        elif use_ascii_art:
            # pick a random art file from ./ascii and type it out
            random.shuffle(ascii_dir)
            ascii_file = './ascii/' + ascii_dir[0]
            ascii_image = open(ascii_file,'r').readlines()
            logger.warning(messages[get_random_num(messages.keys())])
            render_ascii(ascii_image)
        else:
            # fixed mode: always emit the message selected by --mode
            logger.warning(messages[mode])
            write_dots()
# Script entry point: run the generator loop until interrupted.
if __name__ == '__main__':
    run()
| tobinmori/fauxprox | foxprox.py | foxprox.py | py | 2,928 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "optparse.OptionParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "formats.formats",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "logging.get... |
23998377870 | from enum import Enum
from singleton import Singleton
from datetime import datetime
# Breaker states: CLOSED = normal flow, OPEN = tripped, HALFOPEN = probing recovery.
Circuitstate = Enum("Circuitstate", ["CLOSED", "OPEN", 'HALFOPEN'])
class CircuitOpenException(Exception):
    """Raised when a call is attempted while the circuit is open."""
    pass
class Circuitbreaker(Singleton):
    """Circuitbreaker is singleton because if multiple functions are decorated
    with the same name, they should refer to the same circuit breaker, the name
    can be just an arbitrary string chosen by functions, or the hostname or url"""

    # class-level defaults; instances mutate these attributes per breaker
    state = Circuitstate.CLOSED
    ts = datetime.now()

    def __new__(cls, name, timeout=None, retries=None):
        """Since the first invocation matters we ignore any paramters once set"""
        # Singleton.__new__ is keyed by `name` — presumably returns the same
        # object for the same name (defined in the project's singleton module).
        it = super().__new__(cls, name)
        it.timeout = getattr(it, "timeout", None) or timeout or 5
        it.retries = getattr(it, "retries", None) or retries or 2
        return it

    def __enter__(self):
        # lazily initialize the failure counter on first entry
        if not getattr(self, "_numtry", None):
            self._numtry = 0
        if self.state == Circuitstate.CLOSED:
            return self
        # broken circuit, if waited long enough then retry
        elapsed = (datetime.now() - self.ts).total_seconds()
        if elapsed > self.timeout:
            self.state = Circuitstate.HALFOPEN
            print("Timeout over, retrying as half open")
            # start the probe with the counter maxed so one more failure re-opens
            self._numtry = self.retries
            return self
        print("Circuit open, not allowed")
        raise CircuitOpenException()

    def __exit__(self, extype, exval, extb):
        # Success path: a half-open probe that worked closes the circuit again.
        if not exval:
            if self.state == Circuitstate.HALFOPEN:
                self.state = Circuitstate.CLOSED
            self._numtry = 0
            return True
        # we're here because of exception, if enough failed, break
        self._numtry += 1
        if self._numtry >= self.retries:
            print(
                f"Exception {extype}, {self._numtry} exceeded {self.retries} opening circuit"
            )
            self.ts = datetime.now()
            self.state = Circuitstate.OPEN
        else:
            print(f"Exception {extype} {exval}, retry count {self._numtry}")
        # returning True deliberately swallows the body's exception either way
        return True

    def __call__(self, function):
        """When a function is wrapped, we use the context manager ourselves"""
        async def wrapp(*args, **kwargs):
            # returns None both when the circuit is open and when the call failed
            try:
                with self:
                    return await function(*args, **kwargs)
            except CircuitOpenException:
                pass
            return None
        return wrapp
if __name__ == "__main__":
x = Circuitbreaker("foo")
y = Circuitbreaker("foo", 4)
z = Circuitbreaker("bar")
assert x is y
print(x.timeout, y.timeout)
assert x is not z
| kousiknandy/cktbkr | circuitbreaker.py | circuitbreaker.py | py | 2,676 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "singleton.Singleton",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime"... |
34885760215 | import os
import imageio
import atexit
import math
from multiprocessing import Process, Queue
from gym.spaces import Box
from gym import utils
from gym.utils import seeding
import numpy as np
import mujoco_py
class PushObjectEnv(utils.EzPickle):
def __init__(self, frame_skip, max_timestep=3000, log_dir='', seed=None):
self.frame_skip = frame_skip
model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'push_object.xml')
self.model = mujoco_py.load_model_from_path(model_path)
self.sim = mujoco_py.MjSim(self.model, nsubsteps=frame_skip)
self.data = self.sim.data
self.viewer = mujoco_py.MjViewer(self.sim)
self.joint_names = list(self.sim.model.joint_names)
self.joint_addrs = [self.sim.model.get_joint_qpos_addr(name) for name in self.joint_names]
self.obj_name = 'cube'
self.endeff_name = 'endeffector'
self.goal_pos = np.array([0., 0.])
self.radiuses = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.075]
self.level = len(self.radiuses)
self.dist_thresh = 0.01
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt))
}
self.t = 0
self.max_timestep = max_timestep
pos_actuators = [actuator for actuator in self.model.actuator_names if 'position' in actuator]
vel_actuators = [actuator for actuator in self.model.actuator_names if 'velocity' in actuator]
assert(len(pos_actuators) + len(vel_actuators) == len(self.model.actuator_names))
self.pos_actuator_ids = [self.model.actuator_name2id(actuator) for actuator in pos_actuators]
self.vel_actuator_ids = [self.model.actuator_name2id(actuator) for actuator in vel_actuators]
self.actuator_ids = self.pos_actuator_ids
self.act_dim = len(self.actuator_ids)
# compute array: position actuator's joint ranges, in order of self.pos_actuator_ids
pos_actuators_joints = self.model.actuator_trnid[self.pos_actuator_ids][:, 0]
self.joint_ranges = [self.model.jnt_range[joint] for joint in pos_actuators_joints]
self.joint_ranges = np.array(self.joint_ranges)
# initial position/velocity of robot and box
self.init_qpos = self.data.qpos.ravel().copy()
self.init_qvel = self.data.qvel.ravel().copy()
_ob, _reward, _done, _info = self.step(np.zeros(self.act_dim))
assert not _done
self.obs_dim = _ob.size
bounds = self.model.actuator_ctrlrange[self.actuator_ids].copy()
low = bounds[:, 0]
high = bounds[:, 1]
self.action_space = Box(low, high)
high = np.inf*np.ones(self.obs_dim)
low = -high
self.observation_space = Box(low, high)
self.reward_range = (-np.inf, np.inf)
self.seed(seed)
# set up videos
self.video_idx = 0
self.video_path = os.path.join(log_dir, "video/video_%07d.mp4")
self.video_dir = os.path.abspath(os.path.join(self.video_path, os.pardir))
self.recording = False
os.makedirs(self.video_dir, exist_ok=True)
print('Saving videos to ' + self.video_dir)
# close on exit
atexit.register(self.close)
def __del__(self):
self.close()
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): an action provided by the environment
Returns:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
"""
self.do_simulation(action)
ob = self._get_obs()
obj_pos = self.get_body_com(self.obj_name)
obj_pos_xy = obj_pos[:2]
# distance between object and goal
dist_sq_og = np.sum(np.square(obj_pos_xy - self.goal_pos))
rew_obj_goal = 0.1 * (np.exp(-800. * dist_sq_og) - 1.)
# distance between object and robot end-effector
endeff_pos = self.get_body_com(self.endeff_name)
dist_sq_eo = np.sum(np.square(endeff_pos - obj_pos))
rew_endeff_obj = 0.05 * (np.exp(-50. * dist_sq_eo) - 1.)
reward = rew_obj_goal + rew_endeff_obj
# reward_ctrl = -np.square(action).mean()
# reward = rew_obj_goal + reward_ctrl
done = False
info = dict()
if self.t > self.max_timestep:
done = True
info['dist_goal'] = np.sqrt(dist_sq_og)
self.t += 1
return ob, reward, done, info
def reset(self, rand_init_pos=False, init_pos=[0., 0.]):
"""Resets the state of the environment and returns an initial observation.
Returns: observation (object): the initial observation of the
space.
"""
self.t = 0
self.sim.reset()
ob = self.reset_model(rand_init_pos, init_pos)
return ob
def render(self, mode='human', close=False):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Note:
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
Args:
mode (str): the mode to render with
close (bool): close all open renderings
Example:
class MyEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def render(self, mode='human'):
if mode == 'rgb_array':
return np.array(...) # return RGB frame suitable for video
elif mode is 'human':
... # pop up a window and render
else:
super(MyEnv, self).render(mode=mode) # just raise an exception
"""
if not close: # then we have to check rendering mode
modes = self.metadata.get('render.modes', [])
if len(modes) == 0:
raise Exception('{} does not support rendering (requested mode: {})'.format(self, mode))
elif mode not in modes:
raise Exception('Unsupported rendering mode: {}. (Supported modes for {}: {})'.format(mode, self, modes))
if close:
if self.viewer is not None:
self.viewer.finish()
self.viewer = None
return
if mode == 'rgb_array':
self.viewer.render()
data, width, height = self._get_viewer().get_image()
return np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :]
elif mode == 'human':
self.viewer.render()
def close(self):
"""Override _close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
# _closed will be missing if this instance is still
# initializing.
if not hasattr(self, '_closed') or self._closed:
return
self._close()
self._closed = True
def start_record_video(self, path=None):
if self.recording:
print('record video in progress. calling stop before start.')
self.stop_record_video()
self.recording = True
self.viewer._record_video = True
self.viewer._hide_overlay = True
fps = (1 / self.viewer._time_per_render)
path = path or (self.video_path % self.video_idx)
self.video_process = Process(target=save_video,
args=(self.viewer._video_queue, path, fps))
self.video_process.start()
def stop_record_video(self):
self.viewer._video_queue.put(None)
self.video_process.join()
self.video_idx += 1
self.recording = False
print('finished recording video %d' % self.video_idx)
# ----------------------------
@property
def spec(self):
return None
def reset_model(self, rand_init_pos, init_pos=[0., 0.]):
"""
Reset the robot degrees of freedom (qpos and qvel).
"""
init_qpos = self.init_qpos
if rand_init_pos:
# center around zero, with radius 0.03
# obj_pos = np.random.uniform(size=[2,]) * 0.3 - 0.15
max_radius = self.radiuses[self.level - 1]
radius = np.random.uniform(0., max_radius)
print('level: %d, max_radius: %f, radius: %f' % (self.level, max_radius, radius))
angle = np.random.uniform(-math.pi, math.pi)
x = np.cos(angle) * radius
y = np.sin(angle) * radius
obj_pos = np.array([x, y])
else:
obj_pos = init_pos
init_qpos[:2] = obj_pos
self.set_state(self.init_qpos, self.init_qvel)
return self._get_obs()
def level_up(self):
self.level += 1
n_levels = len(self.radiuses)
self.level = np.minimum(self.level, n_levels)
print('increasing level to: %d' % self.level)
def viewer_setup(self):
"""
This method is called when the viewer is initialized and after every reset
Optionally implement this method, if you need to tinker with camera position
and so forth.
"""
pass
# -----------------------------
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
state = self.sim.get_state()
state.qpos[:] = qpos
state.qvel[:] = qvel
self.sim.set_state(state)
self.sim.step()
self.sim.forward()
# self.model._compute_subtree() # pylint: disable=W0212
# self.model.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl):
# compute end position given velocity control
# qpos_ctrl = [self.sim.data.qpos[addr] + qvel * self.dt for (addr, qvel) in zip(self.joint_addrs, ctrl)]
# print(str(qpos_ctrl))
# compute velocity control
n_vel_actuators = len(self.vel_actuator_ids)
vel_ctrl = np.zeros(shape=[n_vel_actuators])
# clip by -1, 1
ctrl = np.clip(ctrl, -1., 1.)
# scale position control up to joint range
pos_ctrl = self.denormalize_pos(ctrl)
self.sim.data.ctrl[self.actuator_ids] = pos_ctrl
self.sim.data.ctrl[self.vel_actuator_ids] = vel_ctrl # set velocity to zero for damping
self.sim.step()
self.sim.forward()
def denormalize_pos(self, pos_ctrl):
low = self.joint_ranges[:, 0]
high = self.joint_ranges[:, 1]
pos_ctrl = low + (pos_ctrl + 1.) / 2. * (high - low)
return pos_ctrl
def normalize_pos(self, pos):
low = self.joint_ranges[:, 0]
high = self.joint_ranges[:, 1]
pos = (pos - low) / (high - low) * 2. - 1.
return pos
# def _get_viewer(self):
# if self.viewer is None:
# self.viewer = mujoco_py.MjViewer()
# self.viewer.start()
# self.viewer.set_model(self.model)
# self.viewer_setup()
# return self.viewer
# com: center of mass?
def get_body_com(self, body_name):
idx = self.model.body_name2id(body_name)
return self.data.body_xpos[idx]
def get_body_comvel(self, body_name):
idx = self.model.body_name2id(body_name)
return self.data.xvelp[idx]
def get_body_xmat(self, body_name):
idx = self.model.body_name2id(body_name)
return self.data.body_xmat[idx]
# def state_vector(self):
# return np.concatenate([
# self.model.data.qpos.flat,
# self.model.data.qvel.flat
# ])
def _get_obs(self):
actuator_pos = self.data.actuator_length[self.pos_actuator_ids]
actuator_vel = self.data.actuator_velocity[self.vel_actuator_ids]
# actuator velocity can be out of [-1, 1] range, clip
# actuator_vel = actuator_vel.clip(-1., 1.)
# normalize pos
pos_cos = np.cos(actuator_pos)
pos_sin = np.sin(actuator_pos)
actuator_pos = self.normalize_pos(actuator_pos)
cube_com = self.get_body_com("cube")
return np.concatenate([
cube_com,
pos_cos,
pos_sin,
actuator_pos
])
# Separate Process to save video. This way visualization is
# less slowed down.
def save_video(queue, filename, fps):
writer = imageio.get_writer(filename, fps=fps)
while True:
frame = queue.get()
if frame is None:
break
writer.append_data(frame)
writer.close()
if __name__ == '__main__':
env = PushObjectEnv(frame_skip=1)
env.reset()
# zeros = np.zeros(shape=[6])
# ones = np.ones(shape=[6])
for j in range(3):
# env.start_record_video()
# for i in range(3000):
# acts = np.random.normal(zeros, ones)
# _, _, done, _ = env.step(acts)
# env.render()
# if done:
# env.reset()
# env.stop_record_video()
for i in range(1500):
env.step([0., 0., 0., 0., 0., 0.])
env.render()
for i in range(1500):
# env.step([1., 1., 1., 1., 1., 1.])
env.step([0., 0., 1., 0., 0., 0.])
env.render()
# for i in range(1500):
# env.step([-1., -1., -1., -1., -1., -1.])
# # env.step([0., -1., -1., 0., 0., 0.])
# env.render()
| keven425/robot-learn | rl/environment/push_object.py | push_object.py | py | 15,646 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.utils.EzPickle",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gym.utils",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
25047222226 | # -*-coding:utf-8 -*-
import os
import test
import functools
from unittest.loader import TestLoader
from baseCase.case import BaseTest
class BaseLoader(TestLoader):
def loadTestsFromTestCase(self, testCaseClass):
def isTestMethod(arr, testClass=testCaseClass):
return arr[:4].lower().startswith('test') and callable(
getattr(testClass, arr)) or arr == "runTest" and callable(getattr(testClass, arr))
testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
if self.sortTestMethodsUsing:
testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
loaded_suite = self.suiteClass(map(testCaseClass, testFnNames))
return loaded_suite
def loadTestsFromModule(self, module, *args, pattern=None, **kws):
dir_name = os.listdir(os.path.dirname(module.__file__))
tests1 = [n[:-3] for n in dir_name if n.lower().startswith("test")]
[__import__("".join([str(module.__package__), '.', i]), fromlist=True) for i in tests1]
itests = self.suiteClass(map(self.loadTestsFromTestCase, BaseTest.__subclasses__()))
return itests
baseLoder = BaseLoader()
if __name__ == "__main__":
b = BaseLoader()
s = b.loadTestsFromModule(test)
print(s)
# unittest.TextTestRunner(verbosity=2).run(s) | xiaoyaojushi/appium_auto_test | baseCase/baseSuite.py | baseSuite.py | py | 1,328 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.loader.TestLoader",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "functools.cmp_to_key",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.di... |
26714078036 | import os
import numpy as np
import keras
from keras.engine.topology import Layer
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Lambda, Reshape, Concatenate
from keras.layers import Activation, LeakyReLU, ELU
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D, BatchNormalization
from keras.optimizers import Adam
from keras import backend as K
from .cond_base import CondBaseModel
from .layers import *
from .utils import *
def sample_normal(args):
z_avg, z_log_var = args
batch_size = K.shape(z_avg)[0]
z_dims = K.shape(z_avg)[1]
eps = K.random_normal(shape=(batch_size, z_dims), mean=0.0, stddev=1.0)
return z_avg + K.exp(z_log_var / 2.0) * eps
def zero_loss(y_true, y_pred):
return K.zeros_like(y_true)
class ClassifierLossLayer(Layer):
__name__ = 'classifier_loss_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(ClassifierLossLayer, self).__init__(**kwargs)
def lossfun(self, c_true, c_pred):
return K.mean(keras.metrics.categorical_crossentropy(c_true, c_pred))
def call(self, inputs):
c_true = inputs[0]
c_pred = inputs[1]
loss = self.lossfun(c_true, c_pred)
self.add_loss(loss, inputs=inputs)
return c_true
class DiscriminatorLossLayer(Layer):
__name__ = 'discriminator_loss_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(DiscriminatorLossLayer, self).__init__(**kwargs)
def lossfun(self, y_real, y_fake_f, y_fake_p):
y_pos = K.ones_like(y_real)
y_neg = K.zeros_like(y_real)
loss_real = keras.metrics.binary_crossentropy(y_pos, y_real)
loss_fake_f = keras.metrics.binary_crossentropy(y_neg, y_fake_f)
loss_fake_p = keras.metrics.binary_crossentropy(y_neg, y_fake_p)
return K.mean(loss_real + loss_fake_f + loss_fake_p)
def call(self, inputs):
y_real = inputs[0]
y_fake_f = inputs[1]
y_fake_p = inputs[2]
loss = self.lossfun(y_real, y_fake_f, y_fake_p)
self.add_loss(loss, inputs=inputs)
return y_real
class GeneratorLossLayer(Layer):
__name__ = 'generator_loss_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(GeneratorLossLayer, self).__init__(**kwargs)
def lossfun(self, x_r, x_f, f_D_x_f, f_D_x_r, f_C_x_r, f_C_x_f):
loss_x = K.mean(K.square(x_r - x_f))
loss_d = K.mean(K.square(f_D_x_r - f_D_x_f))
loss_c = K.mean(K.square(f_C_x_r - f_C_x_f))
return loss_x + loss_d + loss_c
def call(self, inputs):
x_r = inputs[0]
x_f = inputs[1]
f_D_x_r = inputs[2]
f_D_x_f = inputs[3]
f_C_x_r = inputs[4]
f_C_x_f = inputs[5]
loss = self.lossfun(x_r, x_f, f_D_x_r, f_D_x_f, f_C_x_r, f_C_x_f)
self.add_loss(loss, inputs=inputs)
return x_r
class FeatureMatchingLayer(Layer):
__name__ = 'feature_matching_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(FeatureMatchingLayer, self).__init__(**kwargs)
def lossfun(self, f1, f2):
f1_avg = K.mean(f1, axis=0)
f2_avg = K.mean(f2, axis=0)
return 0.5 * K.mean(K.square(f1_avg - f2_avg))
def call(self, inputs):
f1 = inputs[0]
f2 = inputs[1]
loss = self.lossfun(f1, f2)
self.add_loss(loss, inputs=inputs)
return f1
class KLLossLayer(Layer):
__name__ = 'kl_loss_layer'
def __init__(self, **kwargs):
self.is_placeholder = True
super(KLLossLayer, self).__init__(**kwargs)
def lossfun(self, z_avg, z_log_var):
kl_loss = -0.5 * K.mean(1.0 + z_log_var - K.square(z_avg) - K.exp(z_log_var))
return kl_loss
def call(self, inputs):
z_avg = inputs[0]
z_log_var = inputs[1]
loss = self.lossfun(z_avg, z_log_var)
self.add_loss(loss, inputs=inputs)
return z_avg
def discriminator_accuracy(x_r, x_f, x_p):
def accfun(y0, y1):
x_pos = K.ones_like(x_r)
x_neg = K.zeros_like(x_r)
loss_r = K.mean(keras.metrics.binary_accuracy(x_pos, x_r))
loss_f = K.mean(keras.metrics.binary_accuracy(x_neg, x_f))
loss_p = K.mean(keras.metrics.binary_accuracy(x_neg, x_p))
return (1.0 / 3.0) * (loss_r + loss_p + loss_f)
return accfun
def generator_accuracy(x_p, x_f):
def accfun(y0, y1):
x_pos = K.ones_like(x_p)
loss_p = K.mean(keras.metrics.binary_accuracy(x_pos, x_p))
loss_f = K.mean(keras.metrics.binary_accuracy(x_pos, x_f))
return 0.5 * (loss_p + loss_f)
return accfun
class CVAEGAN(CondBaseModel):
def __init__(self,
input_shape=(64, 64, 3),
num_attrs=40,
z_dims = 128,
name='cvaegan',
**kwargs
):
super(CVAEGAN, self).__init__(input_shape=input_shape, name=name, **kwargs)
self.input_shape = input_shape
self.num_attrs = num_attrs
self.z_dims = z_dims
self.f_enc = None
self.f_dec = None
self.f_dis = None
self.f_cls = None
self.enc_trainer = None
self.dec_trainer = None
self.dis_trainer = None
self.cls_trainer = None
self.build_model()
def train_on_batch(self, x_batch):
x_r, c = x_batch
batchsize = len(x_r)
z_p = np.random.normal(size=(batchsize, self.z_dims)).astype('float32')
x_dummy = np.zeros(x_r.shape, dtype='float32')
c_dummy = np.zeros(c.shape, dtype='float32')
z_dummy = np.zeros(z_p.shape, dtype='float32')
y_dummy = np.zeros((batchsize, 1), dtype='float32')
f_dummy = np.zeros((batchsize, 8192), dtype='float32')
# Train autoencoder
self.enc_trainer.train_on_batch([x_r, c, z_p], [x_dummy, z_dummy])
# Train generator
g_loss, _, _, _, _, _, g_acc = self.dec_trainer.train_on_batch([x_r, c, z_p], [x_dummy, f_dummy, f_dummy])
# Train classifier
self.cls_trainer.train_on_batch([x_r, c], c_dummy)
# Train discriminator
d_loss, d_acc = self.dis_trainer.train_on_batch([x_r, c, z_p], y_dummy)
loss = {
'g_loss': g_loss,
'd_loss': d_loss,
'g_acc': g_acc,
'd_acc': d_acc
}
return loss
def predict(self, z_samples):
return self.f_dec.predict(z_samples)
def build_model(self):
self.f_enc = self.build_encoder(output_dims=self.z_dims*2)
self.f_dec = self.build_decoder()
self.f_dis = self.build_discriminator()
self.f_cls = self.build_classifier()
# Algorithm
x_r = Input(shape=self.input_shape)
c = Input(shape=(self.num_attrs,))
z_params = self.f_enc([x_r, c])
z_avg = Lambda(lambda x: x[:, :self.z_dims], output_shape=(self.z_dims,))(z_params)
z_log_var = Lambda(lambda x: x[:, self.z_dims:], output_shape=(self.z_dims,))(z_params)
z = Lambda(sample_normal, output_shape=(self.z_dims,))([z_avg, z_log_var])
kl_loss = KLLossLayer()([z_avg, z_log_var])
z_p = Input(shape=(self.z_dims,))
x_f = self.f_dec([z, c])
x_p = self.f_dec([z_p, c])
y_r, f_D_x_r = self.f_dis(x_r)
y_f, f_D_x_f = self.f_dis(x_f)
y_p, f_D_x_p = self.f_dis(x_p)
d_loss = DiscriminatorLossLayer()([y_r, y_f, y_p])
c_r, f_C_x_r = self.f_cls(x_r)
c_f, f_C_x_f = self.f_cls(x_f)
c_p, f_C_x_p = self.f_cls(x_p)
g_loss = GeneratorLossLayer()([x_r, x_f, f_D_x_r, f_D_x_f, f_C_x_r, f_C_x_f])
gd_loss = FeatureMatchingLayer()([f_D_x_r, f_D_x_p])
gc_loss = FeatureMatchingLayer()([f_C_x_r, f_C_x_p])
c_loss = ClassifierLossLayer()([c, c_r])
# Build classifier trainer
set_trainable(self.f_enc, False)
set_trainable(self.f_dec, False)
set_trainable(self.f_dis, False)
set_trainable(self.f_cls, True)
self.cls_trainer = Model(inputs=[x_r, c],
outputs=[c_loss])
self.cls_trainer.compile(loss=[zero_loss],
optimizer=Adam(lr=2.0e-4, beta_1=0.5))
self.cls_trainer.summary()
# Build discriminator trainer
set_trainable(self.f_enc, False)
set_trainable(self.f_dec, False)
set_trainable(self.f_dis, True)
set_trainable(self.f_cls, False)
self.dis_trainer = Model(inputs=[x_r, c, z_p],
outputs=[d_loss])
self.dis_trainer.compile(loss=[zero_loss],
optimizer=Adam(lr=2.0e-4, beta_1=0.5),
metrics=[discriminator_accuracy(y_r, y_f, y_p)])
self.dis_trainer.summary()
# Build generator trainer
set_trainable(self.f_enc, False)
set_trainable(self.f_dec, True)
set_trainable(self.f_dis, False)
set_trainable(self.f_cls, False)
self.dec_trainer = Model(inputs=[x_r, c, z_p],
outputs=[g_loss, gd_loss, gc_loss])
self.dec_trainer.compile(loss=[zero_loss, zero_loss, zero_loss],
optimizer=Adam(lr=2.0e-4, beta_1=0.5),
metrics=[generator_accuracy(y_p, y_f)])
# Build autoencoder
set_trainable(self.f_enc, True)
set_trainable(self.f_dec, False)
set_trainable(self.f_dis, False)
set_trainable(self.f_cls, False)
self.enc_trainer = Model(inputs=[x_r, c, z_p],
outputs=[g_loss, kl_loss])
self.enc_trainer.compile(loss=[zero_loss, zero_loss],
optimizer=Adam(lr=2.0e-4, beta_1=0.5))
self.enc_trainer.summary()
# Store trainers
self.store_to_save('cls_trainer')
self.store_to_save('dis_trainer')
self.store_to_save('dec_trainer')
self.store_to_save('enc_trainer')
def build_encoder(self, output_dims):
x_inputs = Input(shape=self.input_shape)
c_inputs = Input(shape=(self.num_attrs,))
c = Reshape((1, 1, self.num_attrs))(c_inputs)
c = UpSampling2D(size=self.input_shape[:2])(c)
x = Concatenate(axis=-1)([x_inputs, c])
x = BasicConvLayer(filters=128, strides=(2, 2))(x)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=512, strides=(2, 2))(x)
x = Flatten()(x)
x = Dense(1024)(x)
x = Activation('relu')(x)
x = Dense(output_dims)(x)
x = Activation('linear')(x)
return Model([x_inputs, c_inputs], x)
def build_decoder(self):
z_inputs = Input(shape=(self.z_dims,))
c_inputs = Input(shape=(self.num_attrs,))
z = Concatenate()([z_inputs, c_inputs])
w = self.input_shape[0] // (2 ** 4)
x = Dense(w * w * 512)(z)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Reshape((w, w, 512))(x)
x = BasicDeconvLayer(filters=512, strides=(2, 2))(x)
x = BasicDeconvLayer(filters=256, strides=(2, 2))(x)
x = BasicDeconvLayer(filters=256, strides=(2, 2))(x)
x = BasicDeconvLayer(filters=128, strides=(2, 2))(x)
d = self.input_shape[2]
x = BasicDeconvLayer(filters=d, strides=(1, 1), bnorm=False, activation='tanh')(x)
return Model([z_inputs, c_inputs], x)
def build_discriminator(self):
inputs = Input(shape=self.input_shape)
x = BasicConvLayer(filters=128, strides=(2, 2))(inputs)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=512, strides=(2, 2))(x)
f = Flatten()(x)
x = Dense(1024)(f)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('sigmoid')(x)
return Model(inputs, [x, f])
def build_classifier(self):
inputs = Input(shape=self.input_shape)
x = BasicConvLayer(filters=128, strides=(2, 2))(inputs)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=256, strides=(2, 2))(x)
x = BasicConvLayer(filters=512, strides=(2, 2))(x)
f = Flatten()(x)
x = Dense(1024)(f)
x = Activation('relu')(x)
x = Dense(self.num_attrs)(x)
x = Activation('softmax')(x)
return Model(inputs, [x, f])
| tatsy/keras-generative | models/cvaegan.py | cvaegan.py | py | 12,613 | python | en | code | 123 | github-code | 1 | [
{
"api_name": "keras.backend.shape",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "keras.backend.shape",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keras.backend",... |
43200983306 | from faker import Faker
from git_class.models_new.database import create_db, Session
from git_class.models_new.car import Car
from git_class.models_new.info_car import InfoCar
def create_database(load_fake_data=True):
create_db()
if load_fake_data:
_load_fake_data(Session())
def _load_fake_data(session):
car_names = ['BMW', 'Audi', 'Mercedes', 'Lada', 'Opel', 'Ford', 'Honda', 'Lexus']
for i in car_names:
cars_n = Car(model=i)
session.add(cars_n)
session.commit()
faker = Faker('ru_RU')
for i in range(len(car_names)):
colors = faker.color()
forces = faker.random.randint(110, 250)
names = faker.name()
info = InfoCar(colors, forces, names)
session.add(info)
session.commit()
session.close()
| Mil6734/git_class | Python2/dz38/create_base.py | create_base.py | py | 801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "git_class.models_new.database.create_db",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "git_class.models_new.database.Session",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "git_class.models_new.car.Car",
"line_number": 17,
"usage_typ... |
3740386075 | import os
import torch
import torch.nn.functional as F
import glob
import imageio
import numpy as np
from utils.data_utils import get_image_to_tensor, get_mask_to_tensor
class DVRDataset(torch.utils.data.Dataset):
def __init__(self,
args,
mode,
list_prefix="softras_",
image_size=None,
scale_focal=True,
max_imgs=100000,
z_near=1.2,
z_far=4.0):
super().__init__()
self.args = args
self.mode = mode
self.base_dir = args.datadir
use_source_lut = True if args.src_view is not None else False
if use_source_lut:
print("Using views from list", args.src_view)
with open(args.src_view, "r") as f:
tmp = [x.strip().split() for x in f.readlines()]
self.source_lut = {
x[0] + "/" + x[1]: torch.tensor(list(map(int, x[2:])), dtype=torch.long)
for x in tmp
}
categories = [x for x in glob.glob(os.path.join(self.base_dir, "*")) if os.path.isdir(x)]
obj_lists = [os.path.join(x, list_prefix + '{}.lst'.format(mode)) for x in categories]
all_objs = []
for obj_list in obj_lists:
if not os.path.exists(obj_list):
continue
category_dir = os.path.dirname(obj_list)
category = os.path.basename(category_dir)
with open(obj_list, "r") as f:
objs = [(category, os.path.join(category_dir, x.strip())) for x in f.readlines()]
all_objs.extend(objs)
self.all_objs = all_objs
self.image_to_tensor = get_image_to_tensor()
self.mask_to_tensor = get_mask_to_tensor()
print("[Info] Loading DVR dataset: {}, mode: {}, "
"type: {}, {} objects in total".format(self.base_dir,
mode,
"ShapeNet",
len(self.all_objs)))
self.image_size = image_size
self._coord_trans = torch.diag(torch.tensor([1, -1, -1, 1], dtype=torch.float32))
self.scale_focal = scale_focal
self.max_imgs = max_imgs
self.z_near = z_near
self.z_far = z_far
self.lindisp = False
def __len__(self):
return len(self.all_objs)
def __getitem__(self, index):
category, obj_dir = self.all_objs[index]
rgb_paths = [x for x in glob.glob(os.path.join(obj_dir, "image", "*"))
if x.endswith(".jpg") or x.endswith(".png")]
rgb_paths = sorted(rgb_paths)
mask_paths = sorted(glob.glob(os.path.join(obj_dir, "mask", "*.png")))
if len(mask_paths) == 0:
mask_paths = [None] * len(rgb_paths)
if len(rgb_paths) <= self.max_imgs:
sel_indices = np.arange(len(rgb_paths))
else:
sel_indices = np.random.choice(len(rgb_paths), self.max_imgs, replace=False)
rgb_paths = [rgb_paths[i] for i in sel_indices]
mask_paths = [mask_paths[i] for i in sel_indices]
cam_path = os.path.join(obj_dir, "cameras.npz")
all_cam = np.load(cam_path)
all_imgs = []
all_poses = []
all_masks = []
all_bboxes = []
focal = None
for idx, (rgb_path, mask_path) in enumerate(zip(rgb_paths, mask_paths)):
i = sel_indices[idx]
img = imageio.imread(rgb_path)[..., :3]
if self.scale_focal:
x_scale = img.shape[1] / 2.0
y_scale = img.shape[0] / 2.0
xy_delta = 1.0
else:
x_scale = y_scale = 1.0
xy_delta = 0.0
if mask_path is not None:
mask = imageio.imread(mask_path)
if len(mask.shape) == 2:
mask = mask[..., None]
mask = mask[..., :1]
# ShapeNet
wmat_inv_key = "world_mat_inv_" + str(i)
wmat_key = "world_mat_" + str(i)
if wmat_inv_key in all_cam:
extr_inv_mtx = all_cam[wmat_inv_key]
else:
extr_inv_mtx = all_cam[wmat_key]
if extr_inv_mtx.shape[0] == 3:
extr_inv_mtx = np.vstack((extr_inv_mtx, np.array([0, 0, 0, 1])))
extr_inv_mtx = np.linalg.inv(extr_inv_mtx)
intr_mtx = all_cam["camera_mat_" + str(i)]
fx, fy = intr_mtx[0, 0], intr_mtx[1, 1]
assert abs(fx - fy) < 1e-9
fx = fx * x_scale
if focal is None:
focal = fx
else:
assert abs(fx - focal) < 1e-5
pose = extr_inv_mtx
pose = torch.tensor(pose, dtype=torch.float32) @ self._coord_trans
img_tensor = self.image_to_tensor(img)
if mask_path is not None:
mask_tensor = self.mask_to_tensor(mask)
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rnz = np.where(rows)[0]
cnz = np.where(cols)[0]
if len(rnz) == 0:
raise RuntimeError("ERROR: Bad image at", rgb_path, "please investigate!")
rmin, rmax = rnz[[0, -1]]
cmin, cmax = cnz[[0, -1]]
bbox = torch.tensor([cmin, rmin, cmax, rmax], dtype=torch.float32)
all_masks.append(mask_tensor)
all_bboxes.append(bbox)
all_imgs.append(img_tensor)
all_poses.append(pose)
if mask_path is not None:
all_bboxes = torch.stack(all_bboxes)
all_imgs = torch.stack(all_imgs)
all_poses = torch.stack(all_poses)
if len(all_masks) > 0:
all_masks = torch.stack(all_masks)
else:
all_masks = None
if self.image_size is not None and all_imgs.shape[-2:] != self.image_size:
scale = self.image_size[0] / all_imgs.shape[-2]
focal *= scale
if mask_path is not None:
all_bboxes *= scale
all_imgs = F.interpolate(all_imgs, size=self.image_size, mode="area")
if all_masks is not None:
all_masks = F.interpolate(all_masks, size=self.image_size, mode="area")
H, W = all_imgs.shape[-2:]
if self.args.src_view is None:
src_view = np.random.choice(len(rgb_paths), 1)[0]
else:
# src_view = int(self.args.src_view)
obj_id = category + "/" + os.path.basename(obj_dir)
src_view = self.source_lut[obj_id]
src_img = all_imgs[src_view:src_view+1]
src_pose = all_poses[src_view:src_view+1]
# src_mask = all_masks[src_view:src_view+1]
# src_bbox = all_bboxes[src_view:src_view+1]
src_path = rgb_paths[src_view]
K = np.array([[focal, 0., W // 2, 0.],
[0., focal, H // 2, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]]).astype(np.float32)
intrinsics = torch.from_numpy(K).view(1, 4, 4).repeat(len(all_imgs), 1, 1)
depth_range = torch.tensor([self.z_near, self.z_far], dtype=torch.float32).view(1, -1).repeat(len(all_imgs), 1)
ret = {
"obj_dir": obj_dir,
"category": category,
"index": index,
"intrinsics": intrinsics,
"rgb_paths": rgb_paths,
"images": all_imgs,
"poses": all_poses,
"depth_range": depth_range,
"src_view": src_view,
"src_path": src_path,
"src_image": src_img,
# "src_mask": src_mask,
"src_pose": src_pose,
# "src_bbox": src_bbox
}
if all_masks is not None:
ret["masks"] = all_masks
ret["bboxes"] = all_bboxes
return ret
| xingyi-li/SymmNeRF | code/datasets/dvr_dataset.py | dvr_dataset.py | py | 8,219 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "torch.utils",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_n... |
25846744127 | # -*- coding: utf-8 -*-
from contextlib import contextmanager
try:
from typing import Type
except ImportError: # Python 2.x
pass
import redis
import datetime
from bitmapist4 import events as ev
class Bitmapist(object):
    """
    Core bitmapist object.

    Tracks "events" (e.g. "user X was active today") as bits in Redis
    bitmaps, one bitmap per (event name, period) pair, with the subject's
    integer id used as the bit offset.
    """
    # Should hourly be tracked as default?
    # Note that this can have huge implications in amounts
    # of memory that Redis uses (especially with huge integers)
    track_hourly = False
    # Should unique events be tracked as default?
    track_unique = False
    def __init__(self,
                 connection_or_url=redis.StrictRedis(),
                 track_hourly=False,
                 track_unique=True,
                 finished_ops_expire=3600 * 24,
                 unfinished_ops_expire=60,
                 key_prefix='bitmapist_'):
        # Accept either a ready-made client or a redis:// URL.
        if isinstance(connection_or_url, redis.StrictRedis):
            self.connection = connection_or_url
        else:
            self.connection = redis.StrictRedis.from_url(connection_or_url)
        self.track_hourly = track_hourly
        self.track_unique = track_unique
        self.finished_ops_expire = finished_ops_expire
        self.unfinished_ops_expire = unfinished_ops_expire
        self.key_prefix = key_prefix
        # Non-None only while a transaction (Redis pipeline) is open.
        self.pipe = None
        # Create per-instance subclasses of the event classes so that each
        # class is bound to *this* Bitmapist (connection, prefix, expiry).
        kw = {'bitmapist': self}
        self.UniqueEvents = type('UniqueEvents', (ev.UniqueEvents, ),
                                 kw)  # type: Type[ev.UniqueEvents]
        self.YearEvents = type('YearEvents', (ev.YearEvents, ),
                               kw)  # type: Type[ev.YearEvents]
        self.MonthEvents = type('MonthEvents', (ev.MonthEvents, ),
                                kw)  # type: Type[ev.MonthEvents]
        self.WeekEvents = type('WeekEvents', (ev.WeekEvents, ),
                               kw)  # type: Type[ev.WeekEvents]
        self.DayEvents = type('DayEvents', (ev.DayEvents, ),
                              kw)  # type: Type[ev.DayEvents]
        self.HourEvents = type('HourEvents', (ev.HourEvents, ),
                               kw)  # type: Type[ev.HourEvents]
        self.BitOpAnd = type('BitOpAnd', (ev.BitOpAnd, ),
                             kw)  # type: Type[ev.BitOpAnd]
        self.BitOpOr = type('BitOpOr', (ev.BitOpOr, ),
                            kw)  # type: Type[ev.BitOpOr]
        self.BitOpXor = type('BitOpXor', (ev.BitOpXor, ),
                             kw)  # type: Type[ev.BitOpXor]
        self.BitOpNot = type('BitOpNot', (ev.BitOpNot, ),
                             kw)  # type: Type[ev.BitOpNot]
    def mark_event(self,
                   event_name,
                   uuid,
                   timestamp=None,
                   track_hourly=None,
                   track_unique=None):
        """
        Marks an event as "happened" for a specific moment. The function
        stores the event for the day, week and month, and optionally
        for the hour, as well as the unique event.

        - event_name is the name of the event to track
        - uuid is the unique id of the subject (typically user id). The id
          should not be huge
        - timestamp is an optional moment of time which date should be used as
          a reference point, default is to `datetime.utcnow()`

        Examples::

            # Mark id 1 as active
            b.mark_event('active', 1)
            # Mark task completed for id 252
            b.mark_event('tasks:completed', 252)
        """
        self._mark(event_name, uuid, timestamp, 1, track_hourly, track_unique)
    def unmark_event(self,
                     event_name,
                     uuid,
                     timestamp=None,
                     track_hourly=None,
                     track_unique=None):
        """
        Marks an event as "not happened" for a specific moment. The function
        stores the event for the day, week and month, and optionally
        for the hour, as well as the unique event.
        """
        self._mark(event_name, uuid, timestamp, 0, track_hourly, track_unique)
    def _mark(self, event_name, uuid, timestamp, value, track_hourly,
              track_unique):
        """Set (value=1) or clear (value=0) the uuid's bit for all tracked periods."""
        if timestamp is None:
            timestamp = datetime.datetime.utcnow()
        # None means "use the instance-level default".
        if track_hourly is None:
            track_hourly = self.track_hourly
        if track_unique is None:
            track_unique = self.track_unique
        obj_classes = [self.MonthEvents, self.WeekEvents, self.DayEvents]
        if track_hourly:
            obj_classes.append(self.HourEvents)
        if track_unique:
            obj_classes.append(self.UniqueEvents)
        # Inside a transaction reuse the open pipeline; otherwise use a
        # throwaway one so all period keys are updated in a single round trip.
        if self.pipe is None:
            pipe = self.connection.pipeline()
        else:
            pipe = self.pipe
        for obj_class in obj_classes:
            pipe.setbit(
                obj_class.from_date(event_name, timestamp).redis_key, uuid,
                value)
        if self.pipe is None:
            pipe.execute()
    def start_transaction(self):
        """Open a Redis pipeline; subsequent marks are buffered until commit."""
        if self.pipe is not None:
            raise RuntimeError("Transaction already started")
        self.pipe = self.connection.pipeline()
    def commit_transaction(self):
        """Flush the buffered pipeline commands to Redis and close the transaction."""
        if self.pipe is None:
            raise RuntimeError("Transaction not started")
        self.pipe.execute()
        self.pipe = None
    def rollback_transaction(self):
        """Discard the open pipeline without executing the buffered commands."""
        self.pipe = None
    @contextmanager
    def transaction(self):
        """Context manager: commit on success, roll back on any exception."""
        self.start_transaction()
        try:
            yield
            self.commit_transaction()
        except:
            self.rollback_transaction()
            raise
    def mark_unique(self, event_name, uuid):
        """
        Mark unique event as "happened with a user"

        Unique event (aka "user flag") is an event which doesn't depend on date.
        Can be used for storing user properties, A/B testing, extra filtering,
        etc.

        - event_name: The name of the event, could be "active" or "new_signups"
        - uuid: a unique id, typically user id. The id should not be huge

        Example::

            # Mark id 42 as premium
            b.mark_unique('premium', 42)
        """
        self._mark_unique(event_name, uuid, value=1)
    def unmark_unique(self, event_name, uuid):
        """
        Mark unique event as "not happened with a user"

        Unique event (aka "user flag") is an event which doesn't depend on date.
        Can be used for storing user properties, A/B testing, extra filtering,
        etc.

        - event_name: The name of the event, could be "active" or "new_signups"
        - uuid: a unique id, typically user id. The id should not be huge

        Example::

            # Mark id 42 as premium
            b.unmark_unique('premium', 42)
        """
        self._mark_unique(event_name, uuid, value=0)
    def _mark_unique(self, event_name, uuid, value):
        """Set or clear a date-independent flag bit, honoring an open transaction."""
        conn = self.connection if self.pipe is None else self.pipe
        redis_key = self.UniqueEvents(event_name).redis_key
        conn.setbit(redis_key, uuid, value)
    def get_event_names(self, prefix='', batch=10000):
        """
        Return the list of all event names, with no particular order. Optional
        `prefix` value is used to filter only subset of keys
        """
        expr = '{}{}*'.format(self.key_prefix, prefix)
        ret = set()
        for result in self.connection.scan_iter(match=expr, count=batch):
            result = result.decode()
            # Key layout is "<prefix>_<event name>_<period suffix>"; strip
            # the first and last underscore-separated chunks.
            chunks = result.split('_')
            event_name = '_'.join(chunks[1:-1])
            # Temporary bit-op result keys are not real event names.
            if not event_name.startswith('bitop_'):
                ret.add(event_name)
        return sorted(ret)
    def delete_all_events(self):
        """
        Delete all events from the database.
        """
        # NOTE: KEYS is O(n) over the whole keyspace; fine for maintenance use.
        keys = self.connection.keys('{}*'.format(self.key_prefix))
        if keys:
            self.connection.delete(*keys)
    def delete_temporary_bitop_keys(self):
        """
        Delete all temporary keys that are used when using bit operations.
        """
        keys = self.connection.keys('{}bitop_*'.format(self.key_prefix))
        if keys:
            self.connection.delete(*keys)
    def prefix_key(self, event_name, date):
        """Build the Redis key "<key_prefix><event_name>_<date>" for an event/period pair."""
        return '{}{}_{}'.format(self.key_prefix, event_name, date)
| Doist/bitmapist4 | bitmapist4/core.py | core.py | py | 8,248 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "redis.StrictRedis",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "redis.StrictRedis",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "redis.StrictRedis.from_url",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "r... |
10813165237 | import flask
from flask import Flask,request , jsonify
from xyz import AddTwo as ad
app = Flask(__name__)
@ app.route('/')
def test():
    """Health-check endpoint: always responds with a static OK payload."""
    payload = {"status": "ok"}
    return jsonify(payload)
@ app.route('/parsename',methods=['GET'])
def test_name():
    """Echo back the ``name`` query parameter as JSON."""
    entered_name = request.args.get("name")
    return jsonify({"Entered name = ": entered_name})
@ app.route('/twopara',methods=['GET'])
def two_para():
    """Combine the ``name`` and ``rollno`` query parameters into a sentence.

    Bug fix: the original template "Roll No{}has the name:{}" had no
    spacing around the placeholders and rendered e.g. "Roll No7has the
    name:Ann"; the placeholders are now properly separated.
    """
    name = request.args.get('name')
    rollno = request.args.get('rollno')
    return "Roll No {} has the name: {}".format(rollno, name)
@ app.route('/addtwo',methods=['GET'])
def add_two_num():
    """Add the ``fn`` and ``sn`` query parameters via the AddTwo helper."""
    first = request.args.get("fn")
    second = request.args.get("sn")
    adder = ad()
    total = adder.add_Two(first, second)
    return "Sum of {} and {} is : {}".format(first, second, total)
if __name__ == '__main__':
    # Start the Flask development server on a non-default port.
    app.run(port=50001)
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
... |
45121110514 | # -*- coding: utf-8 -*-
import sys
import os
import json
import argparse
def print_rank_0(*args, **kwargs):
    """Forward to ``print`` only on the rank-0 process.

    The rank comes from the ``RANK`` environment variable (distributed
    launchers set it); when unset the process is treated as rank 0.
    """
    if int(os.getenv("RANK", "0")) == 0:
        print(*args, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("module_name", type=str)
parser.add_argument("--output_preLen", type=int, default=20)
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
args = parser.parse_args()
with open("input_text.json", encoding="utf-8") as f:
dict_input = json.load(f)
input = {
"txt": dict_input[str(args.output_preLen)],
"preLen": args.output_preLen # 预测的长度
}
if not args.only_pytorch:
sys.path.append('/path/to/writer/AI_Writer_Web')
from AI_Writer_Web.infer import Writer as Writer_Oneflow
writer_oneflow = Writer_Oneflow()
output_oneflow = writer_oneflow.inference(input)
oneflow_time = output_oneflow["time"]
#print(output_oneflow["txt"])
print_rank_0(
f"OneFlow {args.module_name} time: {oneflow_time:.8f}s , output_preLen={args.output_preLen}"
)
if not args.only_oneflow:
sys.path.append('/path/to/writer/AI_Writer')
from AI_Writer.infer import Writer as Writer_Pytorch
writer_pytorch = Writer_Pytorch()
output_pytorch = writer_pytorch.inference(input)
pytorch_time = output_pytorch["time"]
#print(output_pytorch["txt"])
print_rank_0(
f"PyTorch {args.module_name} time: {pytorch_time:.8f}s , output_preLen={args.output_preLen}"
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
print_rank_0(
f"Relative speed: {relative_speed:.8f} (= {pytorch_time:.8f}s / {oneflow_time:.8f}s)"
)
| Oneflow-Inc/OneAutoTest | eager/AI_Writer/compare_speed_with_pytorch.py | compare_speed_with_pytorch.py | py | 1,978 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"lin... |
19578680978 | import os
from flask import Blueprint, request, make_response
from werkzeug.utils import secure_filename
from uploadFileTask import handle_file
from ..models import db,Products
import pandas as pd
#from uploadFileTask import handle_file
from multiprocessing.pool import ThreadPool as Pool
upload_products = Blueprint('upload', __name__,
template_folder='templates')
t = Pool(processes=20)
@upload_products.route('/upload', methods=['POST'])
def upload():
    """Receive one Dropzone.js chunk and append it to the upload file.

    Chunk metadata arrives in the form fields ``dzchunkindex``,
    ``dzchunkbyteoffset`` and ``dztotalchunkcount``.  When the final chunk
    lands, the assembled CSV is handed to ``handle_file`` for import.
    """
    # Remember the paramName was set to 'file', we can use that here to grab it
    # NOTE(review): ``file`` shadows a builtin name.
    file = request.files['file']
    filename = secure_filename(file.filename)
    print(filename)
    # secure_filename makes sure the filename isn't unsafe to save
    save_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), secure_filename(file.filename))
    current_chunk = int(request.form['dzchunkindex'])
    print("chunck ",current_chunk)
    print("chunk total count",)
    # A fresh upload (chunk 0) replaces any stale partial file of the same name.
    if os.path.exists(save_path) and current_chunk == 0:
        os.remove(save_path)
    with open(save_path, 'ab') as f:
        # Goto the offset, aka after the chunks we already wrote
        f.seek(int(request.form['dzchunkbyteoffset']))
        data=file.stream.read()
        f.write(data)
    # Last chunk: kick off the import of the fully assembled file.
    if current_chunk ==(int(request.form['dztotalchunkcount'])-1):
        #t1 = threading.Thread(target=store_data_in_db, args=(save_path,db))
        #t1.start()
        # t1.join()
        #store_data_in_db(save_path)
        # t.join()
        # t.close()
        #handle_file.send(save_path)
        # NOTE(review): this calls the module-level ``handle_file`` defined
        # below, which shadows the ``handle_file`` imported from
        # uploadFileTask at the top of the file — confirm which is intended.
        handle_file(save_path)
    return make_response(('Chunk', 200))
def handle_file(save_path):
    """Split the uploaded CSV into chunks and persist each via the thread pool."""
    for chunk in get_next_record(save_path):
        t.map(store_in_db, (chunk,))
def get_next_record(file):
    """Yield the de-duplicated product CSV in up to 100 DataFrame chunks.

    Bug fix: the original used a fixed chunk size of ``rows // 100``, so
    with fewer than 100 rows it yielded 100 empty frames, and for row
    counts not divisible by 100 it silently dropped the trailing
    remainder rows.  This version covers every row exactly once.
    """
    df = pd.read_csv(file, sep=',')
    # Keep only the first occurrence of each SKU.
    df = df.drop_duplicates(subset='sku', keep='first')
    total = df.shape[0]
    if total == 0:
        return
    # Ceiling division: at most 100 chunks, together covering all rows.
    chunk_size = max(1, -(-total // 100))
    for start in range(0, total, chunk_size):
        yield df[start:start + chunk_size]
def store_in_db(dataset):
    """Bulk-insert one DataFrame chunk of products into the database."""
    #dataset.drop_duplicates(subset='sku', keep="last")
    records = [
        Products(name=row[0], sku=row[1], description=row[2])
        for _, row in dataset.iterrows()
    ]
    #db.session.merge(record)
    try:
        db.session.bulk_save_objects(records)
        db.session.commit()
    except Exception as e:
        print(e)
def store_data_in_db(file,db):
    """Legacy synchronous import: wipe the Products table and re-load it row by row.

    NOTE(review): appears superseded by handle_file/store_in_db above —
    confirm before removing.  Assumes the CSV has no header row
    (``header=None``) with columns name, sku, description.
    """
    # Remove every existing product before importing the new file.
    Products.query.delete()
    db.session.commit()
    import pandas as pd
    df = pd.read_csv(file, sep=',', header=None)
    try:
        for index, row in df.iterrows():
            # merge() inserts or updates by primary key.
            record = Products(name=row[0], sku=row[1], description=row[2])
            db.session.merge(record)
    except Exception as e :
        print(e)
    db.session.commit()
| saxenakartik007/ACME | product_importer/routes/uploadProducts.py | uploadProducts.py | py | 2,808 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "multiprocessing.pool.ThreadPool",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name... |
1155231342 | import pygame
class Ship:
    """Player ship sprite that moves within the bounds of a pygame screen."""
    def __init__(self, screen):
        # Bug fix: keep a reference to the drawing surface instead of
        # relying on the module-level global ``screen`` inside draw().
        self.screen = screen
        self.character = pygame.image.load('hw_images/ship_0009.png')
        self.character_rect = self.character.get_rect()
        self.screen_rect = screen.get_rect()
        # Start centered on the screen.
        self.character_rect.center = self.screen_rect.center
        self.ship_speed = 1.0
        # Track position as floats so fractional speeds accumulate correctly.
        self.x = float(self.character_rect.x)
        self.y = float(self.character_rect.y)
        # Movement flags toggled by KEYDOWN/KEYUP events.
        self.moving_right = False
        self.moving_left = False
        self.moving_up = False
        self.moving_down = False
    def draw(self):
        """Blit the ship onto the surface passed to the constructor."""
        self.screen.blit(self.character, self.character_rect)
    def update(self):
        """Advance the position by ship_speed in each active direction, clamped to the screen."""
        if self.moving_right and self.character_rect.right < self.screen_rect.right:
            self.x += self.ship_speed
        if self.moving_left and self.character_rect.left > 0:
            self.x -= self.ship_speed
        if self.moving_up and self.character_rect.top > 0:
            self.y -= self.ship_speed
        if self.moving_down and self.character_rect.bottom < self.screen_rect.bottom:
            self.y += self.ship_speed
        # Sync the integer rect used for drawing with the float position.
        self.character_rect.x = self.x
        self.character_rect.y = self.y
# Initialize pygame and create the 500x500 game window.
pygame.init()
screen = pygame.display.set_mode((500, 500))
ship = Ship(screen)
screen.fill((181, 255, 255))
# Main event/update/draw loop.
# NOTE(review): there is no pygame.QUIT handling, so the window cannot be
# closed normally — consider adding a quit branch.
while True:
    for event in pygame.event.get():
        # Key press starts movement in the corresponding direction...
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                ship.moving_right = True
            elif event.key == pygame.K_LEFT:
                ship.moving_left = True
            elif event.key == pygame.K_UP:
                ship.moving_up = True
            elif event.key == pygame.K_DOWN:
                ship.moving_down = True
        # ...and key release stops it.
        elif event.type == pygame.KEYUP:
            if event.key == pygame.K_RIGHT:
                ship.moving_right = False
            elif event.key == pygame.K_LEFT:
                ship.moving_left = False
            elif event.key == pygame.K_UP:
                ship.moving_up = False
            elif event.key == pygame.K_DOWN:
                ship.moving_down = False
    ship.update()
    # Clear the frame, redraw the ship, then flip the display buffer.
    screen.fill((181, 255, 255))
    ship.draw()
    pygame.display.flip()
| m251434/alien_invasion | homework/AI_1/rocket.py | rocket.py | py | 2,184 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.image.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode... |
9628500098 | from django.conf.urls import include, url
from django.views.generic import RedirectView
from . import views
# Person CRUD plus creation of the role records attached to a person.
person = [
    url(r'^$', views.PersonList.as_view(), name='person-list'),
    url(r'^new/$', views.PersonCreate.as_view(), name='person-create'),
    url(r'^(?P<pk>[^/]+)/$', views.PersonDetail.as_view(), name='person-detail'),
    url(r'^(?P<pk>[^/]+)/delete/$', views.PersonDelete.as_view(), name='person-delete'),
    url(r'^(?P<pk>[^/]+)/edit/$', views.PersonEdit.as_view(), name='person-edit'),
    url(r'^(?P<pk>[^/]+)/volunteer/$', views.VolunteerCreate.as_view(), name='volunteer-create'),
    url(r'^(?P<pk>[^/]+)/recipient/$', views.RecipientCreate.as_view(), name='recipient-create'),
    url(r'^(?P<pk>[^/]+)/member/$', views.MemberCreate.as_view(), name='member-create'),
    url(r'^(?P<pk>[^/]+)/membership/$', views.MembershipCreate.as_view(), name='membership-create'),
]
# Recipient views (numeric pk).
recipient = [
    url(r'^$', views.RecipientList.as_view(), name='recipient-list'),
    url(r'^(?P<pk>\d+)/$', views.RecipientDetail.as_view(), name='recipient-detail'),
    url(r'^(?P<pk>\d+)/delete/$', views.RecipientDelete.as_view(), name='recipient-delete'),
    url(r'^(?P<pk>\d+)/edit/$', views.RecipientEdit.as_view(), name='recipient-edit'),
    url(r'^(?P<pk>\d+)/custodian/$', views.CustodianCreate.as_view(), name='custodian-create'),
]
# Volunteer views.
volunteer = [
    url(r'^$', views.VolunteerList.as_view(), name='volunteer-list'),
    url(r'^(?P<pk>\d+)/$', views.VolunteerDetail.as_view(), name='volunteer-detail'),
    url(r'^(?P<pk>\d+)/delete/$', views.VolunteerDelete.as_view(), name='volunteer-delete'),
    url(r'^(?P<pk>\d+)/edit/$', views.VolunteerEdit.as_view(), name='volunteer-edit'),
]
# Member views (no delete route).
member = [
    url(r'^$', views.MemberList.as_view(), name='member-list'),
    url(r'^(?P<pk>\d+)/$', views.MemberDetail.as_view(), name='member-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.MemberEdit.as_view(), name='member-edit'),
]
# Custodian detail/edit only; creation hangs off a recipient above.
custodian = [
    url(r'^(?P<pk>\d+)/$', views.CustodianDetail.as_view(), name='custodian-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.CustodianEdit.as_view(), name='custodian-edit'),
]
# Membership detail/edit; creation hangs off a person above.
membership = [
    url(r'^(?P<pk>\d+)/$', views.MembershipDetail.as_view(), name='membership-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.MembershipEdit.as_view(), name='membership-edit'),
]
# Project CRUD.
project = [
    url(r'^$', views.ProjectList.as_view(), name='project-list'),
    url(r'^(?P<pk>\d+)/$', views.ProjectDetail.as_view(), name='project-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.ProjectEdit.as_view(), name='project-edit'),
    url(r'^new/$', views.ProjectCreate.as_view(), name='project-create'),
]
# Group CRUD.
group = [
    url(r'^$', views.GroupList.as_view(), name='group-list'),
    url(r'^(?P<pk>\d+)/$', views.GroupDetail.as_view(), name='group-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.GroupEdit.as_view(), name='group-edit'),
    url(r'^new/$', views.GroupCreate.as_view(), name='group-create'),
]
# Event CRUD.
event = [
    url(r'^$', views.EventList.as_view(), name='event-list'),
    url(r'^(?P<pk>\d+)/$', views.EventDetail.as_view(), name='event-detail'),
    url(r'^(?P<pk>\d+)/edit/$', views.EventEdit.as_view(), name='event-edit'),
    url(r'^new/$', views.EventCreate.as_view(), name='event-create'),
]
# AJAX endpoints (unnamed).
ajax = [
    url(r'^person/', views.AjaxPersonList.as_view()),
]
# Top-level routing table combining all the sub-lists plus auth/misc pages.
urlpatterns = [
    url(r'^ajax/', include(ajax)),
    url(r'^person/', include(person)),
    url(r'^recipient/', include(recipient)),
    url(r'^volunteer/', include(volunteer)),
    url(r'^member/', include(member)),
    url(r'^custodian/', include(custodian)),
    url(r'^membership/', include(membership)),
    url(r'^group/', include(group)),
    url(r'^project/', include(project)),
    url(r'^event/', include(event)),
    url(r'^$', RedirectView.as_view(url='login/')),
    url(r'^missing_doc/$', views.missing_doc, name='missing_doc'),
    url(r'^login/$', views.Login.as_view(), name='login'),
    url(r'^logout/$', views.Logout.as_view(), name='logout'),
    url(r'^home/$', views.Home.as_view(), name='home'),
    url(r'^basicformnewperson/$', views.NewIndividualMember.as_view(), name='basicformnewperson'),
    url(r'^basicformnewfamily/$', views.NewFamilyMember.as_view(), name='basicformnewfamily'),
    url(r'^membership/$', views.MembershipList.as_view(), name='membership-list'),
    url(r'^membership/(?P<pk>[^/]+)/$', views.MembershipDetail.as_view(), name='membership-detail'),
]
| abertal/alpha | webapp/urls.py | urls.py | py | 4,421 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
4566060055 | from datetime import datetime, timedelta
from unittest.mock import AsyncMock
from framework.clients.cache_client import CacheClientAsync
from framework.di.service_collection import ServiceCollection
from clients.azure_gateway_client import AzureGatewayClient
from clients.email_gateway_client import EmailGatewayClient
from clients.twilio_gateway import TwilioGatewayClient
from tests.buildup import ApplicationBase
class TwilioGatewayClientTests(ApplicationBase):
    """Test harness for the Twilio gateway client (cache replaced by a mock)."""
    def configure_services(self, service_collection: ServiceCollection):
        # Register an async mock in place of the real cache client so the
        # tests need no Redis instance.
        self.mock_cache_client = AsyncMock()
        service_collection.add_singleton(
            dependency_type=CacheClientAsync,
            factory=lambda services: self.mock_cache_client)
    def get_client(
        self
    ) -> TwilioGatewayClient:
        # Simulate a cache miss so the client always fetches fresh data.
        self.mock_cache_client.get_cache.return_value = None
        return self.resolve(TwilioGatewayClient)
    # The SMS test below is intentionally disabled: it would send a real
    # text message.
    # async def test_send_sms(self):
    #     client = self.get_client()
    #     response = await client.send_sms(
    #         recipient='+18563323608',
    #         message='test')
    #     self.assertIsNotNone(response)
class EmailGatewayClientTests(ApplicationBase):
    """Integration tests for EmailGatewayClient with the cache mocked out."""
    def configure_services(self, service_collection: ServiceCollection):
        # Replace the real cache client with an async mock so the tests
        # need no Redis instance.
        self.mock_cache_client = AsyncMock()
        def get_mock_cache_client(services):
            return self.mock_cache_client
        service_collection.add_singleton(
            dependency_type=CacheClientAsync,
            factory=get_mock_cache_client)
    def get_client(
        self
    ) -> EmailGatewayClient:
        # Simulate a cache miss so the client always fetches fresh data.
        self.mock_cache_client.get_cache.return_value = None
        return self.resolve(EmailGatewayClient)
    async def test_send_email(self):
        # Plain-text email round trip.
        client = self.get_client()
        response = await client.send_email(
            subject='Test',
            recipient='me@dan-leonard.com',
            message='Test')
        self.assertIsNotNone(response)
    async def test_send_datatable_email(self):
        # Email with a tabular payload.
        client = self.get_client()
        data = [{'row': 'value'}]
        response = await client.send_datatable_email(
            subject='Test',
            recipient='me@dan-leonard.com',
            data=data)
        self.assertIsNotNone(response)
    async def test_send_json_email(self):
        # Email with a JSON payload.
        client = self.get_client()
        data = [{'row': 'value'}]
        response = await client.send_json_email(
            subject='Test',
            recipient='me@dan-leonard.com',
            data=data)
        self.assertIsNotNone(response)
class AzureGatewayClientTests(ApplicationBase):
    """Integration tests for AzureGatewayClient with the cache mocked out."""
    def configure_services(self, service_collection: ServiceCollection):
        # Replace the real cache client with an async mock so the tests
        # need no Redis instance.
        self.mock_cache_client = AsyncMock()
        def get_mock_cache_client(services):
            return self.mock_cache_client
        service_collection.add_singleton(
            dependency_type=CacheClientAsync,
            factory=get_mock_cache_client)
    def get_client(
        self
    ) -> AzureGatewayClient:
        # Simulate a cache miss so the client always fetches fresh data.
        self.mock_cache_client.get_cache.return_value = None
        return self.resolve(AzureGatewayClient)
    async def test_get_pod_images(self):
        client = self.get_client()
        response = await client.get_pod_images()
        self.assertIsNotNone(response)
    async def test_get_acr_manifests(self):
        client = self.get_client()
        response = await client.acr_get_manifests(
            repository_name='framework')
        self.assertIsNotNone(response)
    async def test_get_repositories(self):
        client = self.get_client()
        response = await client.acr_get_repositories()
        self.assertIsNotNone(response)
    async def test_get_cost_management_data(self):
        # Query yesterday-to-today cost data.
        client = self.get_client()
        start_date = datetime.now() - timedelta(days=1)
        end_date = datetime.now()
        response = await client.get_cost_management_data(
            start_date=start_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d'))
        self.assertIsNotNone(response)
    async def test_get_pods(self):
        client = self.get_client()
        response = await client.get_pods()
        self.assertIsNotNone(response)
    async def test_get_logs(self):
        client = self.get_client()
        pods_response = await client.get_pods()
        pods = pods_response.get('pods')
        # Bug fix: the original passed the pod's 'name' to ``namespace=``
        # and its 'namespace' to ``pod=`` — the keyword arguments were
        # swapped.
        response = await client.get_logs(
            namespace=pods[0].get('namespace'),
            pod=pods[0].get('name'))
        self.assertIsNotNone(response)
| danleonard-nj/kube-tools-api | services/kube-tools/tests/test_gateway_clients.py | test_gateway_clients.py | py | 4,621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tests.buildup.ApplicationBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "framework.di.service_collection.ServiceCollection",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 15,
"usage_type"... |
28852512367 | import tensorflow as tf
import matplotlib.pyplot as plt
# Load the CIFAR-10 dataset (downloads on first use).
cifar10=tf.keras.datasets.cifar10
(x_train,y_train),(x_test,y_test)=cifar10.load_data()
plt.imshow(x_train[0]) # plot the first training image
plt.show()
# Inspect the raw pixel data and labels.
print("x_train[0]:\n",x_train[0])
print(y_train)
print(x_test.shape)
| 1414003104/OldSheep_TensorFLow2.0_note | 13,卷积神经网络/CIfar10数据集.py | CIfar10数据集.py | py | 282 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tensorflow.keras",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplot... |
11211086355 | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import glob
import os
import shutil
required_conan_version = ">=1.32.0"
class VulkanValidationLayersConan(ConanFile):
    """Conan recipe packaging the Khronos Vulkan validation layers."""
    name = "vulkan-validationlayers"
    description = "Khronos official Vulkan validation layers for Windows, Linux, Android, and MacOS."
    license = "Apache-2.0"
    topics = ("conan", "vulkan-validation-layers", "vulkan", "validation-layers")
    homepage = "https://github.com/KhronosGroup/Vulkan-ValidationLayers"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "arch", "compiler", "build_type"
    # Window-system-integration backends (Linux only, see config_options).
    options = {
        "with_wsi_xcb": [True, False],
        "with_wsi_xlib": [True, False],
        "with_wsi_wayland": [True, False]
    }
    default_options = {
        "with_wsi_xcb": True,
        "with_wsi_xlib": True,
        "with_wsi_wayland": True
    }
    short_paths = True
    exports_sources = ["CMakeLists.txt", "patches/**"]
    generators = "cmake"
    # Cached CMake helper, built lazily by _configure_cmake().
    _cmake = None
    @property
    def _source_subfolder(self):
        return "source_subfolder"
    def config_options(self):
        # The WSI backends only exist on Linux.
        if self.settings.os != "Linux":
            del self.options.with_wsi_xcb
            del self.options.with_wsi_xlib
            del self.options.with_wsi_wayland
    def configure(self):
        # Requires C++11 and gcc >= 5.
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, 11)
        if self.settings.compiler == "gcc" and tools.Version(self.settings.compiler.version) < "5":
            raise ConanInvalidConfiguration("gcc < 5 is not supported")
    def requirements(self):
        self.requires("spirv-tools/2020.5", private=True)
        # Headers version is kept in lockstep with this recipe's version.
        self.requires("vulkan-headers/{}".format(self.version))
        if self.options.get_safe("with_wsi_xcb") or self.options.get_safe("with_wsi_xlib"):
            self.requires("xorg/system")
        if self.options.get_safe("with_wsi_wayland"):
            self.requires("wayland/1.18.0")
    def validate(self):
        if self.options["spirv-tools"].shared:
            raise ConanInvalidConfiguration("vulkan-validationlayers can't depend on shared spirv-tools")
    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = glob.glob("Vulkan-ValidationLayers-*")[0]
        os.rename(extracted_dir, self._source_subfolder)
    def build(self):
        self._patch_sources()
        cmake = self._configure_cmake()
        cmake.build()
    def _patch_sources(self):
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        # Conan packages the registry under res/ rather than share/.
        tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "FindVulkanHeaders.cmake"),
                              "HINTS ${VULKAN_HEADERS_INSTALL_DIR}/share/vulkan/registry",
                              "HINTS ${VULKAN_HEADERS_INSTALL_DIR}/res/vulkan/registry")
    def _configure_cmake(self):
        # Reuse the already-configured helper on subsequent calls.
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["VULKAN_HEADERS_INSTALL_DIR"] = self.deps_cpp_info["vulkan-headers"].rootpath
        self._cmake.definitions["USE_CCACHE"] = False
        if self.settings.os == "Linux":
            self._cmake.definitions["BUILD_WSI_XCB_SUPPORT"] = self.options.with_wsi_xcb
            self._cmake.definitions["BUILD_WSI_XLIB_SUPPORT"] = self.options.with_wsi_xlib
            self._cmake.definitions["BUILD_WSI_WAYLAND_SUPPORT"] = self.options.with_wsi_wayland
        self._cmake.definitions["BUILD_WERROR"] = False
        self._cmake.definitions["BUILD_TESTS"] = False
        self._cmake.definitions["INSTALL_TESTS"] = False
        self._cmake.definitions["BUILD_LAYERS"] = True
        self._cmake.definitions["BUILD_LAYER_SUPPORT_FILES"] = True
        self._cmake.configure()
        return self._cmake
    def package(self):
        self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        if self.settings.os == "Windows":
            # import lib is useless, validation layers are loaded at runtime
            lib_dir = os.path.join(self.package_folder, "lib")
            tools.remove_files_by_mask(lib_dir, "VkLayer_khronos_validation.lib")
            tools.remove_files_by_mask(lib_dir, "libVkLayer_khronos_validation.dll.a")
        else:
            # Move json files to res, but keep in mind to preserve relative
            # path between module library and manifest json file
            os.rename(os.path.join(self.package_folder, "share"), os.path.join(self.package_folder, "res"))
    def package_info(self):
        self.cpp_info.libs = ["VkLayer_utils"]
        # Manifest location differs per OS; consumers need VK_LAYER_PATH
        # pointing at it so the loader can find the layer.
        manifest_subfolder = "bin" if self.settings.os == "Windows" else os.path.join("res", "vulkan", "explicit_layer.d")
        vk_layer_path = os.path.join(self.package_folder, manifest_subfolder)
        self.output.info("Appending VK_LAYER_PATH environment variable: {}".format(vk_layer_path))
        self.env_info.VK_LAYER_PATH.append(vk_layer_path)
| SpaceIm/conan-vulkan-validationlayers | conanfile.py | conanfile.py | py | 5,098 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "conans.ConanFile",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "conans.tools.check_min_cppstd",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "conans.tools",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "conans.to... |
17316210746 | """
Flask: Using templates
"""
from asyncore import read
from re import M
from turtle import title
from setup_db import select_students, select_courses
import sqlite3
from sqlite3 import Error
from flask import Flask, render_template, request, redirect, url_for, g
app = Flask(__name__)
DATABASE = './database.db'
def get_db():
    """Return the app-context-scoped SQLite connection, opening it lazily."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        return conn
    conn = sqlite3.connect(DATABASE)
    g._database = conn
    return conn
@app.teardown_appcontext
def close_connection(exception):
    """Close the cached SQLite connection when the app context tears down."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
@app.route("/")
def index():
# get the database connection
conn = get_db()
return render_template("index.html",
students=select_students(conn),
courses=select_courses(conn)
)
@app.route("/courses", methods=["POST", "GET"])
def courses():
# get the database connection
if request.method == 'GET':
return render_template("courses_page.html"
# select_students executes SELECT SQL statement on database connetion
# returns list of students
)
else:
conn = get_db()
courseID=request.form['courseid']
cur = conn.cursor()
cur.execute("SELECT students.name, grades.course_id, courses.name, grades.grade FROM grades INNER JOIN students on students.student_no = grades.student_no INNER JOIN courses on grades.course_id = courses.id WHERE courses.id = ? ORDER BY grades.grade", (courseID,))
select_courses_id = []
for (students_name, course_id, coursesname, gradesgrade) in cur:
select_courses_id.append({
"students.name": students_name,
"grades.course_id": course_id,
"courses.name": coursesname,
"grades.grade": gradesgrade
})
print(select_courses_id)
return render_template("courses_page.html", select_courses_id=select_courses_id)
@app.route("/studentsP", methods=["POST", "GET"])
def studentsP():
# get the database connection
conn = get_db()
studentNo=None
if request.method == 'GET':
return render_template("courses_page.html",
)
else:
conn = get_db()
studentNo=request.form['student_id']
#studentNo=request.form['data']
cur = conn.cursor()
cur2 = conn.cursor()
cur2.execute("SELECT student_no, name FROM students WHERE student_no = ? ", (studentNo,))
select_student_name = []
for (student_n, studentsname) in cur2:
select_student_name.append({
"student_no":student_n,
"name": studentsname
})
break
print("Profile: ",select_student_name)
cur.execute("SELECT grades.course_id, courses.name, grades.grade FROM grades INNER JOIN students on students.student_no = grades.student_no INNER JOIN courses on grades.course_id = courses.id WHERE students.student_no = ?", (studentNo,))
select_student_no = []
for (coursid,coursesname,gradesgrade) in cur:
select_student_no.append({
"grades.course_id": coursid,
"courses.name": coursesname,
"grades.grade": gradesgrade
})
#print(select_student_no.index(coursesname))
return render_template("students_profile.html",
studentss=select_student_no,
select_student_nam = select_student_name)
@app.route("/addStudent", methods=["POST", "GET"])
def addStudent():
if request.method == 'GET':
# send the form
return render_template('add_student.html')
else:
student_no1 = request.form['student_id_no']
name1 = request.form['name']
conn = get_db()
cur = conn.cursor()
try:
sql = ''' INSERT INTO students(student_no,name)
VALUES(?,?) '''
cur.execute(sql, (student_no1, name1))
conn.commit(),
return redirect(url_for('index'))
except Error as e: # if error
# then display the error in 'error.html' page
return render_template('error.html', msg="Error adding new student.", error=e)
finally:
conn.close()
@app.route("/addGrade", methods=["POST", "GET"])
def addGrade():
if request.method == 'GET':
conn = get_db()
grades = [("A"), ("B"), ("C"), ("D"), ("E"), ("F")]
gra=grades
cur = conn.cursor()
#cur.execute("SELECT student_no FROM students WHERE name = ? ", (studentname,))
cur.execute("SELECT student_no FROM students")
select_student_name = []
for (student_n) in cur:
select_student_name.append({
"student_no":student_n
})
break
return render_template("add_grade.html",
students=select_students(conn),
courses=select_courses(conn),
gra = gra
)
else:
conn = get_db()
cur = conn.cursor()
studno = request.form['studno']
course = request.form['course']
grade = request.form['grade']
try:
sql = ''' INSERT INTO grades(student_no,course_id,grade)
VALUES(?,?,?) '''
cur.execute(sql, (studno, course, grade))
conn.commit(),
return redirect(url_for('index'))
except Error as e: # if error
# then display the error in 'error.html' page
return render_template('error.html', msg="Error adding new grade.", error=e)
finally:
conn.close()
if __name__ == "__main__":
app.run(debug=True) | m92kasem/VueJS-Flask-Full-Stack | assignment-6/app.py | app.py | py | 6,247 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "flask.g._database",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_nu... |
7417220562 | #!/usr/bin/env python3
from pydantic import BaseModel
from pydantic.schema import schema
from typing import Any
# Demo pydantic model.  Deliberately no class docstring: pydantic copies a
# class docstring into the schema "description", which would change the
# output of generateSchema() below.
class dumClass(BaseModel):
    # NOTE(review): B is annotated `str` but defaults to None; pydantic v1
    # treats such a field as implicitly optional — confirm that is intended.
    A : str = 'Hello'
    B : str = None
    C: bool = False
    def __init__(self, **data: Any):
        # Trace construction order around pydantic's validation in __init__.
        print("dumClass was called")
        super().__init__(**data)
        print("dumClass was initialized")
        # Post-validation tweak: the default greeting gets a paired farewell.
        if self.A == "Hello" :
            self.B = "Goodbye"
    def saySomething(self):
        # C acts as a verbosity switch; stay quiet unless it is True.
        if self.C is False :
            print("Shhhhhh......")
        else :
            print("A is " + self.A + "; B is " + self.B)
def troubleshoot() :
    """Build a dumClass from a sample payload and exercise saySomething().

    Fixes: ``ValidationError`` was referenced in the except clause without
    ever being imported, so a failed validation raised NameError instead of
    being reported; and ``saySomething()`` was called even when construction
    had failed (NameError on ``thisproject``).  It now runs only on success.
    """
    from pydantic import ValidationError  # not imported at module level
    print("Testing the dumClass!")
    thisJSON = {"A": "Hello", "C": True}
    try :
        thisproject = dumClass(**thisJSON)
    except ValidationError as e :
        print(e)
    else :
        thisproject.saySomething()
def generateSchema():
    """Print the JSON Schema for dumClass to stdout (pydantic v1 API)."""
    # Fix: removed the unused `import json` — schema_json() already returns
    # a formatted JSON string.
    print(dumClass.schema_json(indent=2))
if __name__ == "__main__":
generateSchema()
troubleshoot()
| GLYCAM-Web/gems | gemsModules/deprecated/Examples/Sample_Pydantic_Usage.py | Sample_Pydantic_Usage.py | py | 956 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 12,
"usage_type": "name"
}
] |
23985904341 | import hashlib
import math
import os
import re
import tkinter
import tkinter as tk
from tkinter import messagebox, filedialog
import threading
import pymysql
import tkinter as tk
from tkinter import ttk
import pandas as pd
from tkinter import filedialog
from openpyxl import Workbook
from tkinter import simpledialog
from tkinter import messagebox
from datetime import datetime
from login import run_login
from tqdm import tqdm
from sql_ui import login
# 连接 MySQL 数据库
def show_table_info(username):
# class MySQLTableInfo(tk.Frame):
global current_user
current_user = username
    def get_db_connection():
        """Open a fresh pymysql connection to the local `gui` database.

        Rows are returned as dicts (DictCursor), so columns are read by name
        throughout this module.
        NOTE(review): credentials are hard-coded; consider moving to config.
        """
        conn = pymysql.connect(
            host='localhost',
            port=3306,
            user='gui',
            password='LBD8TrTBeMZFBa8t',
            db='gui',
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor
        )
        return conn
def close_connection(conn, cursor):
cursor.close()
conn.close()
# print("MySQL connection is closed")
# 创建 tkinter 窗口
root = tk.Tk()
root.title("MySQL Table Info")
root.geometry("1580x600")
style = ttk.Style()
style.theme_use("default")
style.configure("Custom.Treeview", font=("Calibri", 13), bordercolor='red', borderwidth=1, relief="solid")
tree = ttk.Treeview(root, style="Custom.Treeview", selectmode='browse', show='headings')
tree.pack(side='left', fill='both', expand=True)
table_header = ['ID','库名', '备注', '日期', '总量', '已搜索', '搜索有的', '待搜索', '异常', '账号归属']
tree["columns"] = table_header
# 设置第一列宽度为200
# tree.column('#0', width=100)
# 设置其它列宽度为100
for col in table_header:
if col=='日期':
tree.column(col, width=180)
elif col == 'id':
tree.column(col, width=80)
elif col == '库名':
tree.column(col, width=180)
elif col == '备注':
tree.column(col, width=180)
else:
tree.column(col, width=70)
# 表头
for i in table_header:
tree.column(i, anchor="center")
tree.heading(i, text=i, anchor='center')
# 查询所有表名
conn = get_db_connection()
cursor = conn.cursor()
cursor = conn.cursor()
cursor.execute("show tables")
tables = cursor.fetchall()
close_connection(conn, cursor)
# conn = get_db_connection()
# cursor = conn.cursor()
# 每页显示的条数
page_size = 10
# 获取数据总数
total_count = len(tables)
# 分页页数
page_count = (total_count + page_size - 1) // page_size
# 分页数据
pages = [tables[i:i+page_size] for i in range(0, total_count, page_size)]
# 当前页码
current_page = 1
# 显示指定页码的表格数据
def is_super_user(username):
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute(f"SELECT is_suproot FROM users WHERE username = '{username}'")
result = cursor.fetchone()
close_connection(conn, cursor)
if result and result['is_suproot'] == 1:
return True
else:
return False
def show_table_data(page_num, username):
conn = get_db_connection()
cursor = conn.cursor()
tree.delete(*tree.get_children())
# 查询ordea_all表中的数据
if is_super_user(username):
cursor.execute(
f"SELECT * FROM ordea_all ORDER BY 日期 DESC LIMIT {page_size} OFFSET {(page_num - 1) * page_size}")
else:
cursor.execute(
f"SELECT * FROM ordea_all WHERE 账号归属 = '{username}' ORDER BY 日期 DESC LIMIT {page_size} OFFSET {(page_num - 1) * page_size}")
data = cursor.fetchall()
for row in data:
tree.insert('', 'end', values=(
row['id'], row['库名'], row['备注'], row['日期'], row['总量'], row['已搜索'], row['搜索有'], row['待搜索'],row['异常'],row['账号归属']))
close_connection(conn, cursor)
# 显示第一页的数据
show_table_data(current_page, username)
# 分页标签
page_label = tk.Label(root, text=f"页数: {current_page} / {page_count}")
page_label.pack(side='bottom', padx=3, pady=3)
def add_user():
# 判断是否是超级管理员,非超级管理员不展示添加用户的功能
if not is_super_user(username):
return
# 创建弹窗
add_user_window = tk.Toplevel(root)
add_user_window.title("添加用户")
# 添加用户名、密码输入框和添加按钮
username_label = tk.Label(add_user_window, text="用户名:")
username_label.grid(row=0, column=0, padx=5, pady=5)
username_entry = tk.Entry(add_user_window)
username_entry.grid(row=0, column=1, padx=5, pady=5)
password_label = tk.Label(add_user_window, text="密码:")
password_label.grid(row=1, column=0, padx=5, pady=5)
password_entry = tk.Entry(add_user_window, show="*")
password_entry.grid(row=1, column=1, padx=5, pady=5)
result_label = tk.Label(add_user_window, text="")
result_label.grid(row=2, column=0, columnspan=2, padx=5, pady=5)
def add_user_to_db():
# 从输入框中获取用户名和密码
new_username = username_entry.get()
new_password = password_entry.get()
# 对密码进行md5加密
md5 = hashlib.md5()
md5.update(new_password.encode())
new_password_md5 = md5.hexdigest()
# 查询数据库中是否已经有重名的用户
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM users WHERE username='{new_username}'")
existing_user = cursor.fetchone()
close_connection(conn, cursor)
if existing_user:
result_label.config(text=f"添加用户失败:用户名 '{new_username}' 已存在", fg="red")
else:
# 添加用户到users表中
try:
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO users (id, username, password) VALUES (NULL, '{new_username}', '{new_password_md5}')")
conn.commit()
close_connection(conn, cursor)
result_label.config(text="添加用户成功", fg="green")
except Exception as e:
result_label.config(text=f"添加用户失败:{str(e)}", fg="red")
refresh()
add_button = tk.Button(add_user_window, text="添加", command=add_user_to_db)
add_button.grid(row=3, column=1, padx=5, pady=5)
# 将弹窗居中显示
add_user_window.geometry("+%d+%d" % ((root.winfo_screenwidth() - add_user_window.winfo_reqwidth()) / 2,
(root.winfo_screenheight() - add_user_window.winfo_reqheight()) / 2))
def logout():
conn = get_db_connection()
cursor = conn.cursor()
close_connection(conn, cursor)
root.destroy()
run_login()
logout_button = tk.Button(root, text="退出登录", command=logout)
logout_button.pack(side='left', padx=3, pady=3)
if is_super_user(username):
add_user_button = tk.Button(root, text="添加用户", command=add_user)
add_user_button.pack(side='left', padx=3, pady=3)
# 上一页按钮
def previous_page():
global current_page
if current_page > 1:
current_page -= 1
show_table_data(current_page,username)
page_label.config(text=f"页数: {current_page} / {page_count}")
previous_button = tk.Button(root, text="上一页", command=previous_page)
previous_button.pack(side='bottom', padx=3, pady=3)
# 下一页按钮
def next_page():
global current_page
if current_page < page_count:
current_page += 1
show_table_data(current_page,username)
page_label.config(text=f"页数: {current_page} / {page_count}")
next_button = tk.Button(root, text="下一页", command=next_page)
next_button.pack(side='bottom', padx=3, pady=3)
# 创建添加表的对话框
def create_table_dialog():
dialog = tk.Toplevel(root)
dialog.geometry("300x100")
dialog.title("添加库")
# 添加表名输入框
label = tk.Label(dialog, text="输入库名")
label.pack(side='top')
entry = tk.Entry(dialog)
entry.pack(side='top')
# 添加确认按钮
def confirm():
table_name = entry.get()
if not table_name:
return
create_table(username,table_name)
dialog.destroy()
refresh()
confirm_button = tk.Button(dialog, text="确认", command=confirm)
confirm_button.pack(side='bottom',padx=3, pady=3)
# 删除库
    def delete_table():
        """Delete the selected order from `ordea_all`, then optionally DROP
        the backing `ordea_*` data table after a second confirmation.

        NOTE(review): the early `return` paths below leave `conn`/`cursor`
        open — close_connection only runs on the success path.
        """
        # 获取选中的表名
        conn = get_db_connection()
        cursor = conn.cursor()
        selected_item = tree.selection()
        if not selected_item:
            messagebox.showerror("错误", "未选中任何表格!")
            return
        table_name = tree.item(selected_item)['values'][1]
        # Only tables created by this tool (ordea_ prefix) may be deleted.
        if not table_name.startswith('ordea_'):
            messagebox.showerror("错误", "无法删除此表格!")
            return
        # 弹出确认删除对话框
        if not tkinter.messagebox.askyesno("确认", f"是否确定删除 '{table_name}'?"):
            return
        # 删除 ordea_all 表中对应的记录
        try:
            cursor.execute(f"DELETE FROM ordea_all WHERE 库名='{table_name}'")
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
            messagebox.showerror("错误", "删除记录失败!")
            return
        # 弹出是否删除数据表的对话框
        # NOTE(review): "是否也请客" looks like a typo (likely meant 清空/删除)
        # — runtime string left untouched pending confirmation.
        if tkinter.messagebox.askyesno("确认", f"订单删除成功,是否也请客 '{table_name}'所有数据?"):
            # 删除数据表
            try:
                cursor.execute(f"DROP TABLE `{table_name}`")
                conn.commit()
            except Exception as e:
                print(e)
                conn.rollback()
                messagebox.showerror("错误", "删除数据表失败!")
                return
        # 刷新表格内容
        close_connection(conn, cursor)
        refresh()
# 创建删除库按钮
delete_button = tk.Button(root, text="删除库", command=delete_table)
delete_button.pack(side='bottom', padx=3, pady=3)
# 创建导出按钮
def export_to_excel_yss():
conn = get_db_connection()
cursor = conn.cursor()
selected_items = tree.selection()
if not selected_items:
return
file_name = ""
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select count(*) from {table_name}")
result = cursor.fetchone()
if result:
count = result['count(*)']
table_name1 = table_name.replace("ordea_", "")
file_name += f"{table_name1}_所有_{count}.xlsx"
break
file_path = filedialog.asksaveasfilename(initialfile=file_name, defaultextension='.xlsx')
if not file_path:
return
progress_window = tk.Toplevel(root)
progress_window.title("导出进度")
progress_label = tk.Label(progress_window, text="正在导出中,请稍等...")
progress_label.pack()
progress_bar = tk.ttk.Progressbar(progress_window, mode="indeterminate")
progress_bar.pack(pady=10)
progress_bar.start(10)
progress_window.update()
wb = Workbook()
ws = wb.active
headers = ['id', '手机号', '名字', '性别', '昵称', '账号', '省份', '城市', '会员', '头像', '归属省份', '归属城市',
'区划代码', '运营商', '状态']
for i, header in enumerate(headers):
ws.cell(row=1, column=i + 1, value=header)
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select * from {table_name} ")
data = cursor.fetchall()
for i, row in enumerate(data, start=2):
for j, value in enumerate(row.values()):
ws.cell(row=i, column=j + 1, value=value)
if os.path.exists(file_path):
try:
with open(file_path, 'wb') as f:
f.write('test'.encode('utf-8'))
except PermissionError:
progress_window.destroy()
messagebox.showerror("错误", "文件被占用,无法导出,请关闭该文件后重试")
return
wb.save(file_path)
close_connection(conn, cursor)
progress_bar.stop()
progress_window.destroy()
messagebox.showinfo("导出完成", f"导出成功,共导出{count}条数据")
refresh()
export_button = tk.Button(root, text="导出所有", command=export_to_excel_yss)
export_button.pack(side='bottom', padx=3, pady=3)
def export_to_excel_ssyd():
conn = get_db_connection()
cursor = conn.cursor()
selected_items = tree.selection()
if not selected_items:
return
file_name = ""
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select count(*) from {table_name} WHERE `gender` IS NOT NULL AND `gender` != ''")
result = cursor.fetchone()
if result:
count = result['count(*)']
table_name1 = table_name.replace("ordea_", "")
file_name += f"{table_name1}_搜索有的_{count}.xlsx"
break
file_path = filedialog.asksaveasfilename(initialfile=file_name, defaultextension='.xlsx')
if not file_path:
return
progress_window = tk.Toplevel(root)
progress_window.title("导出进度")
progress_label = tk.Label(progress_window, text="正在导出中,请稍等...")
progress_label.pack()
progress_bar = tk.ttk.Progressbar(progress_window, mode="indeterminate")
progress_bar.pack(pady=10)
progress_bar.start(10)
progress_window.update()
wb = Workbook()
ws = wb.active
headers = ['id', '手机号', '名字', '性别', '昵称', '账号', '省份', '城市', '会员', '头像', '归属省份', '归属城市',
'区划代码', '运营商', '状态']
for i, header in enumerate(headers):
ws.cell(row=1, column=i + 1, value=header)
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select * from {table_name} WHERE `gender` IS NOT NULL AND `gender` != '' ")
data = cursor.fetchall()
for i, row in enumerate(data, start=2):
for j, value in enumerate(row.values()):
ws.cell(row=i, column=j + 1, value=value)
if os.path.exists(file_path):
try:
with open(file_path, 'wb') as f:
f.write('test'.encode('utf-8'))
except PermissionError:
progress_window.destroy()
messagebox.showerror("错误", "文件被占用,无法导出,请关闭该文件后重试")
return
wb.save(file_path)
close_connection(conn, cursor)
progress_bar.stop()
progress_window.destroy()
messagebox.showinfo("导出完成", f"导出成功,共导出{count}条数据")
refresh()
export_button = tk.Button(root, text="导出搜索有的", command=export_to_excel_ssyd)
export_button.pack(side='bottom', padx=3, pady=3)
def export_to_excel_dss():
conn = get_db_connection()
cursor = conn.cursor()
selected_items = tree.selection()
if not selected_items:
return
file_name = ""
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select count(*) from {table_name} where status = 0 or status = 2 or status = 3")
result = cursor.fetchone()
if result:
count = result['count(*)']
table_name1 = table_name.replace("ordea_", "")
file_name += f"{table_name1}_待搜索_{count}.xlsx"
break
file_path = filedialog.asksaveasfilename(initialfile=file_name, defaultextension='.xlsx')
if not file_path:
return
progress_window = tk.Toplevel(root)
progress_window.title("导出进度")
progress_label = tk.Label(progress_window, text="正在导出中,请稍等...")
progress_label.pack()
progress_bar = tk.ttk.Progressbar(progress_window, mode="indeterminate")
progress_bar.pack(pady=10)
progress_bar.start(10)
progress_window.update()
wb = Workbook()
ws = wb.active
headers = ['id', '手机号', '名字', '性别', '昵称', '账号', '省份', '城市', '会员', '头像', '归属省份', '归属城市',
'区划代码', '运营商', '状态']
for i, header in enumerate(headers):
ws.cell(row=1, column=i + 1, value=header)
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select * from {table_name} where status = 0 or status = 2 or status = 3")
data = cursor.fetchall()
for i, row in enumerate(data, start=2):
for j, value in enumerate(row.values()):
ws.cell(row=i, column=j + 1, value=value)
if os.path.exists(file_path):
try:
with open(file_path, 'wb') as f:
f.write('test'.encode('utf-8'))
except PermissionError:
progress_window.destroy()
messagebox.showerror("错误", "文件被占用,无法导出,请关闭该文件后重试")
return
wb.save(file_path)
close_connection(conn, cursor)
progress_bar.stop()
progress_window.destroy()
messagebox.showinfo("导出完成", f"导出成功,共导出{count}条数据")
refresh()
export_button = tk.Button(root, text="导出待搜索", command=export_to_excel_dss)
export_button.pack(side='bottom', padx=3, pady=3)
def export_to_excel_yc():
conn = get_db_connection()
cursor = conn.cursor()
selected_items = tree.selection()
if not selected_items:
return
file_name = ""
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select count(*) from {table_name} where status = 3 or status = 2")
result = cursor.fetchone()
if result:
count = result['count(*)']
table_name1 = table_name.replace("ordea_", "")
file_name += f"{table_name1}_异常_{count}.xlsx"
break
file_path = filedialog.asksaveasfilename(initialfile=file_name, defaultextension='.xlsx')
if not file_path:
return
progress_window = tk.Toplevel(root)
progress_window.title("导出进度")
progress_label = tk.Label(progress_window, text="正在导出中,请稍等...")
progress_label.pack()
progress_bar = tk.ttk.Progressbar(progress_window, mode="indeterminate")
progress_bar.pack(pady=10)
progress_bar.start(10)
progress_window.update()
wb = Workbook()
ws = wb.active
headers = ['id', '手机号', '名字', '性别', '昵称', '账号', '省份', '城市', '会员', '头像', '归属省份', '归属城市',
'区划代码', '运营商', '状态']
for i, header in enumerate(headers):
ws.cell(row=1, column=i + 1, value=header)
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select * from {table_name} where status = 3 or status = 2")
data = cursor.fetchall()
for i, row in enumerate(data, start=2):
for j, value in enumerate(row.values()):
ws.cell(row=i, column=j + 1, value=value)
if os.path.exists(file_path):
try:
with open(file_path, 'wb') as f:
f.write('test'.encode('utf-8'))
except PermissionError:
progress_window.destroy()
messagebox.showerror("错误", "文件被占用,无法导出,请关闭该文件后重试")
return
wb.save(file_path)
close_connection(conn, cursor)
progress_bar.stop()
progress_window.destroy()
messagebox.showinfo("导出完成", f"导出成功,共导出{count}条数据")
refresh()
export_button = tk.Button(root, text="导出异常", command=export_to_excel_yc)
export_button.pack(side='bottom', padx=3, pady=3)
def export_to_excel_yss():
conn = get_db_connection()
cursor = conn.cursor()
selected_items = tree.selection()
if not selected_items:
return
file_name = ""
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select count(*) from {table_name} where status = 1")
result = cursor.fetchone()
if result:
count = result['count(*)']
table_name1 = table_name.replace("ordea_", "")
file_name += f"{table_name1}_已搜索_{count}.xlsx"
break
file_path = filedialog.asksaveasfilename(initialfile=file_name, defaultextension='.xlsx')
if not file_path:
return
progress_window = tk.Toplevel(root)
progress_window.title("导出进度")
progress_label = tk.Label(progress_window, text="正在导出中,请稍等...")
progress_label.pack()
progress_bar = tk.ttk.Progressbar(progress_window, mode="indeterminate")
progress_bar.pack(pady=10)
progress_bar.start(10)
progress_window.update()
wb = Workbook()
ws = wb.active
headers = ['id', '手机号', '名字', '性别', '昵称', '账号', '省份', '城市', '会员', '头像', '归属省份', '归属城市',
'区划代码', '运营商', '状态']
for i, header in enumerate(headers):
ws.cell(row=1, column=i + 1, value=header)
for item in selected_items:
table_name = tree.set(item, '库名')
cursor.execute(f"select * from {table_name} where status = 1")
data = cursor.fetchall()
for i, row in enumerate(data, start=2):
for j, value in enumerate(row.values()):
ws.cell(row=i, column=j + 1, value=value)
if os.path.exists(file_path):
try:
with open(file_path, 'wb') as f:
f.write('test'.encode('utf-8'))
except PermissionError:
progress_window.destroy()
messagebox.showerror("错误", "文件被占用,无法导出,请关闭该文件后重试")
return
wb.save(file_path)
close_connection(conn, cursor)
progress_bar.stop()
progress_window.destroy()
messagebox.showinfo("导出完成", f"导出成功,共导出{count}条数据")
refresh()
export_button = tk.Button(root, text="导出已搜索", command=export_to_excel_yss)
export_button.pack(side='bottom', padx=3, pady=3)
    def reset_data():
        """Reset abnormal rows back to status 0 in each selected table and
        report how many rows were affected.

        Status 2/3 mark failed/abnormal searches (see the 异常 export query);
        setting them to 0 queues them for searching again.
        """
        conn = get_db_connection()
        cursor = conn.cursor()
        selected_items = tree.selection()
        if not selected_items:
            return
        reset_count = 0
        for item in selected_items:
            table_name = tree.set(item, '库名')
            cursor.execute(f"update {table_name} set status=0 where status=2 or status=3")
            reset_count += cursor.rowcount
        conn.commit()
        close_connection(conn, cursor)
        messagebox.showinfo("提示", f"本次成功重置({reset_count})条异常数据")
        refresh()
reset_button = tk.Button(root, text="重置异常", command=reset_data)
reset_button.pack(side='bottom', padx=3, pady=3)
    def reset_data_kb():
        """Reset rows back to status 0 in each selected table and report the count.

        NOTE(review): the button is labelled 重置空白 ("reset blanks"), but the
        WHERE clause matches rows whose gender IS filled in — the same
        condition the 搜索有的 export uses for "has results".  Confirm whether
        it should instead be `gender IS NULL OR gender = ''`.
        """
        conn = get_db_connection()
        cursor = conn.cursor()
        selected_items = tree.selection()
        if not selected_items:
            return
        reset_count = 0
        for item in selected_items:
            table_name = tree.set(item, '库名')
            cursor.execute(f"update {table_name} set status=0 WHERE `gender` IS NOT NULL AND `gender` != '' ")
            reset_count += cursor.rowcount
        conn.commit()
        close_connection(conn, cursor)
        messagebox.showinfo("提示", f"本次成功重置({reset_count})条空白数据")
        refresh()
reset_button = tk.Button(root, text="重置空白", command=reset_data_kb)
reset_button.pack(side='bottom', padx=3, pady=3)
# 新增“修改备注”的对话框
def modify_remark_dialog():
conn = get_db_connection()
cursor = conn.cursor()
# 获取当前选中行的数据
selected_item = tree.focus()
if not selected_item:
messagebox.showwarning("Warning", "Please select a row first.")
return
values = tree.item(selected_item, 'values')
table_name = values[1]
remark = values[1]
dialog = tk.Toplevel(root)
dialog.geometry("300x200")
dialog.title("修改备注")
# 显示库名和当前备注信息
label1 = tk.Label(dialog, text=f"库名:{table_name}")
label1.pack(side='top')
label2 = tk.Label(dialog, text="当前备注:")
label2.pack(side='top')
old_remark = tk.Label(dialog, text=remark)
old_remark.pack(side='top')
# 添加输入框和确认按钮
label3 = tk.Label(dialog, text="新备注:")
label3.pack(side='top')
entry = tk.Entry(dialog)
entry.pack(side='top')
def confirm():
conn = get_db_connection()
cursor = conn.cursor()
new_remark = entry.get()
if not new_remark:
messagebox.showwarning("Warning", "Please enter new remark.")
return
# 更新数据库中的备注信息
cursor.execute(f"UPDATE ordea_all SET 备注='{new_remark}' WHERE 库名='{table_name}'")
conn.commit()
# 更新表格中的备注信息
tree.item(selected_item, values=(table_name, new_remark, values[2]))
dialog.destroy()
refresh()
close_connection(conn, cursor)
confirm_button = tk.Button(dialog, text="确认", command=confirm)
confirm_button.pack(side='bottom',padx=3, pady=3)
# 新增“修改备注”按钮
modify_remark_button = tk.Button(root, text="修改备注", command=modify_remark_dialog)
modify_remark_button.pack(side='bottom',padx=3, pady=3)
def import_data():
conn = get_db_connection()
cursor = conn.cursor()
# 获取选中的表名
selected_item = tree.selection()
if not selected_item:
messagebox.showerror("错误", "未选中任何表格!")
return
table_name = tree.item(selected_item)['values'][1]
if not table_name.startswith('ordea_'):
messagebox.showerror("错误", "无法导入数据到此表格!")
return
# 弹出文件选择对话框
file_path = filedialog.askopenfilename(filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")])
if not file_path:
return
# 弹出导入进度对话框
import_progress = tk.Toplevel(root)
import_progress.title("导入进度")
import_progress.geometry("400x100") # 设置弹窗大小
progress_label = tk.Label(import_progress, text="正在导入数据,请稍等...", font=("Helvetica", 16))
progress_label.pack(pady=20)
def update_progress(current_count, total_count, progress_label):
progress_percent = current_count * 100 / total_count
progress_label.config(text=f"已导入 {current_count} / {total_count} 行数据 ({progress_percent:.2f}%)")
import_progress.update_idletasks()
def import_thread():
# 打开文件并读取数据
with open(file_path, 'r') as f:
lines = f.readlines()
total_count = len(lines)
current_count = 0
success_count = 0
repeat_count = 0
# 分批导入数据
batch_size = 10000
for i in range(0, total_count, batch_size):
batch_data = []
for line in lines[i:i + batch_size]:
data = line.strip().split()
batch_data.append(data)
# 插入数据到数据库
try:
cursor.executemany(f"INSERT IGNORE INTO `{table_name}` (`phone`, `status`) VALUES (%s, 0)",
batch_data)
conn.commit()
success_count += cursor.rowcount
repeat_count += len(batch_data) - cursor.rowcount
except Exception as e:
print(e)
conn.rollback()
messagebox.showerror("错误", "插入数据失败,请检查是否非11位手机号。")
close_connection(conn, cursor)
return
refresh
# 更新导入进度
current_count += len(batch_data)
progress_percent = current_count * 100 / total_count
progress_label.config(
text=f"已导入 {current_count} / {total_count} 行数据 ({progress_percent:.2f}%)")
import_progress.update_idletasks()
# 关闭导入进度对话框
import_progress.destroy()
messagebox.showinfo("信息", f"数据导入完成!成功导入 {success_count} 行数据,重复数据 {repeat_count} 行。")
close_connection(conn, cursor)
refresh()
# import_thread = threading.Thread(target=import_thread)
# import_thread.start()
import_thread()
# 创建导入数据的按钮
import_button = tk.Button(root, text="导入数据", command=import_data)
import_button.pack(side='bottom', padx=3, pady=3)
def open_upload_dialog():
conn = get_db_connection()
cursor = conn.cursor()
# 获取选中的表名
selected_item = tree.selection()
if not selected_item:
messagebox.showerror("错误", "未选中任何表格!")
return
table_name = tree.item(selected_item)['values'][1]
if not table_name.startswith('ordea_'):
messagebox.showerror("错误", "无法导入数据到此表格!")
return
# 弹出文本输入框
upload_window = tk.Toplevel(root)
upload_window.title("输入上传数据")
upload_text = tk.Text(upload_window, width=50, height=10)
upload_text.pack(padx=10, pady=10)
def import_data_from_text():
# 读取输入的数据
data = upload_text.get("1.0", "end").strip().split()
if not data:
messagebox.showwarning("警告", "输入数据为空!")
return
# 弹出导入进度对话框
import_progress = tk.Toplevel(root)
import_progress.title("导入进度")
progress_label = tk.Label(import_progress, text="正在导入数据,请稍等...")
progress_label.pack()
# 插入数据到数据库
try:
cursor.executemany(f"INSERT IGNORE INTO `{table_name}` (`phone`, `status`) VALUES (%s, 0)", data)
conn.commit()
except Exception as e:
print(e)
conn.rollback()
messagebox.showerror("错误", "插入数据失败,请检查是否数据重复,或者非11位手机号。")
return
# 关闭导入进度对话框
import_progress.destroy()
messagebox.showinfo("信息", "数据导入完成!")
# 关闭输入上传数据的对话框
upload_window.destroy()
close_connection(conn, cursor)
refresh()
# 添加导入按钮
import_button = tk.Button(upload_window, text="导入", command=import_data_from_text)
import_button.pack(pady=10)
# 创建上传数据按钮
upload_button = tk.Button(root, text="输入数据", command=open_upload_dialog)
upload_button.pack(side='bottom',padx=3, pady=3)
def create_table(username, table_name):
conn = get_db_connection()
cursor = conn.cursor()
# 查询表名是否已经存在
cursor.execute(f"SHOW TABLES LIKE '{table_name}'")
result = cursor.fetchone()
if result:
messagebox.showerror("Error", "Table already exists!")
return
table_name = 'ordea_' + table_name
# 创建表
sql = f'''
CREATE TABLE `{table_name}` (
`id` INT NOT NULL AUTO_INCREMENT,
`phone` VARCHAR(11) NOT NULL,
`name` VARCHAR(255),
`gender` VARCHAR(255),
`nickname` VARCHAR(255),
`account` VARCHAR(255),
`province` VARCHAR(255),
`city` VARCHAR(255),
`member` VARCHAR(255),
`avatar` VARCHAR(255),
`province_isp` VARCHAR(255),
`city_isp` VARCHAR(255),
`zoning_code` VARCHAR(255),
`isp` VARCHAR(255),
`status` INT,
`by1` VARCHAR(255),
`by2` VARCHAR(255),
`by3` VARCHAR(255),
PRIMARY KEY (`id`),
UNIQUE KEY `unique_phone` (`phone`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
'''
try:
cursor.execute(sql.format(table_name=table_name))
print('1')
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cursor.execute(
f"INSERT INTO ordea_all (库名, 备注, 日期, 账号归属) VALUES ('{table_name}', '', '{now}', '{username}')")
conn.commit()
tree.insert('', 'end', values=(table_name, 0))
show_table_data(current_page,username) # 刷新整个表格的内容显示
except Exception as e:
print(e)
conn.rollback()
messagebox.showerror("Error", "Create table failed!")
close_connection(conn, cursor)
refresh()
create_table_button = tk.Button(root, text="添加库", command=create_table_dialog)
create_table_button.pack(side='bottom', padx=3, pady=3)
# 刷新表格内容
    def refresh_table_data():
        """Reload every `ordea_all` row into the treeview with a live COUNT(*)
        per table, recompute the pagination state, and show page 1.

        NOTE(review): this closure does not appear to be wired to any button
        (the `refresh` function below is used instead) — candidate for removal.
        NOTE(review): the local name `id` shadows the builtin.
        """
        global tables
        conn = get_db_connection()
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM ordea_all")
        tables = cursor.fetchall()
        tree.delete(*tree.get_children())
        print(tables)
        for table in tables:
            id = table['id']
            table_name = table['库名']
            remark = table['备注']
            date = table['日期']
            cursor.execute(f"select count(*) from `{table_name}`")
            count = cursor.fetchone()['count(*)']
            tree.insert('', 'end', values=(id,table_name, remark, date, count))
        # 更新分页相关数据
        global total_count, page_count, pages
        total_count = len(tables)
        page_count = (total_count + page_size - 1) // page_size
        pages = [tables[i:i+page_size] for i in range(0, total_count, page_size)]
        # 显示第一页的数据
        global current_page
        current_page = 1
        show_table_data(current_page,username)
        # 更新分页标签
        page_label.config(text=f"分页: {current_page} / {page_count}")
        close_connection(conn, cursor)
# 新增“刷新”按钮
def refresh():
# 清空表格
conn = get_db_connection()
cursor = conn.cursor()
tree.delete(*tree.get_children())
# 查询数据库
cursor.execute("SELECT * FROM ordea_all ORDER BY 日期 DESC")
data = cursor.fetchall()
if not data:
return
for row in data:
table_name = row['库名']
cursor.execute(f"SELECT COUNT(*) FROM `{table_name}`")
result = cursor.fetchone()
# print(f"Result: {result}")
count = result.get('COUNT(*)', 0)
cursor.execute(f"SELECT COUNT(*) FROM `{table_name}` WHERE `status` IN (0, 2)")
result = cursor.fetchone()
pending_count = result.get('COUNT(*)', 0)
cursor.execute(f"SELECT COUNT(*) FROM `{table_name}` WHERE `status` = 1")
result = cursor.fetchone()
searched_count = result.get('COUNT(*)', 0)
cursor.execute(f"SELECT COUNT(*) FROM `{table_name}` WHERE `status` = 3")
result = cursor.fetchone()
exception_count = result.get('COUNT(*)', 0)
cursor.execute(f"SELECT COUNT(*) FROM `{table_name}` WHERE `gender` IS NOT NULL AND `gender` != ''")
result = cursor.fetchone()
search_success_count = result.get('COUNT(*)', 0)
cursor.execute(f"UPDATE ordea_all SET `总量`={count}, `待搜索`={pending_count}, `已搜索`={searched_count}, `异常`={exception_count}, `搜索有`={search_success_count} WHERE `库名`='{table_name}'")
conn.commit()
# tree.insert('', 'end', values=(row['库名'], row['备注'], count, pending_count, searched_count, exception_count, search_success_count, row['日期']))
# 更新页码和分页数据
global total_count, page_count, pages
total_count = len(data)
page_count = (total_count + page_size - 1) // page_size
pages = [data[i:i+page_size] for i in range(0, total_count, page_size)]
# 更新当前页码和分页标签
global current_page
current_page = 1
page_label.config(text=f"页数: {current_page} / {page_count}")
# 显示第一页的数据
show_table_data(current_page,username)
close_connection(conn, cursor)
refresh_button = tk.Button(root, text="刷新", command=refresh)
refresh_button.pack(side='bottom',padx=3, pady=3)
def run_login(master=None):
# 创建登录窗口
login_window = tk.Tk()
login_window.geometry("300x200")
login_window.title("登录")
# 创建用户名和密码输入框
username_label = tk.Label(login_window, text="用户名:")
username_label.pack(pady=10)
username_entry = tk.Entry(login_window)
username_entry.pack(pady=5)
password_label = tk.Label(login_window, text="密码:")
password_label.pack()
password_entry = tk.Entry(login_window, show="*")
password_entry.pack(pady=5)
# 定义登录按钮的点击事件
def login():
# 获取输入的用户名和密码
username = username_entry.get()
password = password_entry.get()
hashed_password = hashlib.md5(password.encode('utf-8')).hexdigest()
# 连接数据库
conn = pymysql.connect(
host='localhost',
port=3306,
user='gui',
password='LBD8TrTBeMZFBa8t',
db='gui',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
cursor = conn.cursor()
# 查询用户名和密码是否正确
cursor.execute(f"SELECT * FROM users WHERE username='{username}' AND password='{hashed_password}'")
result = cursor.fetchone()
if result:
# 用户名和密码验证通过,关闭登录窗口并执行回调函数
login_window.destroy()
login_success(result['username'])
else:
# 用户名和密码验证失败,弹出提示框
messagebox.showerror("错误", "用户名或密码不正确!")
cursor.close()
conn.close()
# 创建登录按钮
login_button = tk.Button(login_window, text="登录", command=login)
login_button.pack(pady=10)
login_window.mainloop()
def login_success(username):
    """Callback run after a successful login: open the main table view."""
    # messagebox.showinfo("提示", f"欢迎 {username} 登录!")
    show_table_info(username)
if __name__ == '__main__':
    # Open the login window directly; the Tk event loop runs inside
    # run_login(), and show_table_info() starts the main UI after login.
    run_login()
    # The Tkinter event loop is started by run_login() above.
| yahayaha001/mysql-table | app.py | app.py | py | 42,009 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymysql.connect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Tk",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Style",
... |
26443318875 | from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.contrib.auth import get_user_model, login, logout
from django.contrib import messages
from . import forms, models
# Create your views here.
User = get_user_model()
def signup_view(request):
    """Render the sign-up page, create accounts, and answer AJAX
    availability checks.

    GET with ``validation=true`` returns a JSON payload saying whether the
    submitted ``email`` or ``username`` value is already taken; POST tries
    to create the account and redirects to the sign-in page on success.
    """
    if request.GET.get('validation') == 'true':
        # AJAX availability check for a single field.
        payload = {}  # renamed from ``json`` to avoid shadowing the module name
        name = request.GET.get('name')
        if name in ('email', 'username'):
            value = request.GET.get('value')
            payload['name'] = name
            # .exists() replaces the duplicated try/except-per-field blocks
            # of the original and issues a cheaper query than .get().
            payload['taken'] = User.objects.filter(**{name: value}).exists()
            if payload['taken']:
                payload['msg'] = '{} is already taken'.format(name)
        return JsonResponse(payload)
    if request.method == 'POST':
        form = forms.CreateUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            if user:
                messages.success(request, 'You may now login to your account')
                return redirect('system_auth:sign-in')
    return render(request, 'system_auth/sign-up.html')
def signin_view(request):
    """Authenticate a user and route them to the staff or client home.

    Users in the 'staffs' group go to staff:home; everyone else (including
    users whose groups cannot be read) goes to client:home.
    """
    if request.method == 'POST':
        form = forms.AuthenticationForm(request, request.POST)
        response = None
        if form.is_valid():
            login(request, form.user)
            try:
                groups = form.user.groups.all()
            except Exception as e:
                # Fall back to the client home if the groups can't be read.
                response = redirect('client:home')
            else:
                group_names = [g.name for g in groups]
                if 'staffs' in group_names:
                    response = redirect('staff:home')
                else:
                    response = redirect('client:home')
            return response
        else:
            messages.error(request, 'The email or username and password are incorrect. Try again')
    return render(request, 'system_auth/sign-in.html')
def logout_view(request):
    """End the current session and return to the sign-in page."""
    logout(request)
    return redirect('system_auth:sign-in')
def unauthorized(request):
    """Render the 'access denied' page."""
    return render(request, 'system_auth/unauthorized.html')
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 47,
"usage_type": "call"
},... |
import os
import tempfile

# Show where temporary files go and the default name prefix.
print(tempfile.gettempdir())
print(tempfile.gettempprefix())

# Anonymous temp file: removed automatically when the context exits.
with tempfile.TemporaryFile("w+") as handle:
    handle.write("Some temp data")
    handle.seek(0)
    print(handle.read())

# Temp directory: deleted together with its contents on exit.
with tempfile.TemporaryDirectory() as dirname:
    filepath = os.path.join(dirname, "tempfile.txt")
    print(filepath)
    with open(filepath, "w+") as handle:
        handle.write("Temp file in temp directory")
        handle.seek(0)
        print(handle.read())
| cfleschhut/python-standard-library-essential-training-linkedin | files_and_directories/02_temporary_files_and_directories.py | 02_temporary_files_and_directories.py | py | 449 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tempfile.gettempdir",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempprefix",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryFile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tempfi... |
24673403788 | # This code is modified from https://github.com/haoheliu/DCASE_2022_Task_5
# This code is modified from DCASE 2022 challenge https://github.com/c4dm/dcase-few-shot-bioacoustic
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from src.models.meta_learning import BaseModel
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import pandas as pd
from src.evaluation_metrics.evaluation import *
from src.utils.feature_extractor import *
import time
from sklearn.metrics import classification_report, f1_score
from src.utils.post_processing import *
from src.evaluation_metrics.evaluation_confidence_intervals import *
from copy import deepcopy
import random
import torch.optim as optim
import h5py
from src.models.triplet_loss import *
class TriNet(BaseModel):
    """Triplet-loss model for few-shot audio event detection."""
    def __init__(self, config):
        super(TriNet, self).__init__(config)
        # Batch size used when embedding support/query segments at test time.
        self.test_loop_batch_size = config.val.test_loop_batch_size
        # self.loss_fn = TripletLossHard(margin= self.config.train.margin)
        self.loss_fn = TripletLoss(margin= self.config.train.margin)
        # Approximate (first-order) gradients flag, used by the disabled
        # inner-loop code below.
        self.approx = True
        self.ce = nn.CrossEntropyLoss()
        self.cosloss = nn.CosineEmbeddingLoss(margin= 0.95)
# def inner_loop(self, support_data, support_label = None, mode = 'train'):
# local_model = deepcopy(self.feature_extractor)
# local_model.train()
# local_optim = optim.SGD(local_model.par
# ameters(), self.config.train.lr_inner, momentum = self.config.train.momentum, weight_decay=self.config.train.weight_decay)
# local_optim.zero_grad()
# fast_parameters = list(local_model.parameters())
# if mode == 'test':
# support_label = support_label.cpu().numpy()
# for i in range(100):
# if mode == 'train':
# label = random.randint(0, 1)
# # same class
# if label == 0:
# class1 = random.randint(0, self.n_way - 1)
# indices1 = [i for i in range(len(support_data)) if i % self.n_way == class1]
# index1, index2 = random.sample(indices1, 2)
# sampl1 = support_data[index1]
# sampl2 = support_data[index2]
# else:
# class1, class2 = random.sample(range(self.n_way), 2)
# indices1 = [i for i in range(len(support_data)) if i % self.n_way == class1]
# indices2 = [i for i in range(len(support_data)) if i % self.n_way == class2]
# # Randomly select two different indices
# index1 = random.sample(indices1, 1)
# index2 = random.sample(indices2, 1)
# sampl1 = support_data[index1[0]]
# sampl2 = support_data[index2[0]]
# else:
# label = random.randint(0, 1)
# # same class
# if label == 0:
# class1 = random.randint(0, 1)
# indices1 = np.where(support_label == class1)[0].tolist()
# index1, index2 = random.sample(indices1, 2)
# sampl1 = support_data[index1]
# sampl2 = support_data[index2]
# else:
# class1 = 0
# class2 = 1
# indices1 = np.where(support_label == class1)[0].tolist()
# indices2 = np.where(support_label == class2)[0].tolist()
# # Randomly select two different indices
# index1 = random.sample(indices1, 1)
# index2 = random.sample(indices2, 1)
# sampl1 = support_data[index1[0]]
# sampl2 = support_data[index2[0]]
# sampl1 = sampl1.unsqueeze(0)
# sampl2 = sampl2.unsqueeze(0)
# # print('label:{}'.format(label))
# # print(sampl1.shape)
# # print(sampl2.shape)
# # print('~~~~~~~~~~~')
# feat1 = local_model(sampl1)
# feat2 = local_model(sampl2)
# loss = self.loss_fn(feat1, feat2, label)
# if self.approx:
# grad = torch.autograd.grad(loss, fast_parameters)
# else:
# grad = torch.autograd.grad(loss, fast_parameters, create_graph=True)
# for k, weight in enumerate(local_model.parameters()):
# # for usage of weight.fast, please see Linear_fw, Conv_fw in backbone.py
# weight.grad = grad[k]
# local_optim.step()
# local_optim.zero_grad()
# loss = loss.detach()
# if mode != 'train':
# print('inner loop: loss:{:.3f}'.format(loss.item()))
# if mode != 'train':
# print('!!!!!!!!!')
# return local_model
def euclidean_dist(self,query, support):
n = query.size(0)
m = support.size(0)
query = F.normalize(query, dim=1)
support = F.normalize(support, dim=1)
query = query.unsqueeze(1).expand(n, m, -1)
support = support.unsqueeze(0).expand(n, m, -1)
return torch.sqrt(torch.pow(query - support, 2).sum(2))
def cossim(self, query, support):
cos_sim = torch.nn.CosineSimilarity(dim=-1, eps=1e-6)
n = query.size(0)
m = support.size(0)
query = query.unsqueeze(1).expand(n, m, -1)
support = support.unsqueeze(0).expand(n, m, -1)
return cos_sim(query, support)
    def feed_forward_test(self, prototype, query_data):
        """Embed a query batch and score it against the class prototypes.

        Returns (softmax class probabilities as a numpy array, raw query
        embeddings).  Class order follows *prototype* rows.
        """
        # Execute a model with given output layer weights and inputs
        query_feat = self.feature_extractor(query_data)
        dists = self.euclidean_dist(query_feat, prototype)
        # dists = self.cossim(query_feat, prototype)
        # print(dists)
        pred = dists.argmin(-1)
        # Negative distance acts as the classification score.
        scores = -dists
        preds = F.softmax(scores, dim = 1)
        preds = preds.detach().cpu().numpy()
        return preds, query_feat
    def train_loop(self, data_loader, optimizer):
        """One training epoch: embed each batch, L2-normalise, apply the
        triplet loss and step the optimizer."""
        self.feature_extractor.train()
        for i, batch in tqdm(enumerate(data_loader)):
            data, label = batch
            # print(label)
            self.feature_extractor.zero_grad()
            feat = self.feature_extractor(data)
            feat = F.normalize(feat, dim=1)
            # loss = self.loss_fn(anchor_feat, pos_feat, torch.zeros(anchor.shape[0]).to(self.device))
            # loss = loss + self.loss_fn(anchor_feat, neg_feat, torch.ones(anchor.shape[0]).to(self.device))
            # loss = self.cosloss(anchor_feat, pos_feat, torch.ones(anchor.shape[0]).to(self.device)) + self.cosloss(anchor_feat, neg_feat, -torch.ones(anchor.shape[0]).to(self.device))
            loss = self.loss_fn(feat, label)
            loss.backward()
            # for i in self.parameters():
            #     print(i.grad)
            optimizer.step()
            print('loss:{:.3f}'.format(loss.item()))
            # print('~~~~~~~~~~~')
    def test_loop(self, test_loader , fix_shreshold = None, mode = 'test'):
        """Evaluate the model on every file in *test_loader*.

        For each file: build positive/negative prototypes from the support
        segments (3 rounds with independently resampled negatives, averaged),
        score all query segments, dump embeddings to an HDF5 file, then sweep
        detection thresholds (or use *fix_shreshold* only) and keep the one
        with the best F-measure.

        Returns (predictions dataframe, best evaluation report, best threshold).
        """
        self.feature_extractor.eval()
        all_prob = {}
        all_meta = {}
        for i, (pos_sup, neg_sup, query, seg_len, seg_hop, query_start, query_end, label) in enumerate(test_loader):
            seg_hop = seg_hop.item()
            query_start = query_start.item()
            # print(pos_sup[1].squeeze().shape)
            # print(neg_sup[1].squeeze().shape)
            # print(query.shape)
            # The wav path is encoded after '&' in the support identifier.
            wav_file= pos_sup[0][0].split('&')[1]
            all_meta[wav_file]={}
            all_meta[wav_file]['start'] = query_start
            all_meta[wav_file]['end'] = query_end
            all_meta[wav_file]['seg_hop'] = seg_hop
            all_meta[wav_file]['seg_len'] = seg_len
            all_meta[wav_file]['label'] = label[0]
            # Recreate the per-file HDF5 embedding dump.
            # NOTE(review): the output directory is hard-coded.
            feat_file = os.path.splitext(os.path.basename(wav_file))[0] + '.hdf5'
            feat_file = os.path.join('/root/task5_2023/latent_feature/TNN_noMAML', feat_file)
            if os.path.isfile(feat_file):
                os.remove(feat_file)
            directory = os.path.dirname(feat_file)
            if not os.path.exists(directory):
                os.makedirs(directory)
            # print(wav_file)
            # print(query_start)
            pos_data = pos_sup[1].squeeze()
            query = query.squeeze()
            query_dataset = TensorDataset(query, torch.zeros(query.shape[0]))
            query_loader = DataLoader(query_dataset, batch_size=128, shuffle=False)
            pos_dataset = TensorDataset(pos_data, torch.zeros(pos_data.shape[0]))
            pos_loader = DataLoader(pos_dataset, batch_size=self.test_loop_batch_size, shuffle=False)
            prob_mean = []
            # Three rounds with resampled negative support, averaged below.
            for i in range(3):
                test_loop_neg_sample = self.config.val.test_loop_neg_sample
                neg_sup[1] = neg_sup[1].squeeze()
                if neg_sup[1].shape[0] > test_loop_neg_sample:
                    neg_indices = torch.randperm(neg_sup[1].shape[0])[:test_loop_neg_sample]
                    neg_seg_sample = neg_sup[1][neg_indices]
                else:
                    neg_seg_sample = neg_sup[1]
                # neg_seg_sample = neg_sup[1]
                neg_dataset = TensorDataset(neg_seg_sample, torch.zeros(neg_seg_sample.shape[0]))
                neg_loader = DataLoader(neg_dataset, batch_size=self.test_loop_batch_size, shuffle=False)
                support_data = torch.cat([pos_data, neg_seg_sample], dim=0)
                # support_data = pos_data
                m = pos_data.shape[0]
                n = neg_seg_sample.shape[0]
                # Support label 0 = positive (event), 1 = negative (background).
                support_label = np.concatenate((np.zeros((m,)), np.ones((n,))))
                support_label = torch.from_numpy(support_label).long().to(self.device)
                support_feats = self.feature_extractor(support_data)
                with h5py.File(feat_file, 'w') as f:
                    f.create_dataset("features", (0, 512), maxshape=(None, 512))
                    f.create_dataset("labels", data=label.squeeze(0).numpy())
                    f.create_dataset("features_t", data = support_feats.detach().cpu().numpy())
                    f.create_dataset("labels_t", data=support_label.cpu().numpy())
                # Positive prototype: mean embedding over the support batches.
                pos_feat = []
                for batch in pos_loader:
                    p_data, _ = batch
                    feat = self.feature_extractor.forward(p_data)
                    # print(feat.shape)
                    pos_feat.append(feat.mean(0))
                pos_feat = torch.stack(pos_feat, dim=0).mean(0)
                # Negative prototype (no gradients needed).
                neg_feat = []
                with torch.no_grad():
                    for batch in neg_loader:
                        n_data, _ = batch
                        # print(neg_data.shape)
                        feat = self.feature_extractor.forward(n_data)
                        # print(feat.shape)
                        neg_feat.append(feat.mean(0))
                neg_feat = torch.stack(neg_feat, dim=0).mean(0)
                proto = torch.stack([pos_feat,neg_feat], dim=0)
                prob_all = []
                for batch in tqdm(query_loader):
                    query_data, _ = batch
                    prob, feats = self.feed_forward_test(proto, query_data)
                    prob_all.append(prob)
                    # Append this batch's embeddings to the HDF5 dump.
                    with h5py.File(feat_file, 'a') as f:
                        size = f['features'].shape[0]
                        nwe_size = f['features'].shape[0] + feats.shape[0]
                        f['features'].resize((nwe_size, 512))
                        f['features'][size:nwe_size] = feats.detach().cpu().numpy()
                prob_all = np.concatenate(prob_all, axis=0)
                #########################################################################
                # Keep only the positive-class probability per segment.
                prob_all = prob_all[:,0]
                # prob_all = np.where(prob_all>self.config.val.threshold, 1, 0)
                prob_mean.append(prob_all)
            prob_mean = np.stack(prob_mean, axis=0).mean(0)
            all_prob[wav_file] = prob_mean
        best_res = None
        best_f1 = 0
        best_report = {}
        best_threshold = 0
        # Sweep thresholds (or evaluate only the fixed one).
        for threshold in np.arange(0.5, 1, 0.1):
            if fix_shreshold is not None:
                threshold = fix_shreshold
            report_f1 = {}
            all_time = {'Audiofilename':[], 'Starttime':[], 'Endtime':[]}
            for wav_file in all_prob.keys():
                prob = np.where(all_prob[wav_file]>threshold, 1, 0)
                # acc = np.sum(prob^1 == np.array(all_meta[wav_file]['label']))/len(prob)
                # Build the per-file classification report.
                y_pred = prob^1
                y_true = np.array(all_meta[wav_file]['label'])
                # print(all_meta[wav_file]['seg_hop'])
                # print(all_meta[wav_file]['seg_len'])
                # Store the classification report for this file.
                report_f1[os.path.basename(wav_file)] = classification_report(y_true, y_pred,zero_division=0, digits=5)
                # Per-class F1 scores (disabled diagnostics).
                # f1_scores = f1_score(y_true, y_pred, average=None)
                # print("F1 scores for each class:")
                # print(f1_scores)
                # print(len(prob))
                # print(np.sum(prob))
                # print(np.sum(prob)/len(prob))
                # Onsets/offsets: 0->1 and 1->0 transitions of the binary curve.
                on_set = np.flatnonzero(np.diff(np.concatenate(([0],prob), axis=0))==1)
                off_set = np.flatnonzero(np.diff(np.concatenate((prob,[0]), axis=0))==-1) + 1 #off_set is the index of the first 0 after 1
                # for i, j in zip(on_set, off_set):
                #     print(i,j)
                # Convert segment indices to absolute times in seconds.
                on_set_time = on_set*all_meta[wav_file]['seg_hop']/self.fps + all_meta[wav_file]['start']
                off_set_time = off_set*all_meta[wav_file]['seg_hop']/self.fps + all_meta[wav_file]['start']
                all_time['Audiofilename'].extend([os.path.basename(wav_file)]*len(on_set_time))
                all_time['Starttime'].extend(on_set_time)
                all_time['Endtime'].extend(off_set_time)
                # print(wav_file)
                # print(on_set_time[:5])
                # print('query_start', all_meta[wav_file]['start'])
                # Sanity check: no detection may end after the query region.
                for i in range(len(off_set_time)):
                    if off_set_time[i] > all_meta[wav_file]['end']:
                        raise ValueError('off_set_time is larger than query_end')
            df_all_time = pd.DataFrame(all_time)
            df_all_time = post_processing(df_all_time, self.config, mode)
            df_all_time = df_all_time.astype('str')
            pred_path = normalize_path(self.config.checkpoint.pred_dir)
            pred_path = os.path.join(pred_path, 'pred_{:.2f}.csv'.format(threshold))
            if not os.path.exists(os.path.dirname(pred_path)):
                os.makedirs(os.path.dirname(pred_path))
            df_all_time.to_csv(pred_path, index=False)
            ref_files_path = test_loader.dataset.val_dir
            report_dir = normalize_path(self.config.checkpoint.report_dir)
            report = evaluate(df_all_time, ref_files_path, self.config.team_name, self.config.dataset, report_dir)
            # Keep the best-scoring threshold.
            if report['overall_scores']['fmeasure (percentage)'] > best_f1:
                best_f1 = report['overall_scores']['fmeasure (percentage)']
                best_res = report
                best_report = report_f1
                best_threshold = threshold
            if fix_shreshold is not None:
                break
        for i in best_report.keys():
            print(i)
            print(best_report[i])
            print('~~~~~~~~~~~~~~~')
        print(best_res)
        print('best_threshold', best_threshold)
        print('~~~~~~~~~~~~~~~')
        return df_all_time, best_res, best_threshold
# def euclidean_dist(self,query, support):
# init_weight = 2 * support
# init_bias = -torch.norm(support, dim=1)**2
# print(init_weight.shape)
# print(init_bias.shape)
# output_weight = init_weight.detach()
# output_bias = init_bias.detach()
# score2 = query.matmul(init_weight.t()) + init_bias-(torch.norm(query, dim=1)**2).unsqueeze(1)
# score2 = F.linear(query, init_weight, init_bias)
# score2 = score2-(torch.norm(query, dim=1)**2).unsqueeze(1)
# n = query.size(0)
# m = support.size(0)
# query = query.unsqueeze(1).expand(n, m, -1)
# support = support.unsqueeze(0).expand(n, m, -1)
# score = torch.pow(query - support, 2).sum(2)
# prob2 = F.softmax(score2, dim=1)
# prob1 = F.softmax(-score, dim=1)
# print(torch.argmax(prob1, dim=1))
# print(torch.argmax(prob2, dim=1))
# print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# return -score2
| wty0511/MSc_Individual_Project | src/models/TriNet.py | TriNet.py | py | 17,765 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.models.meta_learning.BaseModel",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": ... |
38785947998 | import cv2
import tensorflow as tf
import numpy as np
import mnist
import mnist_m
import svhn
import synthdigits
class DataInput(object):
    """Wraps one digits dataset (MNIST / MNIST-M / SVHN / SynthDigits) and
    provides batch loading plus a TensorFlow augmentation pipeline."""
    def __init__(self, model_params, mnist_type, phase, is_train):
        # NOTE(review): ``phase`` is accepted but never used here.
        self.batch_size = model_params["batch_size"]
        max_data_num = model_params["max_data_num"]
        self.is_train = is_train
        # Select the dataset backend by name.
        if mnist_type == "MNIST":
            self.data_type = mnist.Mnist(is_train, max_data_num)
        elif mnist_type == "MNIST_M":
            self.data_type = mnist_m.MnistM(is_train, max_data_num)
        elif mnist_type == "SVHN":
            self.data_type = svhn.Svhn(is_train, max_data_num)
        elif mnist_type == "SYNTHDIGITS":
            self.data_type = synthdigits.SynthDigits(is_train, max_data_num)
        self.file_size = self.data_type.file_size
    def get_arg_dict(self, model_params):
        """Collect augmentation settings keyed ``data_arg.<domain>.<field>``
        into a [feature, label, mask] list of dicts."""
        arg_dict = dict()
        arg_dict["feature"] = dict()
        arg_dict["label"] = dict()
        arg_dict["mask"] = dict()
        for key in model_params:
            if "data_arg" in key:
                _, domain, field = key.split(".")
                arg_dict[domain][field] = model_params[key]
        arg_dict_list = list()
        arg_dict_list.append(arg_dict["feature"])
        arg_dict_list.append(arg_dict["label"])
        arg_dict_list.append(arg_dict["mask"])
        return arg_dict_list
    def load_data(self):
        """Return one (image, label) batch from the backend dataset."""
        image, label = self.data_type.load_data(self.batch_size)
        return image, label
    def center_crop(self, img):
        """Center-crop a single HxWxC image tensor to 30x30."""
        i_height, i_width, i_cha = img.get_shape().as_list()
        ccrop_size = [30, 30]
        offset_height = int((i_height - ccrop_size[0]) / 2)
        offset_width = int((i_width - ccrop_size[1]) / 2)
        img = tf.image.crop_to_bounding_box(img,
            offset_height, offset_width, ccrop_size[0], ccrop_size[1])
        return img
    def add_data_arg(self, imgs):
        """Random brightness/contrast and random 30x30 crop during training;
        deterministic center crop during evaluation."""
        if self.is_train:
            imgs = tf.map_fn(
                lambda img: tf.image.random_brightness(img, 0.2),
                imgs)
            imgs = tf.map_fn(lambda img:
                tf.image.random_contrast(img, 0.5, 1.5),
                imgs)
            imgs = tf.map_fn(lambda img:
                tf.random_crop(img, [30, 30, 3]),
                imgs)
        else:
            imgs = tf.map_fn(lambda img:
                self.center_crop(img),
                imgs)
        return imgs
    def get_input(self):
        # Placeholder for subclass/extension use.
        pass
    def get_label(self):
        # Placeholder for subclass/extension use.
        pass
if __name__ == "__main__":
    """ example of running the code"""
    model_params = {"batch_size": 10}
    # NOTE(review): DataInput.__init__ also reads model_params["max_data_num"],
    # so this demo would raise KeyError as written -- confirm before running.
    data_input = DataInput(model_params, "SYNTHDIGITS", None, True)
    for i in range(10):
        image, label = data_input.load_data()
    for i in range(10):
        print(label[i])
        cv2.imshow("img", image[i])
        cv2.waitKey(0)
| hanzhaoml/MDAN | mnist/mnist_data_input.py | mnist_data_input.py | py | 2,986 | python | en | code | 102 | github-code | 1 | [
{
"api_name": "mnist.Mnist",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mnist_m.MnistM",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "svhn.Svhn",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "synthdigits.SynthDigits",
"l... |
37129892634 | from ..dateparser import DateSearchStatus, AbstractDateParser
from .time_parser import TimeParser
from .after_minutes_parser import AfterMinutesParser
from .after_hours_parser import AfterHoursParser
from .relative_day_parser import RelativeDayParser
from .day_month_parser import DayMonthParser
from .week_day_parser import WeekDayParser
from typing import List
from datetime import datetime, timedelta, time
class DateSearcher():
    """
    Holds the concrete date parsers and runs them over a message.
    Encapsulates the individual parser implementations.
    """
    # Parsers applied in order; each may fill in status.date / status.time.
    PARSERS = [
        TimeParser(),
        AfterHoursParser(),
        AfterMinutesParser(),
        RelativeDayParser(),
        WeekDayParser(),
        DayMonthParser()
    ]
    # Default reminder time used when only a date was recognised.
    FALLBACK_TIME = time(hour=10, minute=0)
    def get_parsers(self) -> List[AbstractDateParser]:
        return self.PARSERS
    def parse(self, msg: str, current_datetime: datetime) -> DateSearchStatus:
        """Run every parser over *msg* and fill missing date/time parts
        with defaults."""
        status = DateSearchStatus(current_datetime)
        for parser in self.get_parsers():
            parser.search(msg, status)
        # Only the time is missing -> use the default time.
        if status.date and not status.time:
            self.add_fallback_time(status)
        # Only the date is missing -> pick today or tomorrow.
        if not status.date and status.time:
            self.add_fallback_date(status)
        return status
def add_fallback_date(self, status: DateSearchStatus) -> None:
"""
Добавляет дату по умолчанию.
Используется, когда указано только время.
Если сегодня больше времени, чем нужно, то напоминание будет установлено на завтра.
:param status:
:return:
"""
if status.current_datetime.time() > status.time:
status.date = status.current_datetime.date()
else:
status.date = status.current_datetime.date() + timedelta(days=1)
    def add_fallback_time(self, status: DateSearchStatus):
        """
        Fill in the default time.
        Used when only a date was recognised.
        The default time is hard-coded (FALLBACK_TIME).
        :param status:
        :return:
        """
        status.time = self.FALLBACK_TIME
| zolateater/reminder-bot | src/bot/dateparser/searcher.py | searcher.py | py | 2,341 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "time_parser.TimeParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "after_hours_parser.AfterHoursParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "after_minutes_parser.AfterMinutesParser",
"line_number": 19,
"usage_type": "ca... |
15189448225 | from Orange.misc.utils.embedder_utils import EmbedderCache
from Orange.util import dummy_callback
from orangecontrib.imageanalytics.utils.embedder_utils import ImageLoader
class LocalEmbedder:
    """Embeds images with a locally-loaded model, caching results keyed by
    the MD5 hash of the preprocessed image."""
    embedder = None
    def __init__(self, model, model_settings):
        # model_settings["model"] is a factory for the embedder instance.
        self.embedder = model_settings["model"]()
        self._target_image_size = model_settings["target_image_size"]
        self._image_loader = ImageLoader()
        # Cache keyed by model name so different models don't collide.
        self._cache = EmbedderCache(model)
    def embedd_data(self, file_paths, callback=dummy_callback):
        """Embed every path in *file_paths*, reporting progress through
        *callback* and persisting the cache at the end.

        Returns a list of embeddings; entries are None for images that
        could not be loaded.
        """
        all_embeddings = []
        for i, row in enumerate(file_paths, start=1):
            all_embeddings.append(self._embed(row))
            callback(i / len(file_paths))
        self._cache.persist_cache()
        return all_embeddings
    def _embed(self, file_path):
        """Load one image, preprocess it and return its embedding, reusing
        a cached result when available.  Returns None when the image cannot
        be loaded.
        """
        image = self._image_loader.load_image_or_none(
            file_path, self._target_image_size
        )
        if image is None:
            return None
        image = self.embedder.preprocess(image)
        cache_key = self._cache.md5_hash(image)
        cached_im = self._cache.get_cached_result_or_none(cache_key)
        if cached_im is not None:
            return cached_im
        embedded_image = self.embedder.predict(image)
        self._cache.add(cache_key, embedded_image)
        return embedded_image
| biolab/orange3-imageanalytics | orangecontrib/imageanalytics/local_embedder.py | local_embedder.py | py | 1,463 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "orangecontrib.imageanalytics.utils.embedder_utils.ImageLoader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Orange.misc.utils.embedder_utils.EmbedderCache",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "Orange.util.dummy_callback",
"li... |
#!/usr/bin/python3
"""Networking with Python.

Same exercise as "2-post_email.py", implemented with the
`requests` package.
"""
import sys
import requests


if __name__ == "__main__":
    # Usage: ./6-post_email.py <URL> <email>
    if len(sys.argv) < 3:
        sys.exit(1)
    url, address = sys.argv[1], sys.argv[2]
    # POST the email as form data; (connect, read) timeouts in seconds.
    response = requests.post(url, data={"email": address}, timeout=(6, 9))
    print("{}".format(response.text))
| brian-ikiara/alx-higher_level_programming | 0x11-python-network_1/6-post_email.py | 6-post_email.py | py | 382 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 16... |
27463864604 | import os
import argparse
from os.path import join
import cv2
import dlib
from PIL import Image as pil_image
from tqdm import tqdm
import numpy as np
from pathlib import Path
from dataset import image_paths, _find_images
def get_boundingbox(face, width, height, scale=1.3, minsize=None):
    """Expand a dlib face rectangle into a square crop clipped to the frame.

    Returns (x1, y1, size): the top-left corner and side length of the crop.
    """
    left, top = face.left(), face.top()
    right, bottom = face.right(), face.bottom()
    # Square side: the larger face dimension, scaled up.
    side = int(max(right - left, bottom - top) * scale)
    if minsize and side < minsize:
        side = minsize
    center_x, center_y = (left + right) // 2, (top + bottom) // 2
    # Clip the top-left corner into the frame, then the size to the frame.
    x1 = max(int(center_x - side // 2), 0)
    y1 = max(int(center_y - side // 2), 0)
    side = min(width - x1, side)
    side = min(height - y1, side)
    return x1, y1, side
def processVideos(video_path, output_path,
                  start_frame=0, end_frame=None, cuda=True, video_name=None):
    """Detect the first face per frame of *video_path* with dlib, crop a
    scaled square around it, resize to 299x299 and write it as
    ``<video_name>_<frame:04d>.png`` under *output_path*.

    Frames before *start_frame* are skipped; processing stops at *end_frame*
    (or at the end of the video).  ``cuda`` is accepted but unused here.
    """
    print('Starting: {}'.format(video_path))
    reader = cv2.VideoCapture(video_path)
    # NOTE(review): video_fn/fps and the font constants below are computed
    # but never used in this function.
    video_fn = video_path.split('/')[-1].split('.')[0] + '.avi'
    fps = reader.get(cv2.CAP_PROP_FPS)
    num_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Number of frames", num_frames)
    # face detector
    face_detector = dlib.get_frontal_face_detector()
    font_face = cv2.FONT_HERSHEY_SIMPLEX
    thickness = 2
    font_scale = 1
    frame_num = 0
    # Nothing to do when the requested start lies past the video.
    if(start_frame >= num_frames - 1):
        return
    end_frame = end_frame if end_frame else num_frames
    while reader.isOpened():
        _, image = reader.read()
        if image is None:
            break
        frame_num += 1
        if frame_num < start_frame:
            continue
        height, width = image.shape[:2]
        # 2. Detect with dlib
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_detector(gray, 1)
        if len(faces):
            # Largest face is taken
            face = faces[0]
            x, y, size = get_boundingbox(face, width, height)
            cropped_face = image[y:y + size, x:x + size]
            cropped_face = pil_image.fromarray(cropped_face)
            cropped_face = cropped_face.resize((299, 299))
            cropped_face = np.array(cropped_face)
            cv2.imwrite(join(output_path, video_name + '_{:04d}.png'.format(frame_num)),
                        cropped_face)
        if frame_num >= end_frame:
            break
def resizeFrames(args):
    """Resize every extracted frame under args.video_path (one subdirectory
    per video) to 299x299, saving each image back in place as PNG."""
    directory = args.video_path
    directories = sorted(map(str, filter(
        lambda x: x.is_dir(), Path(directory).iterdir())))
    for i, directory in enumerate(directories):
        images = list(sorted(image_paths(directory)))
        for image in images:
            # NOTE(review): these prints run once per image, not per directory.
            print("Directory Name", directory)
            print("No of images", len(images))
            x = pil_image.open(image)
            x = x.resize((299, 299))
            x.save(image, 'PNG')
def countFrames(video_path):
    """Print the frame count of *video_path* (diagnostic helper)."""
    print('Starting: {}'.format(video_path))
    reader = cv2.VideoCapture(video_path)
    num_frames = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Number of frames", num_frames)
    # Release the capture handle (the original leaked it) -- the unused
    # video_fn/fourcc/fps locals were also removed.
    reader.release()
if __name__ == '__main__':
    p = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('--video_path', '-i', type=str)
    p.add_argument('--output_path', '-o', type=str,
                   default='.')
    p.add_argument('--start_frame', type=int, default=0)
    p.add_argument('--end_frame', type=int, default=None)
    #p.add_argument('--cuda', action='store_true')
    args = p.parse_args()
    video_path = args.video_path
    # A single video file is processed directly; a directory is walked and
    # every contained video is processed with its stem as video_name.
    if video_path.endswith('.mp4') or video_path.endswith('.avi'):
        processVideos(**vars(args), video_name="fake")
    else:
        videos = os.listdir(video_path)
        for video in videos:
            video_name = video.split('.mp4')
            args.video_path = join(video_path, video)
            #countFrames(args.video_path)
            processVideos(**vars(args), video_name=video_name[0])
| Aayushi0008/Deepfake-Detection | src/process_frames.py | process_frames.py | py | 4,282 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "... |
18203106433 | import logging
from ..conversions.types import decode_dict
from . import messages
from .. import settings
logger = logging.getLogger(__name__)

# Queues this pipeline stage consumes from and publishes to.
READS_QUEUES = ("os2ds_representations",)
WRITES_QUEUES = (
    "os2ds_handles",
    "os2ds_matches",
    "os2ds_checkups",
    "os2ds_conversions",)
# Label used for this stage's Prometheus counter.
PROMETHEUS_DESCRIPTION = "Representations examined"
PREFETCH_COUNT = 8
def message_received_raw(body, channel, source_manager): # noqa: CCR001,E501 too high cognitive complexity
    """Match rules against an object's representations.

    Yields (queue_name, json_message) pairs: problem messages on matching
    errors, match/checkup messages once a boolean conclusion is reached
    (plus a handle message when matched), or a conversion request when a
    new representation is needed to continue.
    """
    message = messages.RepresentationMessage.from_json_object(body)
    representations = decode_dict(message.representations)
    rule = message.progress.rule
    logger.debug(f"{message.handle} with rules [{rule.presentation}] "
                 f"and representation [{list(representations.keys())}]")
    try:
        # Keep executing rules for as long as we can with the representations
        # we have
        conclusion, new_matches = rule.try_match(
            representations,
            obj_limit=max(1, settings.pipeline["matcher"]["obj_limit"]))
    except Exception as e:
        exception_message = "Matching error"
        exception_message += ". {0}: ".format(type(e).__name__)
        exception_message += ", ".join([str(a) for a in e.args])
        logger.warning(exception_message)
        # Report the failure on both the problems and checkups queues.
        for problems_q in ("os2ds_problems", "os2ds_checkups",):
            yield (problems_q, messages.ProblemMessage(
                scan_tag=message.scan_spec.scan_tag,
                source=None, handle=message.handle,
                message=exception_message).to_json_object())
        return
    final_matches = message.progress.matches + [
        messages.MatchFragment(rule, matches or None)
        for rule, matches in new_matches]
    if isinstance(conclusion, bool):
        # We've come to a conclusion!
        logger.info(
            f"{message.handle} done."
            f" Matched status: {conclusion}")
        for matches_q in ("os2ds_matches", "os2ds_checkups",):
            yield (matches_q,
                   messages.MatchesMessage(
                       message.scan_spec, message.handle,
                       matched=conclusion,
                       matches=final_matches).to_json_object())
        # Only trigger metadata scanning if the match succeeded
        if conclusion:
            yield ("os2ds_handles",
                   messages.HandleMessage(
                       message.scan_spec.scan_tag,
                       message.handle).to_json_object())
    else:
        new_rep = conclusion.split()[0].operates_on
        # We need a new representation to continue
        logger.debug(
            f"{message.handle} needs"
            f" new representation: [{new_rep}].")
        yield ("os2ds_conversions",
               messages.ConversionMessage(
                   message.scan_spec, message.handle,
                   message.progress._replace(
                       rule=conclusion,
                       matches=final_matches)).to_json_object())
if __name__ == "__main__":
    # Run this stage standalone via the shared pipeline runner.
    from .run_stage import _compatibility_main  # noqa
    _compatibility_main("matcher")
| os2datascanner/os2datascanner | src/os2datascanner/engine2/pipeline/matcher.py | matcher.py | py | 3,213 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "conversions.types.decode_dict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "run_stage._compatibility_main",
"line_number": 83,
"usage_type": "call"
}
] |
37583544991 | import pygame
import itertools
# Fullscreen surface dimensions in pixels.
screen_size = (1920, 1080)

# pastel pink
LOW_COLOR = (255,209,220)
# deep blue
MED_COLOR = (7, 42, 108)
# red
PARTY_COLOR = (255, 0, 0)

# Maps a mood name to the RGB fill colour used by VibeLight.set_mood().
color_map = {
    'LOW': LOW_COLOR,
    'MED': MED_COLOR,
    'PARTY': PARTY_COLOR
}
class VibeLight:
    """Fullscreen mood light: fills the whole display with a mood colour."""
    def __init__(self):
        # Window state; start() must be called before set_mood().
        self.is_on = False
    def start(self):
        """Initialise pygame and open the display surface."""
        pygame.init()
        self.is_on = True
        self.screen = pygame.display.set_mode(screen_size)
    def set_mood(self, mood):
        """Fill the screen with the colour for *mood* ('LOW'/'MED'/'PARTY')."""
        self.screen.fill(color_map[mood])
        pygame.display.flip()
| shivenk78/Spotify-Mood-Detector | vibe_light.py | vibe_light.py | py | 556 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
25952493404 | from flask import Flask, render_template,request, redirect, session
app = Flask(__name__)
# NOTE(review): hard-coded session secret -- load from config/env in production.
app.secret_key = 'what sup?'
# our index route will handle rendering our form
@app.route('/')
def index():
    """Render the survey form page (GET /)."""
    return render_template("index.html")
@app.route('/process', methods=['POST'])
def submit_survey():
    """Store the posted survey answers in the session, then redirect to /show."""
    # Log the raw POST payload for debugging.
    print("Got Post Info")
    print(request.form)
    # Each form field is stashed under a 'user'-prefixed session key.
    for field in ('name', 'location', 'language', 'comment'):
        session['user' + field] = request.form[field]
    return redirect('/show')
@app.route('/show')
def show_user():
    """Render the results page from the answers stored in the session."""
    context = {
        'name_on_template': session['username'],
        'location_on_template': session['userlocation'],
        'language_on_template': session['userlanguage'],
        'comment_on_template': session['usercomment'],
    }
    return render_template("show.html", **context)
if __name__ == "__main__":
app.run(debug=True, port = 5001)
| THEWENDI/Dojo-Survey | server.py | server.py | py | 907 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
6208541600 | # -*- coding: utf-8 -*-
"""
usage:
$ baseline_taskAB.py gold_file system_file taskName
- gold_file and system_file are tab-separated, UTF-8 encoded files
- taskName is the name of the task (A|B)
"""
import argparse, sys
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import confusion_matrix
def preproc(infile, task):
    """Extract the label column from a tab-separated haspeede file.

    Args:
        infile: open file-like object yielding tab-separated lines.
            Lines starting with "id" are treated as headers and skipped.
        task: "A" (label in column 2) or "B" (label in column 3,
            newline stripped).

    Returns:
        List of label strings in file order.

    Raises:
        ValueError: if *task* is neither "A" nor "B" (the original code
            failed later with UnboundLocalError instead).
    """
    # Column index holding the label for each task.
    label_column = {"A": 2, "B": 3}
    if task not in label_column:
        raise ValueError("task must be 'A' or 'B', got {!r}".format(task))
    column = label_column[task]
    y = []
    # Iterate the file lazily instead of materializing all lines first.
    for row in infile:
        if row.startswith("id"):  # header line
            continue
        label = row.split('\t')[column]
        if task == "B":
            # Task-B labels sit in the last column and still carry the newline.
            label = label.rstrip()
        y.append(label)
    return y
def eval(y_test, y_predicted):
    """Print per-class precision/recall/F1, macro-F1 and the confusion matrix.

    NOTE(review): this shadows the built-in eval(); renaming it would
    require updating the caller in the __main__ block below.
    """
    # Per-class scores: arrays indexed by class label (0 and 1).
    precision, recall, fscore, _ = score(y_test, y_predicted)
    # Header row for the two class columns.
    print('\n {0} {1}'.format("0","1"))
    print('P: {}'.format(precision))
    print('R: {}'.format(recall))
    print('F: {}'.format(fscore))
    #"""
    # Recompute with macro averaging to obtain a single summary score.
    _, _, fscore, _ = score(y_test, y_predicted, average='macro')
    print('Macro-F1: {}'.format(fscore))
    print('\n Confusion matrix:')
    print(confusion_matrix(y_test, y_predicted))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('test', help='haspeede test set')
    parser.add_argument('predicted', help='system output')
    parser.add_argument('task', type=str, help="the name of the task, i.e. A or B")
    args = parser.parse_args()
    # Use the parsed arguments instead of raw sys.argv indices so that
    # argparse's validation and --help actually apply (the original parsed
    # the arguments and then ignored them).
    with open(args.test, 'r', encoding="utf8") as f:
        y_test = preproc(f, args.task)
    with open(args.predicted, 'r', encoding="utf8") as f:
        y_predicted = preproc(f, args.task)
    eval(y_test, y_predicted)
| msang/haspeede | 2020/eval_taskAB.py | eval_taskAB.py | py | 1,742 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "sklearn.metrics.precision_recall_fscore_support",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_fscore_support",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_numb... |
36579701554 | #!/usr/bin/env python
# coding: utf-8
# - Edge weight is inferred by GNNExplainer and node importance is given by five Ebay annotators. Not every annotator has annotated each node.
# - Seed is the txn to explain.
# - id is the community id.
import math
from tqdm.auto import tqdm
import random
import pandas as pd
import numpy as np
import networkx as nx
import itertools
from collections import Counter
import scipy.stats
import sklearn
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--random-draw', action = 'store', dest = 'random_draw', type = int, default = 100, help = 'Random draws to break the tie in ranking topk edges.')
parser.add_option('--edge-agg', action = 'store', dest = 'edge_agg', default = 'avg', choices = ['avg', 'min', 'sum'], help = 'Aggregation method to compute edge importance score based on the node importance scores.')
# parser.add_option('--explainer-w', action = 'store', dest = 'explainer_w', default = '0', type = float, help = 'Learned parameter for explainer weights.')
parser.add_option('-c', '--centrality-w', action = 'store', dest = 'centrality_w', default = '0', type = float, help = 'Learned parameter for centrality measures.')
(options, args) = parser.parse_args()
print ("Options:", options)
explainer_w = 1-options.centrality_w
learner = 'grid-{}'.format(options.centrality_w)
# Load in the annotation file, the data seed, the edge weights by explainer, the edges in the communities.
DataNodeImp = pd.read_csv('../05GNNExplainer-eval-hitrate/input/annotation_publish.csv')
DataSeed = pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-seed.txt')
DataEdgeWeight = pd.read_csv('../05GNNExplainer-eval-hitrate/input/data-edge-weight.txt')
df_e = pd.read_csv('../05GNNExplainer-eval-hitrate/input/masked_df_e.csv')
x_y_df = pd.read_csv('x_y_df_learn.csv')
del x_y_df['max_hitrate']
x_y_df['combined_weights'] = explainer_w * x_y_df['explainer'] + options.centrality_w * x_y_df['edge_btw']
print('AUC score of the sample:', roc_auc_score(DataSeed.y, DataSeed['yhat']))
# Communities labeled 0 and 1.
comm0 = DataSeed[DataSeed.y==0].id.unique()
comm1 = DataSeed[DataSeed.y==1].id.unique()
df_node_weight = pd.read_csv('./results/df_node_weight_with_avgimp.csv')
# Preprocess explainer weights: calculate undirectional edge weight by taking the max weight of bidirectional edge weights.
# From node importance scores to edge importance score: "min"/"avg"/"sum".
df_edge_weight = df_e.copy()
df_edge_weight['importance'] = None
df_edge_weight['weight'] = None
df_edge_weight['weight_positive'] = None
df_edge_weight['weight_negative'] = None
for i, row in tqdm(df_edge_weight.iterrows(), total=len(df_edge_weight), ncols=80, mininterval=5):
src_node_id = row['source']
dst_node_id = row['target']
cc_id = row['community_id']
src_row = df_node_weight[(df_node_weight['node_id']==src_node_id) & (df_node_weight['community_id']==cc_id)].iloc[0]
dst_row = df_node_weight[(df_node_weight['node_id']==dst_node_id) & (df_node_weight['community_id']==cc_id)].iloc[0]
if options.edge_agg == 'min':
edge_imp_annotate = min(src_row['importance_avg'], dst_row['importance_avg'])
if options.edge_agg == 'avg':
edge_imp_annotate = np.mean([src_row['importance_avg'], dst_row['importance_avg']])
if options.edge_agg == 'sum':
edge_imp_annotate = src_row['importance_avg'] + dst_row['importance_avg']
edge_weights = DataEdgeWeight[DataEdgeWeight['src'].isin([src_node_id, dst_node_id]) &
DataEdgeWeight['dst'].isin([src_node_id, dst_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].max()
df_edge_weight['importance'].iloc[i] = edge_imp_annotate
df_edge_weight['weight'].iloc[i] = edge_weights
df_edge_weight['weight_positive'].iloc[i] = DataEdgeWeight[DataEdgeWeight['src'].isin([src_node_id]) &
DataEdgeWeight['dst'].isin([dst_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].iloc[0]
df_edge_weight['weight_negative'].iloc[i] = DataEdgeWeight[DataEdgeWeight['src'].isin([dst_node_id]) &
DataEdgeWeight['dst'].isin([src_node_id]) &
DataEdgeWeight['id'].isin([cc_id])]['edge_weight'].iloc[0]
df_edge_weight['combined_weights'] = x_y_df['combined_weights']
# Avg edge/community.
print('Average edges per community:', df_edge_weight.shape[0]/41)
df_edge_weight.rename(columns={'source':'src', 'target': 'dst', 'community_id': 'id',
'importance': 'edge_importance', 'combined_weights': 'edge_weight'}, inplace=True)
df_edge_weight.to_csv('./results/df_edge_weight_imp-{}-{}.csv'.format(options.edge_agg, learner))
df_edge_weight.rename(columns={'edge_importance':'importance'}, inplace=True)
df_edge_weight = df_edge_weight.reset_index()
# Topk hit rate
hitrate_df = pd.DataFrame(index=['all', 'comm0', 'comm1'] + list(range(0,41)))
for k in [i*5 for i in range(1,11)]:
hitrate_list_topk_comm = []
for cid in df_edge_weight.id.unique():
df_edge_weight_sub = df_edge_weight[df_edge_weight.id==cid]
imp_largest = sorted(dict(Counter(df_edge_weight_sub.importance)).items(), reverse=True)[0][0]
count_largest = sorted(dict(Counter(df_edge_weight_sub.importance)).items(), reverse=True)[0][1]
hitrate_list_topk = []
for r in tqdm(range(0,options.random_draw), total=options.random_draw, ncols=80, mininterval=5):
random.seed(r)
if count_largest <= k:
src_id_human_topk = df_edge_weight_sub[['src','dst']].values.tolist()
else:
all_human_top_edge_idx = df_edge_weight_sub[df_edge_weight_sub.importance == imp_largest].index
human_topk_edge_idx = random.sample(list(all_human_top_edge_idx), k)
src_id_human_topk = df_edge_weight.iloc[human_topk_edge_idx][['src','dst']].values.tolist()
explainer_topk_edge = df_edge_weight_sub.sort_values(by=['edge_weight'], ascending=False)[['edge_weight', 'src', 'dst']][:k]
src_id_explainer_topk = explainer_topk_edge[['src','dst']].values.tolist()
hitrate = len([p for p in src_id_explainer_topk if p in src_id_human_topk or (p[1], p[0]) in src_id_human_topk])/k
hitrate_list_topk.append(hitrate)
hitrate_list_topk_comm.append(np.mean(hitrate_list_topk))
all_hitrate = np.mean(hitrate_list_topk_comm)
comm0_hitrate = np.mean([h for (i,h) in enumerate(hitrate_list_topk_comm) if i in comm0])
comm1_hitrate = np.mean([h for (i,h) in enumerate(hitrate_list_topk_comm) if i in comm1])
hitrate_df['top{}'.format(k)] = [all_hitrate, comm0_hitrate, comm1_hitrate] + hitrate_list_topk_comm
hitrate_df.to_csv('./results/topk-{}-{}-{}.csv'.format(options.random_draw, options.edge_agg, learner), index=True)
ours = hitrate_df.loc[['all', 'comm0', 'comm1']]
print('Our topk hit rate:', ours)
print(ours)
# In[17]:
train = hitrate_df.loc[range(0,21)]
test = hitrate_df.loc[range(21, 41)]
all = hitrate_df.loc[range(0, 41)]
np.mean(train).to_csv('./results/ours_{}_train.csv'.format(learner))
np.mean(test).to_csv('./results/ours_{}_test.csv'.format(learner))
np.mean(all).to_csv('./results/ours_{}_all.csv'.format(learner))
| eBay/xFraud | xfraud/supplement/07Learning_hybrid/ours_learn-grid-A.py | ours_learn-grid-A.py | py | 7,760 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "optparse.OptionParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.... |
19205793084 |
# Code from Chapter 3 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# Demonstration of the Perceptron on the Pima Indian dataset
# This dataset is now on kaggle https://www.kaggle.com/datasets/uciml/pima-indians-diabetes-database
# Just press F5 - runs interactively in IDE
# Ref kaggle solution https://www.kaggle.com/code/pradeepgurav/pima-diabetes-98-accuracy/notebook
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sandbox.ann.np_bare_ann import NeuralNet_By_Numpy
def do_confusion_matrix(y_train, y_pred, plt):
    """Draw an annotated confusion-matrix heatmap on a fresh matplotlib figure."""
    plt.figure()
    # Apply the seaborn styling before drawing so the heatmap picks it up.
    sns.set(color_codes=True)
    sns.set(font_scale=1)
    matrix = confusion_matrix(y_train, y_pred)
    sns.heatmap(matrix, annot=True, fmt='g')
SMALL_TRAIN_DS = '/diabetes.csv'
BIG_TRAIN_DS = '/diabetes75pc_100_times.csv'
DATASET_CSV_FILE = os.getcwd() + BIG_TRAIN_DS
EPOCH = 49
df_orig = pd.read_csv(DATASET_CSV_FILE)
feature_names = df_orig.columns[:8].tolist()
print(f'-- all features: {feature_names}')
X = df_orig[feature_names]
y = df_orig.Outcome
#print(X.shape)
#print(y.shape)
# Assumed best chosen features from the ref solution
selected_features = ['Pregnancies', 'Glucose', 'BMI', 'DiabetesPedigreeFunction']
print(f'-- selected features: {selected_features}')
# StandardScaler() scales each dimension to 0-mean and unit variance e.g. var == 1
X = StandardScaler().fit_transform(X[selected_features])
# Splitting data into training and testing
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.25, # 25%
stratify=y, # maintain balance between classes https://stackoverflow.com/questions/54600907/does-the-train-test-split-function-keep-the-balance-between-classes
#random_state=1989
)
print(f'-- Train size = {X_train.shape[0]}')
print(f'-- Test size = {X_test.shape[0]}')
train_0 = np.count_nonzero(y_train==0)
train_1 = np.count_nonzero(y_train==1)
test_0 = np.count_nonzero(y_test==0)
test_1 = np.count_nonzero(y_test==1)
print(f'-- in test: diabetic/non-diabetic = {test_1/test_0}')
print(f'-- in train: diabetic/non-diabetic = {train_1/train_0}')
print(f'-- X_train shape: {X_train.shape}')
print(f'-- y_train shape: {y_train.shape}')
# start training
print('Training..')
nnet = NeuralNet_By_Numpy(X_train, y_train, batch_size=16, hidden_layers=[128])
for _ in range(EPOCH):
nnet.iteration_train()
y_pred = nnet.iteration_predict()
y_pred = np.where(y_pred > 0.5, 1, 0)
print(' -- Accuracy: {:.2f}%'.format(accuracy_score(y_train, y_pred) * 100))
print(' -- F1 : {:.2f}%'.format(f1_score(y_train, y_pred) * 100))
# confusion matrix over training data
y_pred = nnet.predict(X_train)
y_pred = np.where(y_pred > 0.5, 1, 0)
do_confusion_matrix(y_train, y_pred, plt)
# test perf
print('Testing..')
y_pred = nnet.predict(X_test)
y_pred = np.where(y_pred > 0.5, 1, 0)
print(' -- Accuracy: {:.2f}%'.format(accuracy_score(y_test, y_pred) * 100))
print(' -- F1 : {:.2f}%'.format(f1_score(y_test, y_pred) * 100))
# confusion matrix over training data
do_confusion_matrix(y_test, y_pred, plt)
# check training perf
#train_pred = nnet.predict(X_train)
#train_pred = np.where(train_pred > 0.5, 1, 0)
# train: confusion matrix
#train_cm = confusion_matrix(y_train, train_pred)
#print(train_cm)
plt.show()
| hiryou/ml-ludus | ludus/book_practice/ml_algo_pers/Ch3/pima_bare_ann.py | pima_bare_ann.py | py | 3,774 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 33,
"usage_type": "call"
},
{
"api_n... |
43753374625 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import torch
import rospy
import numpy as np
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from yolov5_ros_msgs.msg import BoundingBox, BoundingBoxes
from yolo_new.msg import Flag,Serial_RT
IsMoving = 0
SingleSortOK = 1
class Yolo_Dect:
def __init__(self):
# load parameters
yolov5_path = rospy.get_param('/yolov5_path', '')
weight_path = rospy.get_param('~weight_path', '')
image_topic = rospy.get_param(
'~image_topic', '/camera/color/image_raw')
pub_topic = rospy.get_param('~pub_topic', '/yolov5/BoundingBoxes')
self.camera_frame = rospy.get_param('~camera_frame', '')
conf = rospy.get_param('~conf', '0.5')
# load local repository(YoloV5:v6.0)
self.model = torch.hub.load(yolov5_path, 'custom',
path=weight_path, source='local')
self.model.cpu()
# which device will be used
#if (rospy.get_param('/use_cpu', 'false')):
# self.model.cpu()
#else:
# self.model.cuda()
self.model.conf = conf
self.model.iou = 0.1 #待测试~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.color_image = Image()
self.depth_image = Image()
self.getImageStatus = False
self.classes_colors = {}
# image subscribe
self.color_sub = rospy.Subscriber(image_topic, Image, self.image_callback,
queue_size=1, buff_size=52428800)
self.flag_sub = rospy.Subscriber("/Flag_pub", Flag, self.flag_callback)#订阅移动状态话题
# output publishers
#queue_size 实时收发信息
self.position_pub = rospy.Publisher(
pub_topic, BoundingBoxes, queue_size=1)
self.image_pub = rospy.Publisher(
'/yolov5/detection_image', Image, queue_size=1)
# if no image messages
while (not self.getImageStatus):
rospy.loginfo("waiting for image.")
rospy.sleep(2)
def flag_callback(self,msg):
global IsMoving
global SingleSortOK
IsMoving = msg.isMoving
SingleSortOK = msg.singleSortOK
def image_callback(self, image):
global IsMoving
global SingleSortOK
self.boundingBoxes = BoundingBoxes()
self.boundingBoxes.header = image.header
self.boundingBoxes.image_header = image.header
self.getImageStatus = True
self.color_image = np.frombuffer(image.data, dtype=np.uint8).reshape(
image.height, image.width, -1)
self.color_image = cv2.cvtColor(self.color_image, cv2.COLOR_BGR2RGB)
self.color_image = self.white_balance_1(self.color_image)
results = self.model(self.color_image)
# xmin ymin xmax ymax confidence class name
boxs = results.pandas().xyxy[0].values
self.dectshow(self.color_image, boxs, image.height, image.width)
cv2.waitKey(3)
def white_balance_1(self,img):
'''
第一种简单的求均值白平衡法
:param img: cv2.imread读取的图片数据
:return: 返回的白平衡结果图片数据
'''
# 读取图像
r, g, b = cv2.split(img)
r_avg = cv2.mean(r)[0]
g_avg = cv2.mean(g)[0]
b_avg = cv2.mean(b)[0]
# 求各个通道所占增益
k = (r_avg + g_avg + b_avg) / 3
kr = k / r_avg #+ 0.1
kg = k / g_avg #+ 0.1
kb = k / b_avg
r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)
balance_img = cv2.merge([b, g, r])
return balance_img
def dectshow(self, org_img, boxs, height, width):
img = org_img.copy()
count = 0
for i in boxs:
count += 1
for box in boxs:
# tmp_flag = judge_result(box) # 判断识别结果是否合格
# if tmp_flag == 0:
# count -= 1
# continue
boundingBox = BoundingBox()
boundingBox.probability = np.float64(box[4])
boundingBox.xmin = np.int64(box[0])
boundingBox.ymin = np.int64(box[1])
boundingBox.xmax = np.int64(box[2])
boundingBox.ymax = np.int64(box[3])
boundingBox.xmid = (np.int64(box[0])+np.int64(box[2]))/2
boundingBox.ymid = (np.int64(box[1])+np.int64(box[3]))/2
boundingBox.num = np.int16(count)
boundingBox.Class = box[-1]
boundingBox.CNum = self.switch_class(boundingBox.Class) #传入垃圾类别并进行判断分类
boundingBox.ONum = self.switch_num(boundingBox.Class)
if box[-1] in self.classes_colors.keys():
color = self.classes_colors[box[-1]]
else:
color = np.random.randint(0, 183, 3)
self.classes_colors[box[-1]] = color
cv2.rectangle(img, (int(box[0]), int(box[1])),
(int(box[2]), int(box[3])), (int(color[0]), int(color[1]), int(color[2])), 2)
if box[1] < 20:
text_pos_y = box[1] + 30
else:
text_pos_y = box[1] - 10
# cv2.putText(img, box[-1] + label,
# (int(box[0]), int(text_pos_y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2,
# cv2.LINE_AA)
cv2.putText(img, box[-1],
(int(box[0]), int(text_pos_y) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2,
cv2.LINE_AA)
self.boundingBoxes.bounding_boxes.append(boundingBox)
self.position_pub.publish(self.boundingBoxes)
self.publish_image(img, height, width)
# cv2.imshow('YOLOv5', img)
def switch_class(self,bclass):# 根据yolo返回类别进行类别分类 CNum
if bclass == "recycle_can" or bclass == "recycle_bottle" or bclass == "recycle_paper":
return 1
elif bclass == "harm_battery" :
return 2
elif bclass == "kitchen_potato" or bclass == "kitchen_ternip" or bclass == "kitchen_carrot" :
return 3
elif bclass == "others_chip" or bclass == "others_stone":
return 4
else :
return 999
def switch_num(self,bclass): #定义垃圾类别 ONum
if bclass == 'recycle_can':
return 1
elif bclass == 'recycle_bottle':
return 2
elif bclass == 'recycle_paper':
return 3
elif bclass == 'harm_battery':
return 4
elif bclass == 'kitchen_ternip':#白萝卜
return 5
elif bclass == 'kitchen_carrot':
return 6
elif bclass == 'kitchen_potato':
return 7
elif bclass == 'others_chip':#瓷片
return 8
elif bclass == 'others_stone':
return 9
else :
return 999
def judge_result(self,box): #判断检测结果是否合格 默认返回1,不合格返回0
xmin = np.int64(box[0])
ymin = np.int64(box[1])
xmax = np.int64(box[2])
ymax = np.int64(box[3])
aera = (xmax-xmin)*(ymax-ymin)
xmid = (np.int64(box[0])+np.int64(box[2]))/2
ymid = (np.int64(box[1])+np.int64(box[3]))/2
Class = box[-1]
CNum = self.switch_class(Class)
ONum = self.switch_num(Class)
if boundingBox.ONum == 4: #电池
if aera > 100: # 示例,待测
return 0
return 1
def publish_image(self, imgdata, height, width):
image_temp = Image()
header = Header(stamp=rospy.Time.now())
header.frame_id = self.camera_frame
image_temp.height = height
image_temp.width = width
image_temp.encoding = 'bgr8'
image_temp.data = np.array(imgdata).tobytes()
image_temp.header = header
image_temp.step = width * 3
self.image_pub.publish(image_temp)
def main():
rospy.init_node('yolov5_ros', anonymous=True)
yolo_dect = Yolo_Dect()
rospy.spin()
if __name__ == "__main__":
main()
| Anxy02/Refuse-Classification-Machine | src/yolov5_ros/scripts/yolo_v5.py | yolo_v5.py | py | 8,438 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "rospy.get_param",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rospy.get_param",
... |
2819023333 | import pygame
import random
import math
import pygetwindow as gw
pygame.init()
# Configuración de la pantalla
screen_info = pygame.display.Info()
screen_width = screen_info.current_w
screen_height = screen_info.current_h
# Obtener todas las ventanas abiertas
windows = gw.getWindowsWithTitle('')
target_windows = [window for window in windows if window.isMinimized == False]
# Clase para representar la animación
class Animation:
    """A red dot sprite that steadily chases the mouse cursor."""

    def __init__(self, window):
        self.window = window
        self.speed = 5
        self.rect = pygame.Rect(0, 0, 50, 50)
        self.image = pygame.Surface((50, 50), pygame.SRCALPHA)
        self.image.fill((0, 0, 0, 0))  # transparent background
        pygame.draw.circle(self.image, (255, 0, 0), (25, 25), 25)  # red circle

    def update(self):
        """Advance the sprite one step toward the current mouse position."""
        target_x, target_y = pygame.mouse.get_pos()
        heading = math.atan2(target_y - self.rect.centery, target_x - self.rect.centerx)
        self.rect.x += self.speed * math.cos(heading)
        self.rect.y += self.speed * math.sin(heading)

    def draw(self, screen):
        """Blit the sprite at its current rectangle position."""
        screen.blit(self.image, (self.rect.x, self.rect.y))
# Lista de animaciones para cada ventana
animations = [Animation(window) for window in target_windows]
# Bucle principal
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Actualizar las animaciones
for animation in animations:
animation.update()
# Dibujar las animaciones sobre las ventanas
for animation in animations:
if animation.window.isActive:
window_rect = animation.window.left, animation.window.top, animation.window.width, animation.window.height
pygame.draw.rect(pygame.display.get_surface(), (0, 0, 0, 0), window_rect) # Limpiar ventana
animation.draw(pygame.display.get_surface())
pygame.display.flip()
pygame.quit()
| SicerBrito/Scripts | Etica/bb/f.py | f.py | py | 1,917 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.Info",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygetwindow.getWindo... |
23983252086 | import pygame
from MENU.Application import Application
from CORE.main_junction import core
#this class file served to activate the game with the selected paramters previously
class Play(Application):
    """Menu screen that launches a match with preselected parameters."""
    def __init__(self, game):
        Application.__init__(self, game)
        # [tag, display name, input device] per player; "0COM2" presumably
        # marks an AI-controlled slot -- TODO confirm the naming scheme.
        self.player_parameters = [["PLAYER1","Sanic", "keyboard1"],
                                  ["0COM2", "Alexander", "keyboard2"]]
        # [win condition, arena material] handed straight to core().
        self.game_parameters = ["score_3", "metal1"]
    def display_menu(self):
        """Run one match via core(), then restart the menu music and return."""
        self.run_display = True
        while self.run_display:
            self.game.check_events()
            # core() runs the actual game with the parameters above.
            core(self.game.system_parameters, self.player_parameters, self.game_parameters)
            # Match over: resume the looping menu soundtrack.
            pygame.mixer.music.load(r'MENU\midnight-ride-01a.wav')
            pygame.mixer.music.set_volume(0.4)
            pygame.mixer.music.play(-1)
            self.run_display = False
| SlyLeoX/Cyber-Puck | Cyberpuck_ReleaseDirectory/MENU/Play.py | Play.py | py | 866 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "MENU.Application.Application",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "MENU.Application.Application.__init__",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "MENU.Application.Application",
"line_number": 9,
"usage_type": "name"
... |
40826778374 | import datetime
import glob
import os
import subprocess
import numpy as np
import pandas as pd
import pose
import poses
import rosbag
# Forward errors so we can recover failures
# even when running commands through multiprocessing
# pooling
def full_traceback(func):
    """Decorator that re-raises exceptions with the formatted traceback
    appended to the message, so failures survive multiprocessing pools."""
    import functools
    import traceback

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            detail = "{}\n\nOriginal {}".format(exc, traceback.format_exc())
            raise type(exc)(detail)

    return wrapper
def get_files(directory, file_string):
    """Return the paths in *directory* matching the glob pattern *file_string*."""
    pattern = os.path.join(directory, file_string)
    return glob.glob(pattern)
def get_files_recursive(directory, file_string):
    """Return files matching glob pattern *file_string* found in the
    immediate subdirectories of *directory*.

    NOTE: despite the name, this only descends ONE level (only the first
    os.walk() entry — the top-level listing — is consumed), matching the
    original behavior.
    """
    matches = []
    _, subdirectories, _ = os.walk(directory).__next__() if False else next(os.walk(directory))
    for name in subdirectories:
        # Equivalent to get_files(join(directory, name), file_string),
        # inlined and using extend instead of a manual append loop.
        matches.extend(glob.glob(os.path.join(directory, name, file_string)))
    return matches
def create_directory(directory=None):
    """Create *directory* (default: ./<timestamp>) and return its path.

    Exits the process if the directory already exists, so an existing
    results directory is never overwritten.
    """
    if directory is None:  # was '== None'; identity check is the idiom
        # Default to a timestamped directory under the current working dir.
        directory = os.path.join(
            os.getcwd(), datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        )
    if os.path.exists(directory):
        print(directory + " already exists!")
        # raise SystemExit directly: the exit() helper only exists when the
        # site module is loaded, and this is equivalent.
        raise SystemExit()
    os.makedirs(directory)
    return directory
def load_dataframe(files):
    """Read every CSV path in *files* and concatenate them into one DataFrame."""
    return pd.concat(pd.read_csv(path) for path in files)
def run_command_and_save_output(command, output_filename, print_command=True):
    """Run *command* in a shell, capturing stdout and stderr to a file.

    Args:
        command: shell command string (executed with shell=True — pass
            trusted input only).
        output_filename: file that receives both stdout and stderr.
        print_command: when True, echo the command before running it.
    """
    if print_command:
        print(command)
    with open(output_filename, "w") as sink:
        subprocess.call(command, shell=True, stdout=sink, stderr=sink)
def basename(filename):
    """Return the file name without its directory or final extension."""
    stem, _ext = os.path.splitext(os.path.basename(filename))
    return stem
# TODO(rsoussan): Move these to different utilities file
def get_topic_rates(
    bag_name,
    topic,
    min_time_diff_for_gap,
    use_header_time=True,
    verbose=False,
    ignore_zero_time_diffs=True,
):
    """Scan *topic* in the rosbag *bag_name* and report message-rate stats.

    Counts inter-message time gaps >= *min_time_diff_for_gap* seconds and,
    when *verbose*, prints each gap plus mean/min/max/stddev of all time
    diffs. Returns None — results are only printed.

    Args:
        bag_name: path to the rosbag file.
        topic: single topic name to read.
        min_time_diff_for_gap: threshold (secs) above which a diff counts
            as a gap.
        use_header_time: use the message header stamp when True, otherwise
            the bag receive time.
        verbose: print gaps and summary statistics.
        ignore_zero_time_diffs: drop diffs of exactly 0 from the stats.
    """
    with rosbag.Bag(bag_name, "r") as bag:
        last_time = 0.0
        gaps = 0
        time_diffs = []
        for _, msg, t in bag.read_messages([topic]):
            # Timestamp source: header stamp vs. bag receive time.
            time = (
                msg.header.stamp.secs + msg.header.stamp.nsecs * 1.0e-9
                if use_header_time
                else t.secs + t.nsecs * 1.0e-9
            )
            time_diff = time - last_time
            # last_time == 0 marks the first message: no diff to record yet.
            if last_time != 0 and time_diff >= min_time_diff_for_gap:
                if verbose:
                    print(
                        (
                            topic
                            + " Gap: time: "
                            + str(time)
                            + ", last_time: "
                            + str(last_time)
                            + ", diff: "
                            + str(time_diff)
                        )
                    )
                gaps += 1
            if last_time != 0 and (time_diff != 0 or not ignore_zero_time_diffs):
                time_diffs.append(time_diff)
            last_time = time
        # Summary statistics over the collected diffs.
        mean_time_diff = np.mean(time_diffs)
        min_time_diff = np.min(time_diffs)
        max_time_diff = np.max(time_diffs)
        stddev_time_diff = np.std(time_diffs)
        if verbose:
            if use_header_time:
                print("Using Header time.")
            else:
                print("Using Receive time.")
            print(
                (
                    "Found "
                    + str(gaps)
                    + " time diffs >= "
                    + str(min_time_diff_for_gap)
                    + " secs."
                )
            )
            print(("Mean time diff: " + str(mean_time_diff)))
            print(("Min time diff: " + str(min_time_diff)))
            print(("Max time diff: " + str(max_time_diff)))
            print(("Stddev time diff: " + str(stddev_time_diff)))
def make_absolute_poses_from_relative_poses(absolute_poses, relative_poses, name):
    """Chain relative poses onto an absolute starting pose.

    Anchors at the absolute pose whose timestamp is closest to the first
    relative-pose timestamp, then composes each relative pose in sequence.
    Returns a new poses.Poses object called *name*.
    """
    starting_relative_time = relative_poses.times[0]
    # Find the absolute pose nearest in time to the first relative pose.
    np_times = np.array(absolute_poses.times)
    closest_index = np.argmin(np.abs(np_times - starting_relative_time))
    start_pose = absolute_poses.pose(closest_index)
    new_pose = start_pose
    new_poses_list = [start_pose]
    new_poses_times = [absolute_poses.times[closest_index]]
    for index in range(len(relative_poses.times)):
        relative_pose = relative_poses.pose(index)
        # Compose: each relative pose advances the running absolute pose.
        new_pose = new_pose * relative_pose
        new_poses_list.append(new_pose)
        new_poses_times.append(relative_poses.times[index])
    new_poses = poses.Poses(name, "")
    new_poses.init_from_poses(new_poses_list, new_poses_times)
    return new_poses
def integrate_velocities(localization_states):
    """Dead-reckon positions by forward-Euler integration of velocities.

    Multiplies each x/y/z velocity by the time step to the next sample and
    accumulates from the first position in *localization_states*. Returns a
    Poses object named "Integrated Graph Velocities".
    """
    delta_times = [
        j - i
        for i, j in zip(localization_states.times[:-1], localization_states.times[1:])
    ]
    # Make sure times are same length as velocities, ignore last velocity
    delta_times.append(0)
    # TODO(rsoussan): Integrate angular velocities?
    # TODO(rsoussan): central difference instead?
    # Forward-Euler step per axis: dx = v * dt.
    x_increments = [
        velocity * delta_t
        for velocity, delta_t in zip(localization_states.velocities.xs, delta_times)
    ]
    y_increments = [
        velocity * delta_t
        for velocity, delta_t in zip(localization_states.velocities.ys, delta_times)
    ]
    z_increments = [
        velocity * delta_t
        for velocity, delta_t in zip(localization_states.velocities.zs, delta_times)
    ]
    return add_increments_to_absolute_pose(
        x_increments,
        y_increments,
        z_increments,
        localization_states.positions.xs[0],
        localization_states.positions.ys[0],
        localization_states.positions.zs[0],
        localization_states.times,
        "Integrated Graph Velocities",
    )
def add_increments_to_absolute_pose(
    x_increments,
    y_increments,
    z_increments,
    starting_x,
    starting_y,
    starting_z,
    times,
    poses_name="Increment Poses",
):
    """Build absolute positions from a start point plus per-step increments.

    Cumulatively sums each axis's increments, offsets them by the starting
    coordinates, and prepends the start position so the output aligns with
    *times* (the final cumulative point, which has no timestamp, is
    dropped). Returns a Poses object; only positions are filled in.
    """
    integrated_positions = poses.Poses(poses_name, "")
    cumulative_x_increments = np.cumsum(x_increments)
    integrated_positions.positions.xs = [
        starting_x + cumulative_x_increment
        for cumulative_x_increment in cumulative_x_increments
    ]
    cumulative_y_increments = np.cumsum(y_increments)
    integrated_positions.positions.ys = [
        starting_y + cumulative_y_increment
        for cumulative_y_increment in cumulative_y_increments
    ]
    cumulative_z_increments = np.cumsum(z_increments)
    integrated_positions.positions.zs = [
        starting_z + cumulative_z_increment
        for cumulative_z_increment in cumulative_z_increments
    ]
    # Add start positions
    integrated_positions.positions.xs.insert(0, starting_x)
    integrated_positions.positions.ys.insert(0, starting_y)
    integrated_positions.positions.zs.insert(0, starting_z)
    # Remove last elements (no timestamp for these)
    del integrated_positions.positions.xs[-1]
    del integrated_positions.positions.ys[-1]
    del integrated_positions.positions.zs[-1]
    integrated_positions.times = times
    return integrated_positions
| InnovativeDigitalSolution/NASA_astrobee | tools/graph_bag/scripts/utilities.py | utilities.py | py | 7,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "traceback.format_exc",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"li... |
41035568274 | import typing
from sqlalchemy import and_
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import column
from sqlalchemy import DateTime
from sqlalchemy import false
from sqlalchemy import Float
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import true
# builtin.pyi stubs define object.__eq__() as returning bool, which
# can't be overridden (it's final). So for us to type `__eq__()` and
# `__ne__()`, we have to use type: ignore[override]. Test if does this mean
# the typing tools don't know the type, or if they just ignore the error.
# (it's fortunately the former)
expr1 = column("x", Integer) == 10
c1 = column("a", String)
c2 = column("a", Integer)
expr2 = c2.in_([1, 2, 3])
expr2_set = c2.in_({1, 2, 3})
expr2_gen = c2.in_((x for x in (1, 2, 3)))
nexpr2 = c2.not_in([1, 2, 3])
nexpr2_set = c2.not_in({1, 2, 3})
nexpr2_gen = c2.not_in((x for x in (1, 2, 3)))
short_cir1 = and_(True, c2 == 5)
short_cir2 = or_(False, c2 == 5)
short_cir3 = and_(true(), c2 == 5)
short_cir4 = or_(false(), c2 == 5)
# EXPECTED_MYPY: Missing positional argument "initial_clause" in call to "and_"
no_empty_1 = and_()
# EXPECTED_MYPY: Missing positional argument "initial_clause" in call to "or_"
no_empty_2 = or_()
expr3 = c2 / 5
expr4 = -c2
expr5 = ~(c2 == 5)
q = column("q", Boolean)
expr6 = ~q
expr7 = c1 + "x"
expr8 = c2 + 10
stmt = select(column("q")).where(lambda: column("g") > 5).where(c2 == 5)
expr9 = c1.bool_op("@@")(func.to_tsquery("some & query"))
def test_issue_9418() -> None:
and_(c1.is_(q))
and_(c1.is_not(q))
and_(c1.isnot(q))
and_(c1.not_in(["x"]))
and_(c1.notin_(["x"]))
and_(c1.not_like("x"))
and_(c1.notlike("x"))
and_(c1.not_ilike("x"))
and_(c1.notilike("x"))
def test_issue_9451() -> None:
# issue #9451
c1.cast(Integer)
c1.cast(Float)
c1.op("foobar")("operand").cast(DateTime)
cast(c1, Float)
cast(c1.op("foobar")("operand"), DateTime)
def test_issue_9650_char() -> None:
and_(c1.contains("x"))
and_(c1.startswith("x"))
and_(c1.endswith("x"))
and_(c1.icontains("x"))
and_(c1.istartswith("x"))
and_(c1.iendswith("x"))
def test_issue_9650_bitwise() -> None:
# EXPECTED_TYPE: BinaryExpression[Any]
reveal_type(c2.bitwise_and(5))
# EXPECTED_TYPE: BinaryExpression[Any]
reveal_type(c2.bitwise_or(5))
# EXPECTED_TYPE: BinaryExpression[Any]
reveal_type(c2.bitwise_xor(5))
# EXPECTED_TYPE: UnaryExpression[int]
reveal_type(c2.bitwise_not())
# EXPECTED_TYPE: BinaryExpression[Any]
reveal_type(c2.bitwise_lshift(5))
# EXPECTED_TYPE: BinaryExpression[Any]
reveal_type(c2.bitwise_rshift(5))
# EXPECTED_TYPE: ColumnElement[int]
reveal_type(c2 << 5)
# EXPECTED_TYPE: ColumnElement[int]
reveal_type(c2 >> 5)
if typing.TYPE_CHECKING:
# as far as if this is ColumnElement, BinaryElement, SQLCoreOperations,
# that might change. main thing is it's SomeSQLColThing[bool] and
# not 'bool' or 'Any'.
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[builtins.bool\]
reveal_type(expr1)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnClause\[builtins.str.?\]
reveal_type(c1)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnClause\[builtins.int.?\]
reveal_type(c2)
# EXPECTED_RE_TYPE: sqlalchemy..*BinaryExpression\[builtins.bool\]
reveal_type(expr2)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[Union\[builtins.float, .*\.Decimal\]\]
reveal_type(expr3)
# EXPECTED_RE_TYPE: sqlalchemy..*UnaryExpression\[builtins.int.?\]
reveal_type(expr4)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[builtins.bool.?\]
reveal_type(expr5)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[builtins.bool.?\]
reveal_type(expr6)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[builtins.str\]
reveal_type(expr7)
# EXPECTED_RE_TYPE: sqlalchemy..*ColumnElement\[builtins.int.?\]
reveal_type(expr8)
# EXPECTED_TYPE: BinaryExpression[bool]
reveal_type(expr9)
| sqlalchemy/sqlalchemy | test/typing/plain_files/sql/sql_operations.py | sql_operations.py | py | 4,144 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "sqlalchemy.column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy... |
30094361627 |
#### with lru caching method #########
from functools import lru_cache
@lru_cache(maxsize=16)
def fib(n):
if n<=2 :
return 1
return fib(n-1) + fib(n-2)
print(fib(50))
######################### with memoization technique #########
def fib(n,memo):
if (n in memo):
return memo[n]
if n<=2 :
return 1
memo[n] = fib(n-1,memo) + fib(n-2,memo)
return memo[n]
memo={}
print(fib(15,memo))
| HemantJaiman/Dynamic_programming | fibonacci.py | fibonacci.py | py | 474 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "functools.lru_cache",
"line_number": 6,
"usage_type": "call"
}
] |
27487883436 | import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need
# fine tuning.
buildOptions = dict(
packages = [], excludes = [],
include_files = ['icon','toc'],
)
name = 'example'
if sys.platform == 'win32':
name = name + '.exe'
base = None
if sys.platform == "win32":
base = "Win32GUI"
executables = [
Executable('main.py', base = base, targetName = name,
compress = True,
)
]
setup(name='Example',
version = '1.0',
description = 'An example program',
options = dict(build_exe = buildOptions),
executables = executables) | lugandong/PyQt5Fastboot | setup_cxfreeze.py | setup_cxfreeze.py | py | 638 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.platform",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cx_Freeze.Executable",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cx_Freeze.se... |
31983782662 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.ops import knn_points, ball_query
from .logger import logger
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S, [K]]
Return:
new_points:, indexed points data, [B, S, [K], C]
"""
raw_size = idx.size()
idx = idx.reshape(raw_size[0], -1)
res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))
return res.reshape(*raw_size, -1)
def sample_points_from_ball_query(pt_xyz, pt_feats, center_point, k, radius):
_, ball_idx, xyz = ball_query(center_point, pt_xyz, K=k, radius=radius, return_nn=True)
invalid = torch.sum(ball_idx == -1) > 0
if invalid:
logger.warning(f"ball query returns {torch.sum(ball_idx == -1)} / {torch.numel(ball_idx)} -1 in its index, "
f"which means you need to increase raidus or decrease K")
points = index_points(pt_feats, ball_idx).squeeze(1)
xyz = xyz.squeeze(1)
return xyz, points
def sample_points_from_knn(pt_xyz, pt_feats, center_point, k):
_, knn_idx, xyz = knn_points(center_point, pt_xyz, K=k, return_nn=True)
points = index_points(pt_feats, knn_idx).squeeze(1)
xyz = xyz.squeeze(1)
return xyz, points | lixiny/POEM | lib/utils/points_utils.py | points_utils.py | py | 1,345 | python | en | code | 51 | github-code | 1 | [
{
"api_name": "torch.gather",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytorch3d.ops.ball_query",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logger.logger.warning... |
73865842594 | import asyncio
from supybot import callbacks, httpserver, log
from .helpers import github
# Import files that will hook themself up when imported
from .events import ( # noqa
commit_comment,
discussion,
issue,
pull_request,
push,
tag,
)
from .patches import gidgethub # noqa
from .protocols import irc as irc_protocols # noqa
from .protocols import discord as discord_protocols # noqa
class GitHubCallback(httpserver.SupyHTTPServerCallback):
name = "GitHub"
defaultResponse = "404: page not found"
def doPost(self, handler, path, form):
if path != "/":
self.send_response(404)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(b"404: page not found")
return
headers = {key.lower(): value for key, value in dict(self.headers).items()}
try:
asyncio.run(github.dispatch(headers, form))
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(b"200: OK")
except Exception:
log.exception("Failed to handle GitHub event")
self.send_response(403)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(b"403: failed processing event")
class GitHub(callbacks.Plugin):
"""Translates GitHub events to IRC messages."""
def __init__(self, irc):
self.__parent = super(GitHub, self)
callbacks.Plugin.__init__(self, irc)
httpserver.hook("github", GitHubCallback())
def die(self):
self.__parent.die()
httpserver.unhook("github")
Class = GitHub
| OpenTTD/DorpsGek | plugins/GitHub/plugin.py | plugin.py | py | 1,760 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "supybot.httpserver.SupyHTTPServerCallback",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "supybot.httpserver",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "asyncio.run",
"line_number": 36,
"usage_type": "call"
},
{
"api... |
20165490930 | import pytest
import for_mocking
"""
1. Реализовать программу на Python. Программа может содержать любое количество методов и классов (>0), но обязательно должна иметь класс main.
2. Имитировать:
a. Метод созданного класса (метод не должен являться генератором).
b. Параметр внутри метода класса.
c. Класс.
3. Спровоцировать имитированную ошибку.
4. Применить декоратор @patch на любом имитируемом объекте (классе, методе и т.д.).
Выбирается студентом самостоятельно.
5. Имитировать объект-генератор (см.пр. 26.6.3.3.[1]).
"""
def test_format_status(mocker):
main_object = for_mocking.Main('Not OK')
mocker.patch.object(main_object, 'format_status',
return_value=['Status: OK;'])
assert main_object.format_status() == ['Status: OK;']
assert main_object.status == 'Not OK'
def test_property_mocking(mocker):
main_object = for_mocking.Main('Not OK')
main_object = mocker.patch.object(main_object, 'format_status')
main_object.format_status(mocker.ANY)
main_object.format_status.assert_called_once_with(mocker.ANY)
def test_class_mocking(mocker):
mocker.patch.object(for_mocking, 'Main')
for_mocking.Main.format_status.return_value = 123
mocked_instance = for_mocking.Main('wow')
mocked_instance.format_status()
mocked_instance.format_status.assert_called_once_with() # pylint: disable=no-member
@pytest.mark.xfail(raises=Exception)
def test_generator(mocker):
main_object = for_mocking.Main('Not OK')
main_object = mocker.patch.object(main_object, 'format_status')
main_object.format_status(123)
def test_letter_generator(mocker):
main_object = for_mocking.Main('Not OK')
mocker.patch.object(main_object, 'letter_generator',
return_value=iter('Status: OK;'))
assert list(main_object.letter_generator()) == list('Status: OK;')
| iwouldnote/travis_codecod_test | test_mocking.py | test_mocking.py | py | 2,227 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "for_mocking.Main",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "for_mocking.Main",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "for_mocking.Main",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "for_mocking.M... |
35915131671 | # Mathematics > Probability > Sherlock and Probability
# Help Sherlock in finding the probability.
#
# https://www.hackerrank.com/challenges/sherlock-and-probability/problem
# https://www.hackerrank.com/contests/infinitum-jul14/challenges/sherlock-and-probability
# challenge id: 2534
#
from fractions import Fraction
for _ in range(int(input())):
n, k = map(int, input().split())
bits = input()
counter = [0] * (n + 1)
for i, bit in enumerate(bits):
counter[i + 1] = counter[i] + int(bit)
p = 0
for i, bit in enumerate(bits):
if bit == "1":
p += counter[min(n, i + k + 1)] - counter[max(0, i - k)]
r = Fraction(p, n * n)
print("{}/{}".format(r.numerator, r.denominator))
| rene-d/hackerrank | mathematics/probability/sherlock-and-probability.py | sherlock-and-probability.py | py | 738 | python | en | code | 72 | github-code | 1 | [
{
"api_name": "fractions.Fraction",
"line_number": 25,
"usage_type": "call"
}
] |
17811262113 | #!/usr/bin/env python3
import numpy as np
import h5py
import argparse
import matplotlib
import matplotlib.pyplot as plt
import csv
import glob
import os
def get_dataset_keys(f):
keys = []
f.visit(lambda key : keys.append(key) if isinstance(f[key], h5py.Dataset) else None)
return keys
def plot(time, data, key, figure_name):
os.system("mkdir -p {}min_max_mean/".format(figure_name))
if "min" in key or "mean" in key or "max" in key:
fig, (ax1) = plt.subplots(nrows=1, sharex=True)
plt.subplots_adjust(hspace=0.1)
ax1.plot(time, data[key], c="k") # , s=0.1, alpha=0.3)
ax1.set_title("Key: {}".format(key))
# ax1.set_xlabel(r'$r$')
# ax1.set_ylabel(r'$\rho$')
# ax1.set_xlim(0, r_max)
# ax1.set_ylim(0, 4.0)
fig.tight_layout()
plt.savefig("{}min_max_mean/min_max_mean_{}.png".format(figure_name, key))
else:
fig, (ax1) = plt.subplots(nrows=1, sharex=True)
plt.subplots_adjust(hspace=0.1)
# linestyle="dotted",
# linestyle="dashed",
# linestyle="dashdot"
ax1.plot(time, data["{}_min".format(key)], c="k", linestyle="dotted", label="min") # , s=0.1, alpha=0.3)
ax1.plot(time, data["{}_max".format(key)], c="k", linestyle="dashed", label="max")
ax1.plot(time, data["{}_mean".format(key)], c="k", linestyle="dashdot", label="mean")
ax1.set_title("Key: {}".format(key))
ax1.legend(loc="best")
# ax1.set_xlabel(r'$r$')
# ax1.set_ylabel(r'$\rho$')
# ax1.set_xlim(0, r_max)
# ax1.set_ylim(0, 4.0)
fig.tight_layout()
plt.savefig("{}min_max_mean/min_max_mean_{}.png".format(figure_name, key))
if __name__ == '__main__':
plot_all = False
parser = argparse.ArgumentParser(description="min/max/mean for all entries")
parser.add_argument("--input", "-i", metavar="str", type=str, help="input file",
nargs="?", default="../output")
parser.add_argument("--output", "-o", metavar="str", type=str, help="output directory",
nargs="?", default="-")
parser.add_argument("--key", "-k", metavar="str", type=str, help="output directory",
nargs="?", default="-")
parser.add_argument('--all', "-a", action='store_true')
args = parser.parse_args()
if args.all:
plot_all = True
header = []
detailed_header = []
data = {}
with open(args.input, newline='\n') as csvfile:
reader = csv.reader(csvfile, delimiter=';', quotechar='|')
for i_row, row in enumerate(reader):
if i_row == 0:
for elem in row:
header.append(elem)
elif i_row == 1:
for elem in row:
detailed_header.append(elem)
data[elem] = []
else:
for i_elem, elem in enumerate(row):
data[detailed_header[i_elem]].append(float(elem))
# print("data[{}] = {}".format(args.key, data[args.key]))
if args.key in header or args.key in detailed_header:
plot(data["time_max"], data, args.key, args.output)
if plot_all:
for _header in header:
print("plotting {} ...".format(_header))
plot(data["time_max"], data, _header, args.output)
| MichaelSt98/milupHPC | postprocessing/PlotMinMaxMean.py | PlotMinMaxMean.py | py | 3,358 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "h5py.Dataset",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
24477230064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from django.conf.urls import patterns, url
from .views import PromoDetail, PromoList, ChannelPromoList
urlpatterns = patterns(
'',
url(
r'^$',
PromoList.as_view(),
name='list_promos'
),
url(
r'^channel/(?P<channel__long_slug>[\w//-]+)$',
ChannelPromoList.as_view(),
name='channel_promo'
),
url(
r'^(?P<slug>[\w-]+)/(?P<result>[\w-]+)$',
PromoDetail.as_view(),
name='result_promo'
),
url(
r'^(?P<slug>[\w-]+)\.html$',
PromoDetail.as_view(),
name='open_promo'
),
)
| opps/opps-promos | opps/promos/urls.py | urls.py | py | 646 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.PromoList.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": ... |
41292812898 | import decimal
import io
from nose.tools import assert_equal
import utcdatetime
from os.path import join as pjoin
from snipe import WatchListSnipesParser, parse_datetime
def test_get_snipes():
with io.open(pjoin('sample_data', 'watch_list.html')) as f:
parser = WatchListSnipesParser(f.read())
assert_equal(
list(parser.get_snipes()), [
('113143572176', decimal.Decimal('7.00')),
('292631197506', decimal.Decimal('44.00')),
]
)
def test_parse_snipe_note():
TEST_CASES = [
('snipe: 45', decimal.Decimal(45.00)),
('snipe: 45.00', decimal.Decimal(45.00)),
('snipe: £45.00', decimal.Decimal(45.00)),
]
for note, expected_amount in TEST_CASES:
yield assert_equal, WatchListSnipesParser._parse_snipe_note(note), expected_amount
def test_parse_datetime():
TEST_CASES = [
(
'(11 Jul, 2018\n09:58:34 BST)',
utcdatetime.utcdatetime(2018, 7, 11, 8, 58, 34)
),
(
'(11 Jan, 2018\n09:58:34 GMT)',
utcdatetime.utcdatetime(2018, 1, 11, 9, 58, 34)
),
]
for string, expected in TEST_CASES:
yield assert_equal, parse_datetime(string), expected
| fawkesley/ebay-sniper | test_parser.py | test_parser.py | py | 1,241 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "io.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "snipe.WatchListSnipesParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_eq... |
73380743073 | import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from data_loader import get_data, generator
def cross_validation(model, X_train, X_train_angle, Y_train, X_test, X_test_angle, K):
folds = list(StratifiedKFold(n_splits=K, shuffle=True).split(X_train, Y_train))
y_test_pred = 0
y_train_pred = 0
y_valid_pred = 0.0 * Y_train
for j, (train_idx, test_idx) in enumerate(folds):
print('\n===================FOLD=', j)
X_cv = X_train[train_idx]
Y_cv = Y_train[train_idx]
X_angle_cv = X_train_angle[train_idx]
X_hold = X_train[test_idx]
Y_hold = Y_train[test_idx]
X_angle_hold = X_train_angle[test_idx]
# define file path and get callbacks
model.reset()
file_path = model.model_kind + "_model_weights_%s.hdf5" % j
model.train(X_cv, X_angle_cv, Y_cv, X_hold, X_angle_hold, Y_hold, file_path)
# Getting Training Score
loss, acc = model.eval(X_cv, X_angle_cv, Y_cv)
print('Train loss:', loss, ' Train accuracy:', acc)
# Getting Test Score
loss, acc = model.eval(X_hold, X_angle_hold, Y_hold)
print('Test loss:', loss, ' Test accuracy:', acc)
# Getting validation Score.
y_valid_pred[test_idx] = model.predict(X_hold, X_angle_hold)
y_test_pred += model.predict(X_test, X_test_angle)
y_train_pred += model.predict(X_train, X_train_angle)
y_test_pred = y_test_pred / K
y_train_pred = y_train_pred / K
print('\n Train Loss = ', log_loss(Y_train, y_train_pred))
print(' Train Acc = ', np.sum(Y_train == (y_train_pred > 0.5)) / Y_train.shape[0])
print(' Val Loss = ', log_loss(Y_train, y_valid_pred))
print(' Val Acc =', np.sum(Y_train == (y_valid_pred > 0.5)) / Y_train.shape[0])
return y_test_pred
| hzxsnczpku/nishiyami | train.py | train.py | py | 1,980 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.log_loss",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 45,
"usage_type": "call"
},
{
"api_na... |
15758517730 | from django.urls import path, include
from website.views import home, blog, perfil, login, acessar, cadastrar
urlpatterns = [
path('', home),
path('blog', blog),
path('login', login),
path('acessar', acessar),
path('perfil', perfil),
path('cadastrar', cadastrar),
]
| isadoraperes/projeto-demoday | website/urls.py | urls.py | py | 291 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "website.views.home",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "website.views.b... |
1990468444 | from msilib.schema import Error
import matplotlib.pyplot as plt
import numpy as np
import boto3
import os
def draw_chart(result, user_id, practice_id, gender):
w_min_jitter = 1.599
w_max_jitter = 2.310
w_min_shimmer = 7.393
w_max_shimmer = 12.221
m_min_jitter = 2.159
m_max_jitter = 3.023
m_min_shimmer = 10.132
m_max_shimmer = 13.526
min_speed = 96
max_speed = 124
font_size = 12
# 차트 그리기
# 1) speed
speed_x = ['slow', 'user', 'fast']
speed_y = [min_speed, result['voice']['speed'], max_speed]
speed_colors = ['lightgray', 'mediumpurple', 'silver']
plt.xlabel('standard', size=font_size)
plt.ylabel('Words Per Minute', size=font_size)
bar_chart=plt.bar(speed_x, speed_y, color=speed_colors)
for rect in bar_chart:
height = rect.get_height()
plt.text(rect.get_x()+rect.get_width()/2.0, height-font_size, height, ha='center', va='bottom', size=12)
plt.savefig(f'./{user_id}_{practice_id}_speed.png')
plt.clf()
# 2) shimmer, jitter
if gender == 'W':
mins = [w_min_jitter, w_min_shimmer]
maxs = [w_max_jitter, w_max_shimmer]
else:
mins = [m_min_jitter, m_min_shimmer]
maxs = [m_max_jitter, m_max_shimmer]
users = [result['voice']['jitter'], result['voice']['shimmer']]
checks = ['jitter', 'shimmer']
bar_width=0.25
index = np.arange(2)
b1 = plt.bar(index, mins, bar_width, color='lightgray', label='mins')
b2 = plt.bar(index+bar_width, users, bar_width, color='mediumpurple', label='users')
b3 = plt.bar(index+2*bar_width, maxs, bar_width, color='silver', label='maxs')
plt.legend()
plt.savefig(f'./{user_id}_{practice_id}_shimmer_jitter.png')
plt.clf()
# 3) closing remarks
closing_remark_ratio = [result['voice']['closing_remarks'], 100-result['voice']['closing_remarks']]
closing_remark_labels = ['correctly recognized', 'recognization failed']
closing_remark_colors = ['thistle', 'mediumpurple']
wedgeprops = {'width': 0.7, 'edgecolor': 'w', 'linewidth': 5}
plt.pie(closing_remark_ratio, labels=closing_remark_labels, autopct='%.1f%%', startangle=180, colors = closing_remark_colors, wedgeprops=wedgeprops)
plt.savefig(f'./{user_id}_{practice_id}_closing_remarks.png')
plt.clf()
# 4) movement
move_duration = result['video']['pose']['inclined_duration']
+result['video']['pose']['first_duration']
+result['video']['pose']['second_duration']
+result['video']['pose']['third_duration']
+result['video']['eyes']['script_duration']
+ result['video']['eyes']['around_duration']
+ result['video']['eyes']['face_move_duration']
move_ratio = [result['video']['pose']['total_duration'], move_duration]
move_labels = ['normal', 'bad movement']
move_colors = ['thistle', 'mediumpurple']
plt.pie(move_ratio, labels=move_labels, autopct='%.1f%%', startangle=180, colors=move_colors, wedgeprops=wedgeprops)
plt.savefig(f'./{user_id}_{practice_id}_movement.png')
plt.clf()
# 5) movement detail
bad_x = ['around', 'inclined', 'first', 'second', 'third', 'script', 'face_move']
bad_y = [
result['video']['eyes']['around_duration'],
result['video']['pose']['inclined_duration'],
result['video']['pose']['first_duration'],
result['video']['pose']['second_duration'],
result['video']['pose']['third_duration'],
result['video']['eyes']['script_duration'],
result['video']['eyes']['face_move_duration']
]
bad_colors = ['thistle', 'plum', 'mediumpurple', 'mediumslateblue', 'mediumorchid', 'darkorchid', 'rebeccapurple']
plt.xlabel('sort', size=font_size)
plt.ylabel('duration', size=font_size)
bar_chart=plt.bar(bad_x, bad_y, color=bad_colors)
plt.savefig(f'./{user_id}_{practice_id}_movement_detail.png')
plt.clf()
def upload_chart_to_s3(user_id, practice_id):
s3=boto3.client('s3')
bucket_name = "sookpeech-wavfile"
# chart 업로드하기
files = {
"closing_remarks" : f'./{user_id}_{practice_id}_closing_remarks.png',
"movement" : f'./{user_id}_{practice_id}_movement.png',
"movement_detail": f'./{user_id}_{practice_id}_movement_detail.png',
"shimmer_jitter" : f'./{user_id}_{practice_id}_shimmer_jitter.png',
"speed" : f'./{user_id}_{practice_id}_speed.png'
}
for key in files.keys():
try:
file = open(files[key], 'rb')
s3.upload_fileobj(file, bucket_name, f'{user_id}/{practice_id}/{key}.png', ExtraArgs={'ContentType': 'image/png', 'ACL':'public-read'})
except:
print(f'failed to upload chart image = {key}')
def delete_image(user_id, practice_id):
files = {
"closing_remarks" : f'./{user_id}_{practice_id}_closing_remarks.png',
"movement" : f'./{user_id}_{practice_id}_movement.png',
"movement_detail": f'./{user_id}_{practice_id}_movement_detail.png',
"shimmer_jitter" : f'./{user_id}_{practice_id}_shimmer_jitter.png',
"speed" : f'./{user_id}_{practice_id}_speed.png'
}
for key in files.keys():
if os.path.exists(files[key]):
os.remove(files[key])
| Sookpeech/django-analysis | sookpeech_analysis/analysis/make_chart.py | make_chart.py | py | 5,246 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "m... |
73428873635 | #!/usr/bin/env python
import time
import openstack
NODE_COUNT = 43
def get_connection():
# openstack.enable_logging(debug=True)
conn = openstack.connect()
return conn
def main():
conn = get_connection()
for i in range(NODE_COUNT):
name = "euclid-ral_compute_%d" % i
conn.delete_server(name, wait=True)
for i in range(NODE_COUNT):
name = "euclid-ral_compute_%d" % i
conn.delete_volume(name, wait=True)
if __name__ == '__main__':
main()
| astrodb/euclid-saas | delete_servers_euclid.py | delete_servers_euclid.py | py | 500 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "openstack.connect",
"line_number": 7,
"usage_type": "call"
}
] |
11720956852 | # -*- coding: utf-8 -*-
"""
Preppin' Data 2020: Week 9 - C&BS Co: Political Monitoring
https://preppindata.blogspot.com/2020/02/2020-week-9.html
- Input data
- Remove the Average Record for the polls
- Clean up your Dates
- Remove any Null Poll Results
- Form a Rank (modified competition) of the candidates per Poll based on their results
- Determine the spread for each poll from 1st Place to 2nd Place
- Rename Sample Types: RV = Registered Voter, LV = Likely Voter, null = Unknown
- Output the Data
- Optional: Build the Viz
Author: Kelly Gilbert
Created: 2022-02-02
Requirements:
- input dataset:
- PD 2020 Wk 9 Input - Sheet1.csv
- output dataset (for results check only):
- PD 2020 Wk 9 Output.csv
"""
from numpy import nan, where
import pandas as pd
#---------------------------------------------------------------------------------------------------
# input the data
#---------------------------------------------------------------------------------------------------
# read in the file, melt the candidates into rows, remove nulls, remove averages
df = pd.read_csv(r'.\inputs\PD 2020 Wk 9 Input - Sheet1.csv', na_values='--')\
.melt(id_vars=['Poll', 'Date', 'Sample'], var_name='Candidate', value_name='Poll Results')\
.dropna(subset=['Poll Results'])\
.query("~Poll.str.contains('Average')", engine='python')
#---------------------------------------------------------------------------------------------------
# process the data
#---------------------------------------------------------------------------------------------------
# clean up end date
df['End Date'] = pd.to_datetime(df['Date'].str.extract('.*- (\d+/\d+)', expand=False) + '/2020')
df['End Date'] = where(df['End Date'].dt.month >= 7,
df['End Date'] + pd.DateOffset(years=-1),
df['End Date'])
# form a Rank (modified competition) of the candidates per Poll based on their results
df['Rank'] = df.groupby(['Poll', 'End Date', 'Sample'])['Poll Results'].rank(method='max', ascending=False)\
.astype(int)
# difference in poll results between first and second
df['Spread from 1st to 2nd Place'] = \
df.groupby(['Poll', 'End Date', 'Sample'], as_index=False)['Poll Results']\
.transform(lambda x: x.max() - x.nlargest(2).min())
# rename sample types
sample_map = {'.*RV' : 'Registered Voter', '.*LV' : 'Likely Voter', nan : 'Unknown'}
df['Sample Type'] = df['Sample'].replace(sample_map, regex=True)
#---------------------------------------------------------------------------------------------------
# output the file
#---------------------------------------------------------------------------------------------------
out_cols = ['Candidate', 'Poll Results', 'Spread from 1st to 2nd Place', 'Rank', 'End Date',
'Sample Type', 'Poll']
df.to_csv(r'.\outputs\output-2020-09.csv', index=False, columns=out_cols, date_format='%d/%m/%Y')
#---------------------------------------------------------------------------------------------------
# create chart
#---------------------------------------------------------------------------------------------------
from bokeh.io import output_file
from bokeh.layouts import row
from bokeh.models import CustomJS, Legend, DatetimeTickFormatter, Select, Title
from bokeh.plotting import figure, show
# color constants
color_selected = '#0066cc'
color_deselected = '#bab0ac'
# set the output file path
output_file('dimensions.html')
# subset of registered voters
df_rv = df.loc[df['Sample Type']=='Registered Voter']\
.sort_values(by=['Candidate', 'End Date', 'Poll'])
# add a figure and format it
p = figure(width=900, height=500, x_axis_type='datetime')
p.add_layout(Title(text='Data from: realclearpolitics.com; Sample Type: Registered Voter',
text_font_size='9pt'), 'above')
p.add_layout(Title(text='2020 Democratic Presidential Nominations', text_font_size="24pt"), 'above')
p.xaxis.formatter=DatetimeTickFormatter(days=["%Y-%m-%d"])
p.y_range.flipped = True
p.add_layout(Legend(), 'right')
# add a line and circles for each candidate option
candidates = sorted(df_rv['Candidate'].unique())
line_dict = {}
circle_dict = {}
for i, c in enumerate(candidates):
line_dict[c] = p.line(df_rv.loc[df['Candidate']==c]['End Date'],
df_rv.loc[df['Candidate']==c]['Rank'],
legend_label=c, line_width=2,
color=color_selected if i==0 else color_deselected)
circle_dict[c] = p.circle(df_rv.loc[df['Candidate']==c]['End Date'],
df_rv.loc[df['Candidate']==c]['Rank'],
legend_label=c, size=7,
line_width=0,
fill_color=color_selected if i==0 else color_deselected)
# create a drop-down menu
menu = Select(options=candidates, value=candidates[0], title='Select an item:')
# link the plot and the button using a callback function
# cb_obj = the model that triggered the callback (e.g. button model, dropdown model)
# args = list of name=object values you want to have accessible inside the callback function
# can't assign cb_obj.value to a model property directly; you have to store it in a variable first
callback = CustomJS(args=dict(p=p, lines=line_dict, circles=circle_dict), code="""
const t = cb_obj.value;
// make the selected item's marks blue and the rest gray
for (let i in lines) {
if (i == t) {
lines[i].glyph.line_color = '""" + color_selected + """';
circles[i].glyph.fill_color = '""" + color_selected + """';
} else {
lines[i].glyph.line_color = '""" + color_deselected + """';
circles[i].glyph.fill_color = '""" + color_deselected + """';
}
}
""")
menu.js_on_change('value', callback)
# display the layout
chart_layout = row(p, menu)
show(chart_layout)
#---------------------------------------------------------------------------------------------------
# check results
#---------------------------------------------------------------------------------------------------
solution_files = ['PD 2020 Wk 9 Output.csv']
my_files = ['output-2020-09.csv']
unique_cols = [['Candidate', 'Poll', 'End Date', 'Sample Type']]
col_order_matters = True
round_dec = 8
for i, solution_file in enumerate(solution_files):
print('---------- Checking \'' + solution_file + '\' ----------\n')
# read in the files
df_sol = pd.read_csv('.\\outputs\\' + solution_file, encoding='utf-8')
df_mine = pd.read_csv('.\\outputs\\' + my_files[i], encoding='utf-8')
# are the columns the same?
solution_cols = list(df_sol.columns)
myCols = list(df_mine.columns)
if not col_order_matters:
solution_cols.sort()
myCols.sort()
col_match = False
if solution_cols != myCols:
print('*** Columns do not match ***')
print(' Columns in solution: ' + str(list(df_sol.columns)))
print(' Columns in mine : ' + str(list(df_mine.columns)))
print('\n\n')
else:
print('Columns match\n')
col_match = True
# are the values the same? (only check if the columns matched)
if col_match:
errors = 0
df_compare = df_sol.merge(df_mine, how='outer', on=unique_cols[i],
suffixes=['_sol', '_mine'], indicator=True)
# extra/missing records
if len(df_compare[df_compare['_merge'] != 'both']) > 0:
print('*** Missing or extra records ***\n')
print('In solution, not in mine:\n')
print(df_compare[df_compare['_merge'] == 'left_only'][unique_cols[i]])
print('\n\nIn mine, not in solution:\n')
print(df_compare[df_compare['_merge'] == 'right_only'][unique_cols[i]])
errors += 1
# for the records that matched, check for mismatched values
for c in [c for c in df_sol.columns if c not in unique_cols[i]]:
if 'float' in df_compare[f'{c}_sol'].dtype.name:
df_compare[f'{c}_sol'] = df_compare[f'{c}_sol'].round(round_dec)
df_compare[f'{c}_mine'] = df_compare[f'{c}_mine'].round(round_dec)
unmatched = df_compare[(df_compare['_merge']=='both')
& (df_compare[f'{c}_sol'] != df_compare[f'{c}_mine'])]
if len(unmatched) > 0:
print(f'*** Values do not match: {c} ***\n')
print(df_compare[(df_compare['_merge']=='both')
& (df_compare[f'{c}_sol'] != df_compare[f'{c}_mine'])]\
[unique_cols[i] + [f'{c}_sol', f'{c}_mine']])
print('\n')
errors += 1
if errors == 0:
print('Values match')
print('\n')
| kelly-gilbert/preppin-data-challenge | 2020/preppin-data-2020-09/preppin-data-2020-09.py | preppin-data-2020-09.py | py | 9,113 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.DateOffset",
... |
11196016804 | import sys
import pygame
import random
from src import hero
from src import enemy
class Controller:
    def __init__(self, width=640, height=480):
        """
        Set up pygame, the display surface, the hero and a group of randomly
        placed enemies, and enter the "GAME" state.

        :param width:  screen width in pixels (left to right)
        :param height: screen height in pixels (top to bottom)
        """
        pygame.init()
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.background = pygame.Surface(self.screen.get_size()).convert()
        self.background.fill((250, 250, 250)) # set the background to white
        pygame.font.init() # you have to call this at the start, if you want to use this module.
        pygame.key.set_repeat(1, 50) # initialize a held key to act as repeated key strikes
        """Load the sprites that we need"""
        self.enemies = pygame.sprite.Group()
        num_enemies = 3
        for i in range(num_enemies):
            # random on-screen spawn position for each enemy
            x = random.randrange(100, 400)
            y = random.randrange(100, 400)
            self.enemies.add(enemy.Enemy("Boogie", x, y, 'assets/enemy.png'))
        self.hero = hero.Hero("Conan", 50, 80, "assets/hero.png")
        self.all_sprites = pygame.sprite.Group((self.hero,) + tuple(self.enemies))
        self.state = "GAME"

    def mainLoop(self):
        """
        Top-level state machine: runs forever, dispatching to the game loop or
        the game-over screen depending on self.state ("GAME" / "GAMEOVER").
        """
        while True:
            if(self.state == "GAME"):
                self.gameLoop()
            elif(self.state == "GAMEOVER"):
                self.gameOver()

    def gameLoop(self):
        """
        Main play loop: handle keyboard input, resolve hero/enemy collisions,
        and redraw the screen until the hero's health reaches 0 (then switch
        self.state to "GAMEOVER").
        """
        while self.state == "GAME":
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                if event.type == pygame.KEYDOWN:
                    # arrow keys move the hero; key repeat (set in __init__)
                    # makes holding a key move continuously
                    if(event.key == pygame.K_UP):
                        self.hero.move_up()
                    elif(event.key == pygame.K_DOWN):
                        self.hero.move_down()
                    elif(event.key == pygame.K_LEFT):
                        self.hero.move_left()
                    elif(event.key == pygame.K_RIGHT):
                        self.hero.move_right()
            # check for collisions; dokill=True removes colliding enemies from
            # self.enemies (losers are re-added below)
            fights = pygame.sprite.spritecollide(self.hero, self.enemies, True)
            if(fights):
                for e in fights:
                    if(self.hero.fight(e)):
                        # hero won: remove the enemy from all groups, flash white
                        e.kill()
                        self.background.fill((250, 250, 250))
                    else:
                        # hero lost: flash red and put the enemy back
                        self.background.fill((250, 0, 0))
                        self.enemies.add(e)
            # redraw the entire screen
            self.enemies.update()
            self.screen.blit(self.background, (0, 0))
            if(self.hero.health == 0):
                self.state = "GAMEOVER"
            self.all_sprites.draw(self.screen)
            # update the screen
            pygame.display.flip()

    def gameOver(self):
        """
        Remove the hero, draw a 'Game Over' message, and wait until the user
        closes the window.
        """
        self.hero.kill()
        myfont = pygame.font.SysFont(None, 30)
        message = myfont.render('Game Over', False, (0, 0, 0))
        self.screen.blit(message, (self.width / 2, self.height / 2))
        pygame.display.flip()
        # spin until the window is closed; only QUIT events are handled here
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
| brianskim27/cs110 | ch-11-lab-brianskim27/src/controller.py | controller.py | py | 4,941 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surfac... |
23938708203 |
# A very simple Flask Hello World app for you to get started with...
import logging, sys
from flask import Flask, request
from CrapBot import Bot
crap_bot = Bot()
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Health-check endpoint for the root URL."""
    greeting = 'Hello from Flask!'
    return greeting
@app.route('/bot', methods=['POST', 'GET'])
def bot():
    """Webhook endpoint: forward an incoming update to the bot, if any."""
    payload = request.json
    if payload:
        crap_bot.handle(payload)
    return 'Ok'
if __name__ == '__main__':
    from CrapBot import Logger
    Logger.warn('bot started')
    # Webhook registration is disabled; uncomment to (re)register the webhook.
    #from CrapBot.Api import set_webhook
    #r = set_webhook()
    #print(r)
    # NOTE(review): presumably a long-polling fallback when no webhook is set
    # -- confirm against CrapBot.Bot.listen
    crap_bot.listen()
{
"api_name": "CrapBot.Bot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"li... |
74219528992 | from lyse import *
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import scipy.constants as constants
import AnalysisSettings
import SrConstants
from Subroutines.FitFunctions import gauss
from scipy import stats
camera = AnalysisSettings.Camera
pixelSize = SrConstants.pixelSizeDict[camera]
name = "Different_Loading_Curves_20_02_04"
path = "C:\\Experiments\\example_experiment\\grating_MOT\\AnalysisData\\" + name + ".h5"
run = Run(path)
df = data()
sequence_index = df["sequence_index"]
run_number = df["run number"]
filepath = df["filepath"]
norms = df['FitMOTLoad','fluorNorm']
BlueMOTLoadTime = df['BlueMOTLoadTime']
BlueMOTBeatnote = df['BlueMOTBeatnote']
BlueMOTPower_V = df['BlueMOTPower']
atomNumber = df["splice_gaussian_fit","atomNumber"]
loadingTimeConstant = df["FitMOTLoad", "loadingTimeConstant"]
loadingRate = df["FitMOTLoad", "loadingRate"]
widthX = df["splice_gaussian_fit","widthX"]
widthZ = df["splice_gaussian_fit","widthZ"]
TimeOfFlight = df["TimeOfFlight"]
avgWidth = df["splice_gaussian_fit","avgWidth"]
avgPeakOD = df["splice_gaussian_fit","avgPeakOD"]
avgNorm = 0.04
normFlrNum = loadingRate * loadingTimeConstant * avgNorm/ norms
avgNormRate = loadingRate * avgNorm / norms
SourceCurrent = df["SourceCurrent"]
runtime = df["run time"]
fluorCounts = []
fluorCountsCulled = range(len(filepath))
time = range(len(filepath))
timeCulled = range(len(filepath))
i = 0
for file in filepath:
fluorCounts.append(np.zeros(1))
with h5py.File(file,'a') as f:
for result in f['results']['FitMOTLoad']['atomNummbers']:
np.append(fluorCounts[i],result)
i+= 1
print(fluorCounts)
FluorCounts = df["FitMOTLoad","atomNummbers"]
FluorCountsCulled = df["FitMOTLoad","atomNummbersCulled"]
time = df["FitMOTLoad", "time"]
timeCulled = df["FitMOTLoad", "timeCulled"]
def parabola(v, a, b, c):
    """Vertex-form parabola: a * (v - c)**2 + b."""
    shifted = v - c
    return a * shifted ** 2 + b
power_params = [-758.105, 92.576, 4.087]
gaussianWidth = 0.00914
Isat = 403
BlueMOTPower_mW = parabola(BlueMOTPower_V, *power_params)
I_Isat = (BlueMOTPower_mW*(10**-3)/(np.pi*gaussianWidth**2))/Isat
#Do Calculations
pOpt, pCov = curve_fit(gauss, BlueMOTBeatnote, atomNumber, p0 = (1000, 95, 10, 0))
PeakBeatnote = pOpt[1]
Detuning = BlueMOTBeatnote - PeakBeatnote
linewidth = 30.5
# SourceCurrentSing = [12,12.5,13,13.5,14]
DetuningSing = []
normFlrNumSing = []
normFlrNumSingDev =[]
#
for beatnote in Detuning:
if beatnote not in DetuningSing:
DetuningSing.append(beatnote)
#
#
atomNumberSing = []
atomNumberSingDev =[]
normTauSing = []
normTauSingDev =[]
normRateSing = []
normRateSingDev = []
#
for beatnote in DetuningSing:
points1 = []
#points2 = []
#points3 = []
for i in range(len(run_number)):
if Detuning[i] == beatnote:
points1.append(atomNumber[i])
# points2.append(avgNormRate[i])
# points3.append(normFlrNum[i])
atomNumberSing.append(np.average(points1))
atomNumberSingDev.append(stats.sem(points1))
# normRateSing.append(np.average(points2))
# normRateSingDev.append(np.std(points2))
# normFlrNumSing.append(np.average(points3))
# normFlrNumSingDev.append(np.std(points3))
corAtomNumberSing = np.array(atomNumberSing) * 1/(1 + 4*(np.array(DetuningSing)/linewidth)**2)
corAtomNumberSingDev = np.array(atomNumberSingDev) * 1/(1 + 4*(np.array(DetuningSing)/linewidth)**2)
# fig, ax1 = plt.subplots(1)
# #plot = ax.errorbar(BlueMOTPower_V, normFlrNum, fmt = 'bo', label = 'norms')
# #plot = plt.scatter(SourceCurrent, loadingTimeConstant)
# ax1.errorbar(DetuningSing, atomNumberSing, yerr = atomNumberSingDev,fmt = 'bo')
# ax1.errorbar(DetuningSing, corAtomNumberSing, yerr = corAtomNumberSingDev,fmt = 'go')
# ax1.plot(np.linspace(-18,9,100), gauss(np.linspace(-18,9,100) + PeakBeatnote,*pOpt), 'r-')
run.save_result('norms',norms)
run.save_result('atomNumber', atomNumber)
run.save_result("loadingRate", loadingRate)
run.save_result("loadingTimeConstant", loadingTimeConstant)
run.save_result("avgWidth", avgWidth)
run.save_result("avgPeakOD", avgPeakOD)
run.save_result("avgNorm", np.average(norms))
run.save_result("TimeOfFlight", TimeOfFlight)
run.save_result("widthX", widthX)
run.save_result("widthZ", widthZ)
run.save_result("FluorCounts",FluorCounts)
run.save_result("FluorCountsCulled", FluorCountsCulled)
run.save_result("time", time)
run.save_result("timeCulled", timeCulled)
#run.save_result("BlueMOTPower_V", BlueMOTPower_V)
# run.save_result("Detuning", Detuning)
# run.save_result("DetuningSing", DetuningSing)
# run.save_result("corAtomNumberSing", corAtomNumberSing)
# run.save_result("corAtomNumberSingDev",corAtomNumberSingDev)
# run.save_result("PeakBeatnote" ,PeakBeatnote)
# run.save_result("linewidth", linewidth)
# run.save_result("atomNumberSing",atomNumberSing)
# run.save_result("atomNumberSingDev",atomNumberSingDev)
# run.save_result("SourceCurrent", SourceCurrent)
run.save_result("normFlrNum", normFlrNum)
#run.save_result("normFlrNumSing", normFlrNumSing)
#run.save_result("normFlrNumSingDev", normFlrNumSingDev)
#run.save_result("normTauSing", normTauSing)
#run.save_result("normTauSingDev", normTauSingDev)
#run.save_result("normRateSing", normRateSing)
#run.save_result("normRateSingDev", normRateSingDev)
#run.save_result("SourceCurrentSing", SourceCurrentSing)
# run.save_result("gaussianWidth", gaussianWidth)
# run.save_result("Isat", Isat)
# run.save_result("BlueMOTPower_mW", BlueMOTPower_mW)
# run.save_result("I_Isat", I_Isat)
# run.save_result("avgNormRate", avgNormRate)
# run.save_result("avgNorm", avgNorm)
files = []
for i in range(len(sequence_index)):
files.append(str(sequence_index[i]) + '_' + str(run_number[i]))
run.save_result("files", files)
run.save_result("filepath", filepath)
| Loki27182/userlib | analysislib/SrII/old_stuff/AnalysisMultishot.py | AnalysisMultishot.py | py | 5,763 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "AnalysisSettings.Camera",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "SrConstants.pixelSizeDict",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 48,
"usage_type": "call"
},
{
"api_name"... |
35076946414 | """
Harvester scripts
Currently only supports AVR atdf files
"""
# Python 3 compatibility for Python 2
from __future__ import print_function
import argparse
import textwrap
from xml.etree import ElementTree
from pymcuprog.deviceinfo.memorynames import MemoryNames
from pymcuprog.deviceinfo.deviceinfokeys import DeviceMemoryInfoKeys, DeviceInfoKeysAvr
def map_atdf_memory_name_to_pymcuprog_name(atdf_name):
    """
    Mapping a memory name in atdf files to the corresponding memory name used in the pymcuprog device models

    :param atdf_name: Name of memory in atdf files
    :return: Name of memory in pymcuprog device models ('unknown' if unmapped)
    """
    name_map = {
        'progmem': MemoryNames.FLASH,
        # Datasheets actually use user_row for UPDI devices at least
        'user_signatures': MemoryNames.USER_ROW,
        'eeprom': MemoryNames.EEPROM,
        'fuses': MemoryNames.FUSES,
        'lockbits': MemoryNames.LOCKBITS,
        'signatures': MemoryNames.SIGNATURES,
        'internal_sram': MemoryNames.INTERNAL_SRAM,
    }
    return name_map.get(atdf_name, 'unknown')
def determine_chiperase_effect(name, architecture):
    """
    Return the ChiperaseEffect expression (as source text) for an atdf memory.

    :param name: atdf memory segment name (lower case)
    :param architecture: device architecture string; only 'avr*' is handled
    :return: source-text expression, a placeholder comment for unknown
        memories, or None for non-AVR architectures
    """
    if 'avr' not in architecture:
        return None
    if name in ('user_signatures', 'fuses', 'signatures'):
        return 'ChiperaseEffect.NOT_ERASED'
    if name in ('internal_sram', 'lockbits', 'progmem'):
        return 'ChiperaseEffect.ALWAYS_ERASED'
    if name == 'eeprom':
        return 'ChiperaseEffect.CONDITIONALLY_ERASED_AVR'
    return '# To be filled in manually'
def determine_isolated_erase(name, architecture):
    """
    Return (as source text 'True'/'False') whether the memory supports
    isolated erase; placeholder comment for unknown memories, None for
    non-AVR architectures.
    """
    if 'avr' not in architecture:
        return None
    if name in ('user_signatures', 'eeprom'):
        return 'True'
    if name in ('internal_sram', 'lockbits', 'progmem', 'fuses', 'signatures'):
        return 'False'
    return '# To be filled in manually'
def determine_write_size(memory_name, page_size, device_name):
    """
    Return the write granularity for a memory.

    :param memory_name: pymcuprog memory name ('flash', 'signatures', ...)
    :param page_size: page size of the memory (returned unchanged for
        page-oriented flash writes, so the caller's type is preserved)
    :param device_name: device name, used to detect AVR DA/DB/DD families
        which support word (2-byte) flash writes
    :return: write size in bytes (0 for read-only signatures)
    """
    device_name = device_name.lower()
    # Fix: the original compared with 'is', which is an identity check on
    # string literals (implementation-defined; SyntaxWarning on CPython 3.8+).
    if memory_name == 'flash':
        if 'avr' in device_name and any(family in device_name
                                        for family in ('da', 'db', 'dd')):
            return 2
        return page_size
    if memory_name == 'signatures':
        return 0
    return 1
def capture_memory_attribute(attribute, architecture, device_name):
    """
    Capture the memory attribute

    :param attribute: memory-segment attribute dict from the atdf
        (keys seen here: 'name', 'size', 'start', optionally 'pagesize')
    :param architecture: device architecture string (used for erase semantics)
    :param device_name: device name (used to determine flash write size)
    :return: attributes found as a string (dict-entry source lines)
    """
    name = attribute['name'].lower()
    size = attribute['size']
    start = attribute['start']
    try:
        pagesize = attribute['pagesize']
    except KeyError:
        # Segment has no page structure -> byte accessible
        pagesize = '1'
    # For some AVRs the ATDF gives a pagesize of fuses and lockbits equal to flash or EEPROM page size but fuses and lockbits are always byte accessible.
    if name in ['fuses', 'lockbits']:
        pagesize = '1'
    # Read size is always 1 byte except for flash that can only read complete words
    readsize = 1
    if name in ['progmem']:
        readsize = 2
    output = ""
    # These names are the names used in the atdf files and might differ from the pymcuprog MemoryNames
    if name in ['progmem', 'eeprom', 'user_signatures', 'fuses', 'lockbits', 'signatures', 'internal_sram']:
        print_name = map_atdf_memory_name_to_pymcuprog_name(name)
        output += "\n # {}\n".format(print_name)
        output += capture_field('{}_{}_byte'.format(print_name, DeviceMemoryInfoKeys.ADDRESS), start)
        output += capture_field('{}_{}_bytes'.format(print_name, DeviceMemoryInfoKeys.SIZE), size)
        output += capture_field('{}_{}_bytes'.format(print_name, DeviceMemoryInfoKeys.PAGE_SIZE), pagesize)
        # These are the same for all AVRs
        output += " '{}_{}_bytes': {},\n".format(print_name, DeviceMemoryInfoKeys.READ_SIZE, readsize)
        output += " '{}_{}_bytes': {},\n".format(print_name, DeviceMemoryInfoKeys.WRITE_SIZE, determine_write_size(print_name, pagesize, device_name))
        output += " '{}_{}': {},\n".format(print_name, DeviceMemoryInfoKeys.CHIPERASE_EFFECT, determine_chiperase_effect(name, architecture))
        output += " '{}_{}': {},\n".format(print_name, DeviceMemoryInfoKeys.ISOLATED_ERASE, determine_isolated_erase(name, architecture))
    return output
def capture_register_offset(name, offset):
    """
    Render a '<name>_base' dict entry for a peripheral register block offset.

    :param name: register-group name
    :param offset: register-group base offset
    :return: string of register and offset
    """
    base_field = "{}_base".format(name.lower())
    return capture_field(base_field, offset)
def capture_field(field, value):
    """
    Render one "'field': value" dict-entry source line.

    Values that parse as base-16 integers are emitted unquoted; anything
    else is emitted as a quoted string.

    :param field: entry name
    :param value: entry value (expected to be a string)
    :return: string of definition
    """
    try:
        int(value, 16)
    except (ValueError, AttributeError):
        # Not a hex number -> emit as a quoted string
        return " '{}': '{}',\n".format(field, value)
    return " '{}': {},\n".format(field, value)
def capture_device_element(element, device_name):
    """
    Capture data from a device element.

    :param element: element with tag='device'
    :param device_name: device name (forwarded to memory harvesting)
    :return: captured data from the device element as a string
    """
    architecture = element.attrib['architecture'].lower()
    parts = [
        capture_field('name', element.attrib['name'].lower()),
        capture_field('architecture', architecture),
    ]
    for segment in element.iterfind("address-spaces/address-space/memory-segment"):
        parts.append(capture_memory_attribute(segment.attrib, architecture, device_name))
    parts.append("\n # Some extra AVR specific fields\n")
    return ''.join(parts)
def capture_module_element(element):
    """
    Capture data from a module element

    This function will return data captured from the module element but will also check if the module
    element contains info about an UPDI fuse (fuse to configure a shared UPDI pin)

    :param element: element with tag='module'
    :return output, found_updi_fuse
        output: captured module element data as a string
        found_updi_fuse: True if the module element contained info about an UPDI fuse
    """
    output = ""
    found_updi_fuse = False
    for i in element.iterfind("instance/register-group"):
        name = i.attrib['name']
        # Normalize the offset to a fixed-width hex literal
        offset = "0x{:08X}".format(int(i.attrib['offset'], 16))
        if i.attrib['name'] == 'SYSCFG':
            output += capture_register_offset(name, offset)
            # OCD block is placed 0x80 above SYSCFG -- TODO confirm this holds
            # for all harvested devices
            output += capture_register_offset('OCD', "0x{:08X}".format(int(offset, 16) + 0x80))
        if i.attrib['name'] == 'NVMCTRL':
            output += capture_register_offset(name, offset)
    for i in element.iterfind("instance/signals/signal"):
        if i.attrib['group'] == 'UPDI' and i.attrib['pad'] is not None:
            # Shared UPDI pin: emit a slower programming clock
            output += capture_field('prog_clock_khz', '900')
            found_updi_fuse = True
    return output, found_updi_fuse
#This dict serves as a temporary workaround for an xml-parsing bug, where the device signature can't be extracted for some devices
spare_signatures = {
'atmega16': bytearray(b'\x1e\x94\x03'),
'atmega1609': bytearray(b'\x1e\x94\x26'),
'atmega168a': bytearray(b'\x1e\x94\x06'),
'atmega3209': bytearray(b'\x1e\x95\x31'),
'atmega328': bytearray(b'\x1e\x95\x14'),
'atmega4809': bytearray(b'\x1e\x96\x51'),
'atmega48p': bytearray(b'\x1e\x92\x0a'),
'atmega809': bytearray(b'\x1e\x93\x2a'),
'atmega88a': bytearray(b'\x1e\x93\x0a'),
'attiny202': bytearray(b'\x1e\x91\x23'),
'attiny402': bytearray(b'\x1e\x92\x27'),
'avr128da32': bytearray(b'\x1e\x97\x09'),
'avr32da32': bytearray(b'\x1e\x95\x33'),
'avr64da32': bytearray(b'\x1e\x96\x14')
}
def capture_signature_from_property_groups_element(element, device_name):
    """
    Capture signature (Device ID) data from a property-group element

    :param element: element with tag='property-groups'
    :param device_name: device name, used for the hard-coded fallback lookup
    :return: (bytearray with 3 bytes of Device ID data,
              True if a signature was found)
    """
    signature = bytearray(3)
    success = False
    for prop in element.iterfind("property-group/property"):
        prop_name = prop.attrib['name']
        if prop_name == 'SIGNATURE0':
            signature[0] = int(prop.attrib['value'], 16)
        if prop_name == 'SIGNATURE1':
            signature[1] = int(prop.attrib['value'], 16)
        if prop_name == 'SIGNATURE2':
            # SIGNATURE2 is treated as the marker that a full signature exists
            signature[2] = int(prop.attrib['value'], 16)
            success = True
    if success is False:
        # Workaround for an xml-parsing bug: fall back to the hard-coded table.
        # Guard the lookup so an unknown device reports failure instead of
        # raising KeyError (mirrors the try/except in correct_flash_offset).
        try:
            signature = spare_signatures[device_name.lower()]
            success = True
        except KeyError:
            success = False
    return signature, success
#This dict serves as a temporary workaround for an xml-parsing bug, where the flash_offset can't be extracted for some devices
spare_flash_offset = {
'atmega809': '0x00004000',
'atmega1609': '0x00004000',
'attiny202': '0x00008000',
'attiny402': '0x00008000',
'avr32da32': '0x00800000',
'avr64da32': '0x00800000',
'avr128da32': '0x00800000',
}
def correct_flash_offset(element, device_name, data_string):
    """
    Patch the already-harvested flash_address_byte entry with the device's
    PROGMEM_OFFSET (or a hard-coded spare value) if one is known.

    :param element: element with tag='property-groups'
    :param device_name: device name used for the spare_flash_offset fallback
    :param data_string: harvested dict-body text produced so far
    :return: data_string with the flash address replaced, or unchanged if no
        offset could be determined
    """
    corrected_data_string = ''
    device_name = device_name.lower()
    success = False
    for i in element.iterfind("property-group/property"):
        if i.attrib['name'] == 'PROGMEM_OFFSET':
            value = i.attrib['value']
            success = True
    if success is False:
        # Workaround for an xml-parsing bug: fall back to the hard-coded table
        try:
            value = spare_flash_offset[device_name.lower()]
            success = True
        # NOTE(review): bare except also swallows unrelated errors; KeyError is
        # the only one expected here
        except:
            success = False
    if success is True:
        ind = data_string.find("'flash_address_byte': ")
        # 22 == len("'flash_address_byte': "), so ind+22 points at the value;
        # +10 skips the existing value, assumed to be exactly 10 chars
        # ("0x????????") -- TODO confirm for all harvested formats
        corrected_data_string += data_string[:ind+22]
        corrected_data_string += value
        corrected_data_string += data_string[ind+22+10:]
        return corrected_data_string
    return data_string
def determine_address_size(device_name):
    """
    Return '24-bit' for AVR DA/DB family devices, '16-bit' otherwise.
    """
    lowered = device_name.lower()
    is_avr_dx = 'avr' in lowered and ('da' in lowered or 'db' in lowered)
    return '24-bit' if is_avr_dx else '16-bit'
def harvest_from_file(filename):
    """
    Harvest parameters from a file

    :param filename: path to file (.atdf) to parse
    :return: (output, signature_harvest_success) where output is the harvested
        dict-body text and the flag tells whether a device signature was found
    """
    xml_iter = ElementTree.iterparse(filename, events=('start', 'end'))
    output = ""
    shared_updi = False
    for event, elem in xml_iter:
        if event == 'start':
            if elem.tag == 'device':
                device_name = elem.attrib['name']
                output += capture_device_element(elem, device_name)
            if elem.tag == 'module':
                module_output, found_updi_fuse = capture_module_element(elem)
                output += module_output
                if found_updi_fuse:
                    # A fuse-configurable (shared) UPDI pin was found; the
                    # slower prog_clock_khz was already emitted by
                    # capture_module_element
                    shared_updi = True
            if elem.tag == 'interface':
                output += capture_field(elem.tag, elem.attrib['name'])
            if elem.tag == 'property-groups':
                # NOTE(review): relies on the 'device' element appearing before
                # 'property-groups' in the atdf -- device_name (and the
                # returned signature_harvest_success) would be unbound otherwise
                signature, signature_harvest_success = capture_signature_from_property_groups_element(elem, device_name)
                # signature_harvest_success provides a temporary workaround for an xml-parsing bug
                output = correct_flash_offset(elem, device_name, output)
                output += capture_field('address_size', determine_address_size(device_name))
                if not shared_updi:
                    output += capture_field(DeviceInfoKeysAvr.PROG_CLOCK_KHZ, '1800')
                output += capture_field(DeviceInfoKeysAvr.DEVICE_ID,
                                        "0x{:02X}{:02X}{:02X}".format(signature[0], signature[1], signature[2]))
    return output, signature_harvest_success
def main():
    """
    Main function for the harvest utility

    Parses the command line, harvests device data from the given .atdf file
    and prints a DEVICE_INFO dict snippet to stdout.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
    Harvests device data from a device data file (.atdf) for one device.
    The harvested data can be used to populate a device file in deviceinfo.devices
    '''))

    parser.add_argument("filename",
                        help="name (and path) of file to harvest data from"
                        )
    arguments = parser.parse_args()

    # Fix: harvest_from_file returns (dict body, signature-found flag); the
    # previous code formatted the whole tuple into the generated snippet.
    dict_content, signature_harvested = harvest_from_file(arguments.filename)
    content = "\nfrom pymcuprog.deviceinfo.eraseflags import ChiperaseEffect\n\n"
    content += "DEVICE_INFO = {{\n{}}}".format(dict_content)
    if not signature_harvested:
        # Emitted as a Python comment so the generated snippet stays valid
        content += "\n# WARNING: device signature could not be harvested"
    print(content)
| SpenceKonde/megaTinyCore | megaavr/tools/libs/pymcuprog/deviceinfo/harvest.py | harvest.py | py | 12,647 | python | en | code | 471 | github-code | 1 | [
{
"api_name": "pymcuprog.deviceinfo.memorynames.MemoryNames.FLASH",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pymcuprog.deviceinfo.memorynames.MemoryNames",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pymcuprog.deviceinfo.memorynames.MemoryNames... |
32150920427 | from django.db.models import F
from django.contrib.auth.models import User
from lunchclub.models import AccessToken
class TokenBackend(object):
    """Authentication backend that resolves an AccessToken to a Django user."""

    def authenticate(self, token=None):
        """Return the user owning the given token string, or None if unknown."""
        try:
            matching = AccessToken.objects.get(token=token)
        except AccessToken.DoesNotExist:
            return None
        return matching.person.get_or_create_user()

    def get_user(self, user_id):
        """Return the User with the given primary key, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
| Mortal/django-lunchclub | lunchclub/auth.py | auth.py | py | 523 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lunchclub.models.AccessToken.objects.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "lunchclub.models.AccessToken.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "lunchclub.models.AccessToken",
"line_number": 9,
"usage_... |
42614393833 | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Global variables
csv_file = 'RealtimePlot.csv' # Replace with the path to your CSV file
update_interval = 1000 # Update plot every 1000 milliseconds (1 second)
fig, ax = plt.subplots()
ax2 = ax.twinx()
# Function to update the plot
def update_plot():
    """
    Re-read the CSV file and redraw the distance (left axis) and speed
    command (right axis) curves.
    """
    df = pd.read_csv(csv_file)

    # Fix: plt.cla() clears only the *current* axes, so old lines kept
    # accumulating on ax/ax2 on every refresh. Clear both axes explicitly.
    ax.cla()
    ax2.cla()
    ax.plot(df['Time'], df['Distance'], 'b-', label='Distance')
    # ax2.plot(dataTime, dataVR, 'r--', label='Velocity')
    ax2.plot(df['Time'], df['Speed'], 'g-.', label='Speed Command')
    plt.axis('on')  # Keep axis labels and ticks visible
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Distance (mm)")
    ax2.set_ylabel("Speed (mm/s)")
    plt.title('Distance and Speed Command Real-Time Plot')

    # Adjust plot limits if needed
    ax.relim()
    ax.autoscale_view()

    # Redraw the plot
    plt.draw()
if __name__ == '__main__':
    # Main loop to update the plot periodically
    while True:
        update_plot()
        # plt.pause both sleeps and lets the GUI event loop process redraws
        plt.pause(0.1)  # Pause for a short duration to allow the plot to update
| wendycahya/Yaskawa-Communication | IntegratedSystem/Realtime-Video.py | Realtime-Video.py | py | 1,139 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
28015752990 | # @time : 2020/7/12 16:35
# @author : HerbLee
# @file : finance.py
from sanic import Blueprint
from sanic.response import text
from models.funddb import FundDb, CurrentFund
fund = Blueprint("fund", url_prefix="/fund")
@fund.route("/get_data")
async def get_v2_data(request):
    """Placeholder endpoint for fund data."""
    body = "it is finance"
    return text(body)
@fund.route("/add", methods=["POST",])
async def add_fund(request):
    """
    Create a fund position from posted form data.

    Expects form fields: name, code, price, cost, nums. Creates the FundDb
    row only when the code is not present yet, then records a CurrentFund
    entry pointing at the (existing or newly created) fund.
    """
    datas = request.form
    # await FundDb.create(name=datas['name'][0], code=datas['code'][0],
    #                     price=float(datas['price'][0]), cost=float(datas['cost'][0]), nums=float(datas['nums'][0]))
    res = await FundDb.filter(code=datas['code'][0])
    if not res:
        fdb = await FundDb.create(name=datas['name'][0], code=datas['code'][0])
    # 'fdb' is only referenced when res is empty, i.e. right after assignment
    await CurrentFund.create(code=res[0] if res else fdb, price=float(datas['price'][0]), cost=float(datas['cost'][0]), nums=float(datas['nums'][0]))
    return text("success")
return text("success") | HerbLee/dawning | api/finance/fund.py | fund.py | py | 931 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sanic.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sanic.response.text",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.funddb.FundDb.filter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models... |
17127249082 | import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
from multiprocessing import Pool
from functools import partial
from sklearn.model_selection import KFold
from MatrixFactorization import FactorizeMatrix, GetRepresentationError, CreateLatentVariables
from FeatureSimilarity import GetTopGenes
def RandomParams(eta_low, eta_high, lamb1_low, lamb1_high, lamb2_low, lamb2_high, num_reps=20):
    """
    Draw num_reps random (eta, lamb1, lamb2) triples, each coordinate sampled
    uniformly from its [low, high) range. Returns a float32 array of shape
    (num_reps, 3).
    """
    bounds = [(eta_low, eta_high), (lamb1_low, lamb1_high), (lamb2_low, lamb2_high)]
    # Draw columns in the same order as before (eta, lamb1, lamb2) so the
    # RNG stream is consumed identically.
    columns = [np.random.uniform(low=lo, high=hi, size=(num_reps,)) for lo, hi in bounds]
    return np.stack(columns, axis=1).astype(np.float32)
def TrainOnParams(params, X, k, neighbors, train_indices, test_indices):
    """
    Fit one factorization with the given (eta, lamb1, lamb2) setting and
    return the reconstruction error on the held-out indices.
    """
    print('.', end='')
    n, g = X.shape
    eta, lamb1, lamb2 = params
    U, V = CreateLatentVariables(n, g, k)
    U, V = FactorizeMatrix(X, U, V, neighbors, eta=eta, lamb1=lamb1, lamb2=lamb2,
                           trainIndices=train_indices)
    return GetRepresentationError(X, U, V, known_indices=test_indices)
def TrainVerboseOnParams(params, X, k, neighbors, train_indices, test_indices):
    """
    Like TrainOnParams, but also return the per-epoch train/test error
    vectors reported by FactorizeMatrix.
    """
    print('.', end='')
    n, g = X.shape
    eta, lamb1, lamb2 = params
    U, V = CreateLatentVariables(n, g, k)
    U, V, trainError, testError = FactorizeMatrix(
        X, U, V, neighbors, eta=eta, lamb1=lamb1, lamb2=lamb2,
        trainIndices=train_indices, returnErrorVectors=True)
    heldOutError = GetRepresentationError(X, U, V, known_indices=test_indices)
    return heldOutError, trainError, testError
def CrossValidation(X, k, hyper_params, neighbors=None, foldcount=5, returnVectorDF=False, numProcesses=20):
    '''
    Runs the matrix factorization algorithm for each specified value of eta and lambda
    and computes the reconstruction errors for each run. Each fold is trained in
    parallel over the hyper-parameter settings with a multiprocessing Pool.

    Args:
        X: An n x g, possibly sparse numpy matrix, where missing entries are indicated by np.nan values,
           where n represents the number of samples and g represents the number of genes, or items.
        k: The latent dimension of the factorization. Typically, k < min(n, g).
        hyper_params: A list of tuples, each corresponding to a setting of hyper parameters (eta, lamb1, lamb2).
        neighbors: Optional neighborhood structure forwarded to FactorizeMatrix.
        foldcount: An integer denoting the number of folds for cross validation.
        returnVectorDF: If True, also return per-epoch train/test error DataFrames.
        numProcesses: Number of worker processes in the Pool.
    Returns:
        errorsDF: DataFrame with columns [eta, lamb1, lamb2, error, fold], one
            row per (hyper-parameter setting, fold).
        If returnVectorDF, additionally (trainErrorDF, testErrorDF) with one
        column per hyper-parameter setting plus 'fold'/'epochs' id columns.
    '''
    n, g = X.shape
    kf = KFold(n_splits=foldcount, shuffle=True)
    errorsDF = pd.DataFrame(np.zeros((len(hyper_params) * foldcount, 5)))
    errorsDF.columns = ['eta', 'lamb1', 'lamb2', 'error', 'fold']

    #Okay not to shuffle because kf shuffles for you
    known_indices = np.argwhere(~np.isnan(X)).astype(np.int32)
    np.random.shuffle(known_indices)

    if returnVectorDF:
        trainErrorDF = pd.DataFrame()
        testErrorDF  = pd.DataFrame()

    fold = 0
    df_index = 0
    # NOTE(review): the Pool is only closed on the normal path; an exception
    # mid-fold leaks the workers -- consider a with/finally block
    p = Pool(numProcesses)
    for train_index, test_index in kf.split(known_indices):
        print('Training fold {}'.format(fold))
        if returnVectorDF:
            foldTrainDF = pd.DataFrame()
            foldTestDF  = pd.DataFrame()

        # Splitting is over the *known entries* of X, not over rows
        train_indices = known_indices[train_index].astype(np.int32)
        test_indices  = known_indices[test_index].astype(np.int32)

        if (returnVectorDF):
            # One worker per hyper-parameter setting
            errorVec = p.map(partial(TrainVerboseOnParams, X=X, k=k, neighbors=neighbors,
                                     train_indices=train_indices, test_indices=test_indices), hyper_params)
            for i in range(len(hyper_params)):
                eta, lamb1, lamb2 = hyper_params[i]
                paramError, trainError, testError = errorVec[i]
                # Accumulate per-epoch error columns, one per setting
                foldTrainDF = pd.concat([foldTrainDF,
                                         pd.DataFrame({
                                             'eta{:.5f}_lamb1{:.5f}_lamb2{:.5f}'.format(eta, lamb1, lamb2): trainError
                                         })
                                        ], axis=1)
                foldTestDF = pd.concat([foldTestDF,
                                        pd.DataFrame({
                                            'eta{:.5f}_lamb1{:.5f}_lamb2{:.5f}'.format(eta, lamb1, lamb2): testError
                                        })
                                       ], axis=1)
                errorsDF.iloc[df_index] = np.array([eta, lamb1, lamb2, paramError, fold])
                df_index += 1
        else:
            errorVec = p.map(partial(TrainOnParams, X=X, k=k, neighbors=neighbors,
                                     train_indices=train_indices, test_indices=test_indices), hyper_params)
            for i in range(len(hyper_params)):
                eta, lamb1, lamb2 = hyper_params[i]
                paramError = errorVec[i]
                errorsDF.iloc[df_index] = np.array([eta, lamb1, lamb2, paramError, fold])
                df_index += 1

        if returnVectorDF:
            # Tag the per-epoch columns with fold number and epoch index
            foldTrainDF['fold'] = fold
            foldTestDF['fold'] = fold
            maxEpochs, _ = foldTrainDF.shape
            foldTrainDF['epochs'] = np.arange(maxEpochs).astype(np.float32)
            foldTestDF['epochs'] = np.arange(maxEpochs).astype(np.float32)
            trainErrorDF = pd.concat([trainErrorDF, foldTrainDF])
            testErrorDF = pd.concat([testErrorDF, foldTestDF])
        fold = fold + 1
    p.close()
    p.join()
    if returnVectorDF:
        return errorsDF, trainErrorDF, testErrorDF
    else:
        return errorsDF
def PlotErrorDF(errorDF, id_vars=('epochs', 'fold'), ax=None):
    """
    Plot error-vs-epoch curves, one line per hyper-parameter run.

    :param errorDF: wide DataFrame with one column per run plus the id_vars columns
    :param id_vars: columns kept as identifiers when melting; a tuple default
        replaces the previous mutable list default
    :param ax: optional matplotlib axes to draw on; created by seaborn if None
    :return: the axes the curves were drawn on
    """
    data = pd.melt(errorDF, id_vars=list(id_vars), value_name='error', var_name='run')
    if ax is not None:
        ax = sns.lineplot(x='epochs', y='error', hue='run', data=data, ax=ax, legend=False)
    else:
        ax = sns.lineplot(x='epochs', y='error', hue='run', data=data, legend='brief')
    # Place the legend outside the plot area
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return ax
def PlotParamDF(paramDF, id_vars=('error', 'fold'), ax=None):
    """
    Plot error against hyper-parameter value, one line per parameter type.

    :param paramDF: wide DataFrame with one column per parameter plus the id_vars columns
    :param id_vars: columns kept as identifiers when melting; a tuple default
        replaces the previous mutable list default
    :param ax: optional matplotlib axes to draw on; created by seaborn if None
    :return: the axes the curves were drawn on
    """
    data = pd.melt(paramDF, id_vars=list(id_vars), value_name='param_value', var_name='param_type')
    if ax is not None:
        ax = sns.lineplot(x='param_value', y='error', hue='param_type', data=data, ax=ax)
    else:
        ax = sns.lineplot(x='param_value', y='error', hue='param_type', data=data)
    # Place the legend outside the plot area
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return ax
{
"api_name": "seaborn.set",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
... |
22525745523 | '''
print out cmds for training and inference
'''
import argparse
import os
from DPR.dpr.utils.tasks import task_map, train_cluster_map, test_cluster_map
import random
import textwrap
from tqdm import tqdm
def wrap(cmd):
    """Break a long shell command into <=70-char pieces and join them with
    a backslash-newline continuation plus an indent, for readable scripts."""
    continuation = ' \\\n\t '
    chunks = textwrap.wrap(cmd, break_long_words=False, break_on_hyphens=False)
    return continuation.join(chunks)
def get_cmds(args):
    """Write `train.sh` and `inference.sh` under the experiment directory.

    The training script scores randomly sampled prompts per training task and
    then trains the retriever; the inference script encodes the prompt pool,
    retrieves prompts for every test task and runs downstream inference
    (plus optional Random/BM25/SBERT retrieval baselines).
    """
    # ================================== Train Stage ===================================
    # 1. random sample prompts and score data
    prompt_pool_dir = os.path.join(args.output_dir, 'prompt_pool')
    random_sample_dir = os.path.join(args.output_dir, 'find_random')
    scored_dir = os.path.join(args.output_dir, 'scored')
    exp_name = f'train_{args.train_clusters}_test_{args.test_clusters}'
    exp_path = os.path.join(args.output_dir, 'experiment', exp_name)
    os.makedirs(exp_path, exist_ok=True)
    # random port so parallel runs of accelerate do not collide
    random_port = random.randint(21966,25000)
    if args.train_clusters is None:
        clusters = list(train_cluster_map.keys())
    else:
        clusters = args.train_clusters.split('+')
    train_cmd_list=[]
    for cluster in tqdm(clusters):
        for task in train_cluster_map[cluster]:
            echo_cmd = f'echo "scoring {task} task of {cluster} cluster..."'
            task_cls = task_map.cls_dic[task]()
            prompt_pool_path = os.path.join(prompt_pool_dir, cluster, task+'_prompts.json')
            random_sample_path = os.path.join(random_sample_dir, cluster, task+'_random_samples.json')
            find_random_cmd=\
            f'python find_random.py output_path=$PWD/{random_sample_path} \
            task_name={task} +ds_size={args.ds_size} L={task_cls.finder_L} \
            prompt_pool_path=$PWD/{prompt_pool_path} cache_dir=$PWD/{args.cache_dir}\
            hydra.run.dir=$PWD/{exp_path}'
            scored_train_path = os.path.join(scored_dir, cluster, task+'_scored_train.json')
            scored_valid_path = os.path.join(scored_dir, cluster, task+'_scored_valid.json')
            run_scorer_cmd = \
            f'accelerate launch --multi_gpu --num_processes {args.gpus} --main_process_port {random_port} \
            scorer.py example_file=$PWD/{random_sample_path} \
            output_train_file=$PWD/{scored_train_path} \
            output_valid_file=$PWD/{scored_valid_path} \
            batch_size={task_cls.run_scorer_bsz} task_name={task} \
            model_name={args.scr_model} \
            prompt_pool_path=$PWD/{prompt_pool_path} cache_dir=$PWD/{args.cache_dir} \
            hydra.run.dir=$PWD/{exp_path}'
            train_cmd_list += [echo_cmd, find_random_cmd, run_scorer_cmd]

    # 2. train a retriever:
    echo_cmd = f'echo "start training the retriever..."'
    train_retriever_cmd = \
    f'python DPR/train_dense_encoder.py train_datasets=[uprise_dataset] dev_datasets=[uprise_valid_dataset] \
    train=biencoder_uprise output_dir=$PWD/{exp_path} \
    datasets.train_clusters={args.train_clusters} \
    datasets.train_file=$PWD/{scored_dir} \
    datasets.valid_file=$PWD/{scored_dir} \
    datasets.hard_neg=true datasets.multi_task={args.multi_task} \
    datasets.top_k={args.retriever_top_k} train.hard_negatives={args.retriever_top_k} \
    train.batch_size={args.retriever_bsz} \
    train.num_train_epochs={args.retriever_epoch} \
    datasets.prompt_pool_path=$PWD/{prompt_pool_dir} \
    datasets.prompt_setup_type={args.retriever_prompt_setup} \
    datasets.task_setup_type=q encoder.cache_dir=$PWD/{args.cache_dir}\
    hydra.run.dir=$PWD/{exp_path}'
    train_cmd_list += [echo_cmd, train_retriever_cmd]

    # write train cmds in train.sh
    train_cmd_list = [wrap(cmd) for cmd in train_cmd_list]
    with open(f"{exp_path}/train.sh","w") as f:
        f.write("\n\n".join(train_cmd_list))
    print('saved training cmds to: ', f"{exp_path}/train.sh")

    # ================================== Inference Stage ===================================
    inference_cmd_list = []
    # 1. encode the whole prompt pool, using prompt encoder of the trained retriever
    echo_cmd = f'echo "encoding the whole prompt pool..."'
    gen_emb_cmd = \
    f"python DPR/generate_dense_embeddings.py model_file=$PWD/{exp_path}/dpr_biencoder.best_valid \
    ctx_src=dpr_uprise shard_id=0 num_shards=1 \
    out_file=$PWD/{exp_path}/dpr_enc_index \
    ctx_sources.dpr_uprise.train_clusters={args.train_clusters} \
    ctx_sources.dpr_uprise.prompt_pool_path=$PWD/{prompt_pool_dir} \
    ctx_sources.dpr_uprise.prompt_setup_type={args.retriever_prompt_setup} \
    encoder.cache_dir=$PWD/{args.cache_dir} \
    hydra.run.dir=$PWD/{exp_path}"
    inference_cmd_list += [echo_cmd, gen_emb_cmd]

    def get_inference_cmd(num_prompts=3, retriever='Uprise'):
        """Build [echo, inference] cmds for the task/cluster of the enclosing loop.

        NOTE: default retriever fixed from 'uprise' (which would have failed
        the assertion below) to the valid capitalized value 'Uprise'.
        """
        assert retriever in [None, 'Random', 'Bm25', 'Sbert', 'Uprise']
        # Fixed: the original compared against lowercase "random", which never
        # matched the capitalized 'Random', so the random-retrieval baseline
        # silently ran without random sampling (it also shadowed the `random`
        # module imported at file level).
        use_random_sampling = retriever == 'Random'
        echo_cmd = f'echo "running inference on {task} task of {cluster} cluster with {retriever} retriever..."'
        # Fixed: the filename now reflects the `num_prompts` actually used
        # (it previously always embedded args.num_prompts, even for 0-shot runs).
        pred_outpath = os.path.join(exp_path, f'preds_for_{cluster}', f'{task}_prompts{num_prompts}_retriever{retriever}_preds.json')
        run_inference_cmd = \
        f"accelerate launch --num_processes 1 --main_process_port {random_port} \
        inference.py prompt_file=$PWD/{retrieve_prompts_outpath} \
        task_name={task} \
        output_file=$PWD/{pred_outpath} \
        res_file=$PWD/{eval_res_outpath} \
        batch_size={args.inference_bsz} \
        train_clusters={args.train_clusters} \
        model_name={args.inf_model} \
        prompt_pool_path=$PWD/{prompt_pool_dir} \
        num_prompts={num_prompts} \
        random_sample={use_random_sampling} random_seed=42 \
        cache_dir=$PWD/{args.cache_dir} \
        hydra.run.dir=$PWD/{exp_path}"
        return [echo_cmd, run_inference_cmd]

    # 2. retrieve positive prompts from the prompt pool, for each task in the testing clusters:
    test_clusters = args.test_clusters.split('+')
    for cluster in test_clusters:
        eval_res_outpath = os.path.join(exp_path, f'eval_res_for_{cluster}.txt')
        for task in test_cluster_map[cluster]:
            echo_cmd = f'echo "uprise retrieves on {task} task of {cluster} cluster..."'
            retrieve_prompts_outpath = os.path.join(exp_path, f'uprise_prompts_for_{cluster}', f'{task}_prompts.json')
            # NOTE: `ctx_datatsets` is spelled exactly as the DPR hydra config
            # expects -- do not "fix" the typo here without changing DPR too.
            retrieve_prompts_cmd = \
            f'python DPR/dense_retriever.py model_file=$PWD/{exp_path}/dpr_biencoder.best_valid \
            qa_dataset=qa_uprise ctx_datatsets=[dpr_uprise] \
            encoded_ctx_files=["$PWD/{exp_path}/dpr_enc_index_*"]\
            out_file=$PWD/{retrieve_prompts_outpath} \
            datasets.qa_uprise.task_name={task} \
            datasets.qa_uprise.task_setup_type=q \
            datasets.qa_uprise.cache_dir=$PWD/{args.cache_dir} \
            n_docs={args.num_prompts} \
            ctx_sources.dpr_uprise.prompt_pool_path=$PWD/{prompt_pool_dir} \
            ctx_sources.dpr_uprise.train_clusters={args.train_clusters} \
            ctx_sources.dpr_uprise.prompt_setup_type={args.retriever_prompt_setup} \
            encoder.cache_dir=$PWD/{args.cache_dir} \
            hydra.run.dir={exp_path}'
            inference_cmd_list += [echo_cmd, retrieve_prompts_cmd]
            # vanilla zero shot
            inference_cmd_list += get_inference_cmd(num_prompts=0, retriever=None)
            # uprise zero shot
            inference_cmd_list += get_inference_cmd(num_prompts=args.num_prompts, retriever='Uprise')
            # Ablations: replace uprise retriever with random, bm25 and sbert
            if args.retrieve_random:
                inference_cmd_list += get_inference_cmd(num_prompts=args.num_prompts, retriever='Random')
            if args.retrieve_bm25:
                echo_cmd = f'echo "bm25 retrieves on {task} task of {cluster} cluster..."'
                retrieve_prompts_outpath = os.path.join(exp_path, f'bm25_prompts_for_{cluster}', f'{task}_prompts.json')
                retrieve_bm25_prompts_cmd = \
                f'python retrieve_bm25.py \
                train_clusters={args.train_clusters} \
                task_name={task} cache_dir=$PWD/{args.cache_dir} \
                prompt_pool_path=$PWD/{prompt_pool_dir} \
                out_file=$PWD/{retrieve_prompts_outpath} \
                prompt_setup_type={args.retriever_prompt_setup} n_docs={args.num_prompts} \
                hydra.run.dir=$PWD/{exp_path} '
                inference_cmd_list += [echo_cmd, retrieve_bm25_prompts_cmd]
                inference_cmd_list += get_inference_cmd(num_prompts=args.num_prompts, retriever='Bm25')
            if args.retrieve_sbert:
                echo_cmd = f'echo "sbert retrieves on {task} task of {cluster} cluster..."'
                retrieve_prompts_outpath = os.path.join(exp_path, f'sbert_prompts_for_{cluster}', f'{task}_prompts.json')
                retrieve_sbert_prompts_cmd = \
                f'python retrieve_sbert.py \
                train_clusters={args.train_clusters} \
                task_name={task} cache_dir=$PWD/{args.cache_dir} \
                prompt_pool_path=$PWD/{prompt_pool_dir} \
                out_file=$PWD/{retrieve_prompts_outpath} \
                prompt_setup_type={args.retriever_prompt_setup} n_docs={args.num_prompts} \
                hydra.run.dir=$PWD/{exp_path} '
                inference_cmd_list += [echo_cmd, retrieve_sbert_prompts_cmd]
                inference_cmd_list += get_inference_cmd(num_prompts=args.num_prompts, retriever='Sbert')

    inference_cmd_list = [wrap(cmd) for cmd in inference_cmd_list]
    # write run.sh
    with open(f"{exp_path}/inference.sh","w") as f:
        f.write("\n\n".join(inference_cmd_list))
    print('saved inference cmds to: ', f"{exp_path}/inference.sh")
    return
if __name__ == "__main__":
    # Command-line entry point: collect all knobs and emit the shell scripts.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir',
                        type=str, help='Directory for saving all the intermediate and final outputs.',
                        default='my_data')
    parser.add_argument('--cache_dir',
                        type=str, help='Directory for caching the huggingface models and datasets.',
                        default='../cache')
    parser.add_argument('--gpus',
                        type=int, help='number of gpus to use',
                        default=8)

    # training
    parser.add_argument('--train_clusters',
                        type=str,
                        help='a string concatenating task clusters for training, \
                        e.g., `nli+common_reason` means nli and common_reason task clusters \
                        all supported clusters are in DPR.dpr.utils.tasks.train_cluster_map \
                        clusters=`all supported clusters` when the passed value is None',
                        default=None)
    parser.add_argument('--retriever_prompt_setup',
                        type=str,
                        help='setup type of prompt, recommend setting as `qa` for cross-task training \
                        and `q` for task-specific training',
                        default="qa")
    parser.add_argument('--ds_size',
                        type=int,
                        help='number of maximum data examples sampled from each training dataset',
                        default=10000)
    parser.add_argument('--scr_model',
                        type=str,
                        help='Huggingface model for scoring data',
                        default="EleutherAI/gpt-neo-2.7B")
    parser.add_argument("--multi_task",
                        action="store_true",
                        help="True for multi-task and False for task-specific, \
                        the difference reflects on the sampling of negative prompts ONLY \
                        refer to `UpriseDataset` in `DPR/dpr/data/biencoder_data.py` for details")
    parser.add_argument('--retriever_top_k',
                        type=int,
                        help='number of k (hard) negatives for training the retriever',
                        default=20)
    parser.add_argument('--retriever_bsz',
                        type=int,
                        help='sum of batch size of all gpus, NOT per gpu',
                        default=16)
    parser.add_argument('--retriever_epoch',
                        type=int,
                        help='maximum training epoch, recommend setting as `3` when cross-task training, \
                        and `10` when task-specific training',
                        default=3)

    # inference
    parser.add_argument('--inf_model',
                        type=str,
                        help='Huggingface model for inference',
                        default="EleutherAI/gpt-neo-2.7B")
    parser.add_argument('--test_clusters',
                        type=str,
                        help='a string concatenating task clusters for testing, \
                        e.g., `nli+common_reason` means nli and common_reason task clusters \
                        all supported clusters are in DPR.dpr.utils.tasks.test_cluster_map',
                        default="nli+common_reason")
    parser.add_argument('--num_prompts',
                        type=int,
                        help='maximum number of retrieved prompts to be concatenated before the task input',
                        default=3)
    parser.add_argument('--retrieve_random',
                        action="store_true",
                        help='whether to random retrieve from our prompt pool, and run a baseline')
    parser.add_argument('--retrieve_bm25',
                        action="store_true",
                        help='whether to use bm25 retriever to retrieve from our prompt pool, and run a baseline')
    parser.add_argument('--retrieve_sbert',
                        action="store_true",
                        help='whether to use sbert to retrieve from our prompt pool, and run a baseline')
    parser.add_argument('--inference_bsz',
                        type=int,
                        help='sum of batch size of all gpus, NOT per gpu',
                        default=1)
    args = parser.parse_args()

    get_cmds(args)
| microsoft/LMOps | uprise/get_cmds.py | get_cmds.py | py | 14,669 | python | en | code | 2,623 | github-code | 1 | [
{
"api_name": "textwrap.wrap",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_numb... |
42113373089 | import os
import PIL
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
# Load the 9x9 tile images rendered by the generation step (row-major order).
directory = "output/grids"
imgs = [
    np.array(PIL.Image.open(f"{directory}/dim_{row}_{col}.jpg"))
    for row in range(9)
    for col in range(9)
]

# Lay the 81 tiles out on a 9x9 matplotlib grid and save the composite.
fig = plt.figure(figsize=(9, 9))
grid = ImageGrid(fig, 111,
                 nrows_ncols=(9, 9),
                 axes_pad=0.1,
                 )
for ax, im in zip(grid, imgs):
    ax.imshow(im)
plt.savefig("grid.jpg")
{
"api_name": "PIL.Image.open",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
32434624148 | import logging
import os
import signal
import watchdog.events
import watchdog.observers.polling
import watchdog_gevent
# https://github.com/Bogdanp/dramatiq/blob/master/dramatiq/__main__.py
def setup_file_watcher(path, callback, use_polling=False):
    """Start a background observer that invokes `callback` for every change
    to a `*.py` file under `path` (recursively).

    Parameters
    ----------
    path : directory to watch.
    callback : called with the watchdog event object for every matching event.
    use_polling : use a polling observer instead of the gevent-based one
        (useful on filesystems, e.g. network mounts, without inotify support).

    Returns the started observer; the caller owns `stop()`/`join()`.
    """
    if use_polling:
        observer_class = watchdog.observers.polling.PollingObserver
    else:
        observer_class = watchdog_gevent.Observer

    # only react to Python source files
    file_event_handler = watchdog.events.PatternMatchingEventHandler(patterns=['*.py'])
    # monkey patching is perfectly fine.
    file_event_handler.on_any_event = callback

    # start the watcher
    file_watcher = observer_class()
    file_watcher.schedule(file_event_handler, path, recursive=True)
    file_watcher.start()
    return file_watcher
if __name__ == "__main__":
    # Run this in a debugger to see file-change events printed as they occur.
    import gevent

    def cb(event):
        print(event)

    file_watcher = setup_file_watcher('.', cb)
    try:
        while True:
            gevent.sleep(1)
    except KeyboardInterrupt:
        # the original stop()/join() calls were unreachable after `while True`;
        # shut the observer down cleanly on Ctrl-C instead
        pass
    finally:
        file_watcher.stop()
        file_watcher.join()
| geyang/ml_logger | scratch/old/vis_server_gevent_deprecated/file_watcher.py | file_watcher.py | py | 1,341 | python | en | code | 176 | github-code | 1 | [
{
"api_name": "watchdog.events.observers",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "watchdog.events",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "watchdog_gevent.Observer",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_... |
2014274942 | from datetime import datetime
from tqdm.auto import tqdm
import torch
import torch.nn as nn
from torch.nn.utils import clip_grad_norm_
from sklearn import metrics
import numpy as np
from model import VLPForTokenClassification, model_config_factory
from dataset import dataset_factory
from training.utils import get_tokenizer, get_scheduler, get_model_summary
import warnings
warnings.filterwarnings('ignore')
# Token-classification label inventories. The list index IS the integer class
# id, so order must stay in sync with the ids the dataset emits.
# Penn-Treebank-style POS tags; "XX" doubles as the unknown/padding tag.
POS_TAGS = [
    "XX", "``", "$", "''", "*", ",", "-LRB-", "-RRB-", ".", ":", "ADD", "AFX",
    "CC", "CD", "DT", "EX", "FW", "HYPH", "IN", "JJ", "JJR", "JJS", "LS", "MD",
    "NFP", "NN", "NNP", "NNPS", "NNS", "PDT", "POS", "PRP", "PRP$", "RB",
    "RBR", "RBS", "RP", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP",
    "VBZ", "VERB", "WDT", "WP", "WP$", "WRB"
]
# BIO-scheme NER tags with OntoNotes-style entity types; "O" = outside.
NER_TAGS = [
    "O", "B-PERSON", "I-PERSON", "B-NORP", "I-NORP", "B-FAC", "I-FAC", "B-ORG",
    "I-ORG", "B-GPE", "I-GPE", "B-LOC", "I-LOC", "B-PRODUCT", "I-PRODUCT",
    "B-DATE", "I-DATE", "B-TIME", "I-TIME", "B-PERCENT", "I-PERCENT",
    "B-MONEY", "I-MONEY", "B-QUANTITY", "I-QUANTITY", "B-ORDINAL", "I-ORDINAL",
    "B-CARDINAL", "I-CARDINAL", "B-EVENT", "I-EVENT", "B-WORK_OF_ART",
    "I-WORK_OF_ART", "B-LAW", "I-LAW", "B-LANGUAGE", "I-LANGUAGE"
]
# Smaller alternative tag sets (PTB/CoNLL-style), kept for reference:
# POS_TAGS = [
#     'XX', '"', "''", '#', '$', '(', ')', ',', '.', ':', '``', 'CC', 'CD', 'DT',
#     'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNP', 'NNPS',
#     'NNS', 'NN|SYM', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP',
#     'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WDT', 'WP',
#     'WP$', 'WRB'
# ]
# NER_TAGS = [
#     'O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC',
#     'I-MISC'
# ]
# Cadence (in optimizer steps) for dumping decoded debug samples and for
# running evaluation, respectively.
TRAIN_DEBUG_STEP = 1000
TRAIN_EVAL_STEP = 3000
# TRAIN_DEBUG_STEP = 500
# TRAIN_EVAL_STEP = 1000
def run_token_cls_training(args):
    """Joint training loop: text generation plus NER and POS token tagging.

    Builds a VLP model (optionally warm-started from a checkpoint), trains it
    with the sum of three cross-entropy losses (text / NER / POS), evaluates
    every TRAIN_EVAL_STEP optimizer steps, checkpoints the best model by
    validation loss plus the final model, and returns (model, optimizer).
    """
    current_date_str = datetime.now().strftime('%m%d%Y_%H%M%S')
    tokenizer, vocab_size = get_tokenizer(args)
    config = model_config_factory(args.model_name)
    model = VLPForTokenClassification(**config,
                                      vocab_size=vocab_size,
                                      num_ner_tags=len(NER_TAGS),
                                      num_pos_tags=len(POS_TAGS),
                                      dropout=0.0)
    if args.pretrained_model_path:
        # warm start; strict=False because the classification heads are new
        pretrained = torch.load(args.pretrained_model_path)["model_state"]
        model.load_state_dict(pretrained, strict=False)
    model.train()
    train_loader, test_loader = dataset_factory(args, config, tokenizer)
    optimizer, lr_scheduler = get_scheduler(model, args, config,
                                            len(train_loader))
    get_model_summary(model, args.image_size, args.max_text_len,
                      args.num_channels)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    best_eval_loss = np.inf
    count = 0  # global optimizer-step counter across epochs
    for epoch in range(args.num_epochs):
        # per-epoch running statistics (only recent windows are shown in logs)
        training_losses = []
        tr_ner_losses = []
        tr_pos_losses = []
        tr_txt_losses = []
        f1_micro_scores_ner, f1_macro_scores_ner = [], []
        f1_micro_scores_pos, f1_macro_scores_pos = [], []
        progress_bar = tqdm(train_loader,
                            desc='Training',
                            position=0,
                            leave=True)
        for step, (images, tokens, tgt, tgt_y, tgt_mask, ner_tags,
                   pos_tags) in enumerate(progress_bar):
            images, tgt = images.to(device), tgt.to(device)
            tgt_y, tgt_mask = tgt_y.to(device), tgt_mask.to(device)
            ner_tags, pos_tags = ner_tags.to(device), pos_tags.to(device)
            tgt_mask = tgt_mask.squeeze(1)
            # one forward pass yields logits for all three heads
            logits_txt, logits_ner, logits_pos = model(images,
                                                       tgt,
                                                       tgt_mask=tgt_mask)
            loss_fct = nn.CrossEntropyLoss(
                label_smoothing=args.label_smoothing)
            loss_txt = loss_fct(logits_txt.view(-1, vocab_size),
                                tgt_y.view(-1))
            loss_ner = loss_fct(logits_ner.view(-1, len(NER_TAGS)),
                                ner_tags.view(-1))
            loss_pos = loss_fct(logits_pos.view(-1, len(POS_TAGS)),
                                pos_tags.view(-1))
            # unweighted sum of the three task losses
            loss = loss_txt + loss_ner + loss_pos
            loss.backward()
            if args.use_clip_grad:
                clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            training_losses.append(loss.item())
            tr_txt_losses.append(loss_txt.item())
            tr_ner_losses.append(loss_ner.item())
            tr_pos_losses.append(loss_pos.item())
            count += 1
            preds_ner = logits_ner.argmax(dim=-1)
            preds_pos = logits_pos.argmax(dim=-1)
            target_ner, preds_ner = ner_tags.cpu().detach().numpy(
            ), preds_ner.cpu().detach().numpy()
            target_pos, preds_pos = pos_tags.cpu().detach().numpy(
            ), preds_pos.cpu().detach().numpy()
            # per-batch F1, flattened over all tokens in the batch
            f1_score_micro_ner, f1_score_macro_ner = get_metrics(
                target_ner.flatten(), preds_ner.flatten(), display=False)
            f1_micro_scores_ner.append(f1_score_micro_ner)
            f1_macro_scores_ner.append(f1_score_macro_ner)
            f1_score_micro_pos, f1_score_macro_pos = get_metrics(
                target_pos.flatten(), preds_pos.flatten(), display=False)
            f1_micro_scores_pos.append(f1_score_micro_pos)
            f1_macro_scores_pos.append(f1_score_macro_pos)
            if step % TRAIN_DEBUG_STEP == 0:
                # dump the first sample's tags and the decoded text prediction
                print("train target_ner", [NER_TAGS[t] for t in target_ner[0]])
                print("train preds_ner", [NER_TAGS[t] for t in preds_ner[0]])
                print("!" * 100)
                print("train target_pos", [POS_TAGS[t] for t in target_pos[0]])
                print("train preds_pos", [POS_TAGS[t] for t in preds_pos[0]])
                print("#" * 50)
                idxs = torch.argmax(logits_txt, dim=-1)
                for i, idx in enumerate(idxs):
                    if i == 1:
                        break  # only the first sequence of the batch
                    print('#' * 100)
                    print(
                        tokenizer.decode(tokens[i].tolist()).replace(
                            ' [PAD] ', '').replace('[PAD]', ''))
                    print('!' * 50)
                    print(
                        tokenizer.decode(idx.tolist()).replace(
                            ' [PAD] ', '').replace('[PAD]', ''))
                    print('#' * 100)
            # progress-bar postfix over a sliding window of recent steps
            logs = {
                "epoch": epoch + 1,
                # "loss": f"{np.mean(training_losses[-2000:]):.3f}",
                "loss_ner": f"{np.mean(tr_ner_losses[-2000:]):.3f}",
                "loss_pos": f"{np.mean(tr_pos_losses[-2000:]):.3f}",
                "loss_txt": f"{np.mean(tr_txt_losses[-2000:]):.3f}",
                # "f1_micro_ner": f"{np.mean(f1_micro_scores_ner[-2000:]):.3f}",
                "f1_ner:": f"{np.mean(f1_macro_scores_ner[-2000:]):.3f}",
                # "f1_micro_pos": f"{np.mean(f1_micro_scores_pos[-2000:]):.3f}",
                "f1_pos:": f"{np.mean(f1_macro_scores_pos[-2000:]):.3f}",
                "lr": lr_scheduler.get_last_lr()[0],
                "step": count
            }
            progress_bar.set_postfix(**logs)
            if not (count % TRAIN_EVAL_STEP):
                eval_loss = evaluate(model,
                                     test_loader,
                                     tokenizer,
                                     device=device,
                                     vocab_size=vocab_size,
                                     epoch=epoch,
                                     tr_step=step,
                                     current_date_str=current_date_str)
                if eval_loss < best_eval_loss:
                    # keep only the best-by-eval-loss weights at out_model_path
                    torch.save(
                        {
                            'model_state': model.state_dict(),
                            # 'optimizer_state': optimizer.state_dict(),
                        },
                        args.out_model_path)
                    best_eval_loss = eval_loss
                model.train()  # evaluate() switched the model to eval mode
    # always save the final weights as well, under a "last_" prefix
    torch.save({
        'model_state': model.state_dict(),
    }, f"last_{args.out_model_path}")
    return model, optimizer
def get_metrics(targets, outputs, display=True):
    """Compute micro/macro F1 for flat label arrays; optionally print them
    (together with accuracy). Returns (f1_micro, f1_macro)."""
    acc = metrics.accuracy_score(targets, outputs)
    micro_f1 = metrics.f1_score(targets, outputs, average='micro')
    macro_f1 = metrics.f1_score(targets, outputs, average='macro')
    if display:
        for label, value in (("Accuracy Score", acc),
                             ("F1 Score (Micro)", micro_f1),
                             ("F1 Score (Macro)", macro_f1)):
            print(f"{label} = {value}")
    return micro_f1, macro_f1
def evaluate(model,
             test_loader,
             tokenizer,
             vocab_size,
             device,
             epoch=0,
             tr_step=0,
             current_date_str="dummy_date"):
    """Validation pass over `test_loader`.

    Averages the summed (text + NER + POS) cross-entropy loss, prints the
    per-component losses plus NER/POS classification reports, and returns
    the mean total loss. `epoch`, `tr_step` and `current_date_str` are
    accepted for call symmetry with the training loop but currently unused.
    """
    # (removed leftover debug print("usao"))
    model.eval()
    total_loss_ner = 0
    total_loss_pos = 0
    total_loss_txt = 0
    total_loss = 0
    all_targets_ner = []
    all_preds_ner = []
    all_targets_pos = []
    all_preds_pos = []
    with torch.no_grad():
        for step, (images, tokens, tgt, tgt_y, tgt_mask, ner_tags,
                   pos_tags) in enumerate(tqdm(test_loader)):
            images, tgt = images.to(device), tgt.to(device)
            tgt_y, tgt_mask = tgt_y.to(device), tgt_mask.to(device)
            ner_tags, pos_tags = ner_tags.to(device), pos_tags.to(device)
            tgt_mask = tgt_mask.squeeze(1)
            logits_txt, logits_ner, logits_pos = model(images,
                                                       tgt,
                                                       tgt_mask=tgt_mask)
            # no label smoothing at eval time (unlike training)
            loss_fct = nn.CrossEntropyLoss()
            loss_txt = loss_fct(logits_txt.view(-1, vocab_size),
                                tgt_y.view(-1))
            loss_ner = loss_fct(logits_ner.view(-1, len(NER_TAGS)),
                                ner_tags.view(-1))
            loss_pos = loss_fct(logits_pos.view(-1, len(POS_TAGS)),
                                pos_tags.view(-1))
            loss = loss_txt + loss_ner + loss_pos
            preds_ner = logits_ner.argmax(dim=-1)
            preds_pos = logits_pos.argmax(dim=-1)
            target_ner, preds_ner = ner_tags.cpu().detach().numpy(
            ), preds_ner.cpu().detach().numpy()
            target_pos, preds_pos = pos_tags.cpu().detach().numpy(
            ), preds_pos.cpu().detach().numpy()
            # accumulate flat token-level labels for the final reports
            all_targets_ner.extend(target_ner.flatten())
            all_preds_ner.extend(preds_ner.flatten())
            all_targets_pos.extend(target_pos.flatten())
            all_preds_pos.extend(preds_pos.flatten())
            if step % 300 == 0:
                # dump the first sample's tags and decoded text prediction
                print("eval target_ner", [NER_TAGS[t] for t in target_ner[0]])
                print("eval preds_ner", [NER_TAGS[t] for t in preds_ner[0]])
                print("!" * 100)
                print("eval target_pos", [POS_TAGS[t] for t in target_pos[0]])
                print("eval preds_pos", [POS_TAGS[t] for t in preds_pos[0]])
                print("#" * 50)
                idxs = torch.argmax(logits_txt, dim=-1)
                for i, idx in enumerate(idxs):
                    if i == 1:
                        break  # only the first sequence of the batch
                    print('#' * 100)
                    print(
                        tokenizer.decode(tokens[i].tolist()).replace(
                            ' [PAD] ', '').replace('[PAD]', ''))
                    print('!' * 50)
                    print(
                        tokenizer.decode(idx.tolist()).replace(
                            ' [PAD] ', '').replace('[PAD]', ''))
                    print('#' * 100)
            total_loss += loss.item()
            total_loss_txt += loss_txt.item()
            total_loss_ner += loss_ner.item()
            total_loss_pos += loss_pos.item()
    # mean over batches
    total_loss /= len(test_loader)
    total_loss_txt /= len(test_loader)
    total_loss_ner /= len(test_loader)
    total_loss_pos /= len(test_loader)
    print(f"Valid Loss: {total_loss}")
    print(f"Valid Loss Text: {total_loss_txt}")
    print(f"Valid Loss NER: {total_loss_ner}")
    print(f"Valid Loss POS: {total_loss_pos}")
    get_metrics(all_targets_ner, all_preds_ner, display=True)
    print(
        metrics.classification_report(all_targets_ner,
                                      all_preds_ner,
                                      target_names=NER_TAGS,
                                      labels=list(range(len(NER_TAGS)))))
    get_metrics(all_targets_pos, all_preds_pos, display=True)
    print(
        metrics.classification_report(all_targets_pos,
                                      all_preds_pos,
                                      target_names=POS_TAGS,
                                      labels=list(range(len(POS_TAGS)))))
    return total_loss
| filipbasara0/visual-language-processing | training/train_token_cls.py | train_token_cls.py | py | 13,267 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "train... |
4947865207 | import time
import unittest
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class ITTroubleshooterSearchTest(unittest.TestCase):
    """Browser test: search Google for ittroubleshooter.in, then load the site."""

    def setUp(self):
        # Attach to a remote Selenium Grid node.
        # NOTE(review): this reads an environment variable literally named
        # "firefox" (falling back to "firefox"); confirm whether a name such
        # as BROWSER was intended here.
        caps = {'browserName': os.getenv('firefox', 'firefox')}
        self.browser = webdriver.Remote(
            command_executor='http://192.168.1.2:4444/wd/hub',
            desired_capabilities=caps
        )

    def test_ITTroubleshooter_search_for(self):
        """Submit the query on Google, then navigate to the site directly."""
        browser = self.browser
        browser.get('https://www.google.com/')
        search_box = browser.find_element_by_name('q')
        search_box.send_keys('ittroubleshooter.in')
        search_box.send_keys(Keys.RETURN)
        browser.get('https://ittroubleshooter.in/')
        time.sleep(3)  # simulate long running test

    def tearDown(self):
        # quit() ends the whole WebDriver session (close() would only close one window)
        self.browser.quit()


if __name__ == '__main__':
    unittest.main()
| kumargaurav522/selenium | test2.py | test2.py | py | 1,003 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Remote",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.web... |
74736245474 | from flask import Blueprint,flash,url_for,redirect,render_template,request
from flask_login import login_required
from ksk.models import Pizza
from ksk import db
from ksk.pizza.utils import save_img_for_pizza
from ksk.pizza.forms import PizzaForm
# Blueprint grouping the pizza routes below; presumably registered on the
# Flask app during application setup (not shown in this module).
pizzas = Blueprint('pizzas',__name__)
########## Pizza Upload #############
@pizzas.route('/upload/pizza', methods=['GET', 'POST'])
@login_required
def upload_pizza():
    """Show the pizza upload form; on a valid POST, persist the new pizza
    (with its optional image) and redirect to the pizza listing."""
    form = PizzaForm()
    if not form.validate_on_submit():
        # GET request or failed validation: (re)render the form
        return render_template('product_upload_form.html',
                               category="Pizza", title="Pizza Upload", form=form)
    new_pizza = Pizza(form.product_code.data, form.type.data, form.product_detail.data)
    if form.image_file.data:
        new_pizza.image_file = save_img_for_pizza(form.image_file.data)
    db.session.add(new_pizza)
    db.session.commit()
    flash('New Product has been uploaded!','info')
    return redirect(url_for('pizzas.view_pizza'))
######### Pizza Client View #############
@pizzas.route('/product/pizza')
def view_pizza():
    """Client-facing paginated pizza listing, newest first, 3 per page."""
    current_page = request.args.get('page', 1, type=int)
    ordered = Pizza.query.order_by(Pizza.date_posted.desc())
    pizzas = ordered.paginate(page=current_page, per_page=3)
    return render_template('pizza_view.html', title="Pizza", pizzas=pizzas)
| ZiG-Z/KSK-Bakery | ksk/pizza/routes.py | routes.py | py | 1,280 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ksk.pizza.forms.PizzaForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ksk.models.Pizza",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ksk.pizza.u... |
3168586227 | import os
import csv
import glob
import json
import torch
import warnings
import itertools
import torchaudio
import numpy as np
from pathlib import Path
from tqdm import tqdm
from PIL import Image as PILImage
from itertools import cycle, islice, chain
from einops import rearrange, repeat
import multiprocessing as mp
import torch.utils.data as data
import torch.nn.functional as F
from .audio import (
make_transform, _extract_kaldi_spectrogram
)
from .image import make_clip_image_transform as make_image_transform
from .audio_text import (
build_dataloader, build_audiocaps_data_list, AudioTextDatasetSrc
)
from clip import tokenize
class ImageTextDatasetSrc(AudioTextDatasetSrc):
    """ `__getitem__' loads raw file from disk.

    Extends the audio-text dataset with per-sample image (video frame)
    loading, optionally substituting pre-computed frame embeddings.
    """
    def __init__(self, cfg, data_list, train):
        super().__init__(cfg, data_list, train)
        self.frame_key = cfg.frame_key  # record key holding the frame file name(s)
        self.transform_image = make_image_transform(cfg.resolution)

    def _image2embed(self, fname):
        # Load a pre-computed image embedding (stored under key "v" of an
        # .npz). On any failure, fall back to a random float32 vector and warn,
        # so one corrupt file cannot abort a whole epoch.
        try:
            image = np.load(fname)["v"]
        except Exception as e:
            image = np.random.rand(self.cfg.embed_dim).astype("float32")
            warnings.warn(f"use random image instead because `{e}` {fname}.")
        return image

    def _image2numpy(self, fname):
        # Load an image file into a numpy array.
        # .npz files are treated as bundles of pre-transformed frames: pick a
        # random non-empty frame in training, the middle one at eval time.
        # Other files are opened with PIL and run through the CLIP transform.
        # Any failure falls back to a random RGB image (plus a warning).
        # A None fname yields a 1x1x1 placeholder array.
        if fname is not None:
            try:
                if fname.endswith(".npz"):
                    images = np.load(fname)
                    images = [images[key] for key in images.files if len(images[key]) != 0]
                    idx = np.random.choice(len(images), 1)[0] if self.train else int(np.ceil(len(images) / 2)) - 1
                    image = images[idx]
                else:
                    image = PILImage.open(fname)
                    image = self.transform_image(image).cpu().numpy()
            except Exception as e:
                h = w = self.cfg.resolution
                image = PILImage.fromarray(
                    (np.random.rand(h, w, 3) * 256).astype(np.uint8)
                )
                warnings.warn(f"use random image instead because `{e}` {fname}.")
                image = self.transform_image(image).cpu().numpy()
        else:
            image = np.array([[[1]]])  # placeholder when no frame is configured
        return image

    def __getitem__(self, index):
        # Returns {"image", "audio", "text", "name"}: one image (or embedding),
        # one audio clip and one caption for the record at `index`.
        akey = self.aclip_key
        fkey = self.frame_key
        name = self.dataset[index]["id"]
        sub_dir = self.dataset[index]["dir"]
        label_str = self.dataset[index]["label_str"]
        label_int = self.dataset[index]["label_int_bpe"]
        aclip = self.dataset[index][akey][0]
        frame = images = self.dataset[index][fkey]
        sub_dir = "" if len(sub_dir) == 0 else f"{sub_dir}/"
        aclip = aclip if aclip == name else f"{akey}/{name}.{aclip}"
        aclip_file = f"{self.cfg.data_root}/{sub_dir}{aclip}"
        # image: pick one frame (random in training, middle one at eval time)
        frame_emb_file = None
        if isinstance(frame, str):
            frame_file = f"{self.cfg.data_root}/{sub_dir}{fkey}/{name}.{frame}"
        else:
            idx = np.random.choice(len(images), 1)[0] if self.train else int(np.ceil(len(images) / 2)) - 1
            frame_file = f"{self.cfg.data_root}/{sub_dir}{fkey}/{name}.{images[idx]}"
            if self.cfg.frame_emb is not None:
                frame_emb_file = f"{self.cfg.data_root}/{self.cfg.frame_emb}/{name}.{images[idx].rsplit('.', 1)[0]}.npz"
        # higher priority for pre-computed frame embeddings
        image = self._image2embed(frame_emb_file) if frame_emb_file is not None else self._image2numpy(frame_file)
        # audio: load, optionally normalize with precomputed mean/std, augment
        audio = self._audio2numpy_cst(aclip_file)
        if not self.cfg.audio.eval_norms and len(self.audio_norms) == 2:
            mean, std = self.audio_norms
            audio = (audio - mean) / std
        #if self.train and self.transform_fbank is not None:
        if not self.cfg.audio.eval_norms and self.train and self.transform_fbank is not None:
            audio = self.transform_fbank(audio)
        # text: one random caption in training, the full caption list at eval
        if self.train:
            idx = np.random.choice(len(label_int), 1)[0]
            text = label_int[idx]
        else:
            text = label_int
        # add a leading batch-of-1 axis; the collator concatenates along it
        audio = audio[None]
        image = image[None]
        item = {"image": image, "audio": audio, "text": text, "name": name}
        return item

    def __len__(self):
        return self.length
class ImageTextCollator:
    """Batch collator: stacks images and zero-pads tokenized captions.

    Training items carry one caption (list of ints); eval items carry a list
    of captions, which is flattened so every caption becomes a row.
    """
    def __init__(self, device=torch.device("cpu")):
        # RuntimeError: cannot pin 'torch.cuda.FloatTensor' only dense CPU tensors can be pinned
        # when pin_memory is true, the collator has to return CPU tensors
        self.device = device

    def __call__(self, records):
        # transpose list-of-dicts into dict-of-lists over all keys present
        union = {
            k: [record.get(k) for record in records] for k in set().union(*records)
        }
        name = union["name"]
        text_list = union["text"]
        if isinstance(text_list[0][0], int): # train
            pass  # one caption per item; padding happens below
            """ https://stackoverflow.com/a/43149308
            lengths = [len(x) for x in text_list]
            max_len = max(lengths)
            text = np.zeros((len(text_list), max_len), int)
            mask = np.arange(max_len) < np.array(lengths)[:, None]
            text[mask] = np.concatenate(text_list)
            """
        elif isinstance(text_list[0][0], list): # test
            # multiple captions per item: flatten to one caption per row
            text_list = list(itertools.chain.from_iterable(text_list))
            #name = list(itertools.chain.from_iterable(name))
        else:
            raise ValueError(f"unrecognized `{type(text_list[0][0])}`")
        # https://stackoverflow.com/a/38619333
        # zero-pad the ragged caption lists to a rectangular (rows, max_len) array
        text = np.array(list(itertools.zip_longest(*text_list, fillvalue=0))).T
        return (
            np.concatenate(union["image"], axis=0),
            text,
            name,
        )
def build_dataloader_audiocaps(cfg, data_name, shuffle=True, train=True):
    """Build one dataloader over the union of the comma-separated AudioCaps
    split names in `data_name`."""
    data_list = [
        record
        for split_name in data_name.split(",")
        for record in build_audiocaps_data_list(cfg.running, split_name)
    ]
    return build_dataloader(
        cfg, data_list, ImageTextDatasetSrc,
        shuffle=shuffle, train=train, collator_cls=ImageTextCollator,
    )
def build_image_text_dataloader(cfg, data_name, *args, shuffle=True, train=True, **kwargs):
    """Dispatch to the dataloader builder matching `data_name`.

    Only AudioCaps-style names are supported (can only do w/ AudioCaps); extra
    positional/keyword arguments are accepted for signature compatibility and
    are not forwarded.
    """
    if not data_name.startswith("audiocaps"):
        raise ValueError(f"unrecognized dataset `{data_name}`.")
    return build_dataloader_audiocaps(cfg, data_name, shuffle=shuffle, train=train)
| zhaoyanpeng/vipant | cvap/data/image_text.py | image_text.py | py | 6,470 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "audio_text.AudioTextDatasetSrc",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "image.make_clip_image_transform",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_nam... |
70388329635 | # Libraries
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import plotly.subplots as sp
# Global Variables
theme_plotly = None  # None or streamlit
# NOTE(review): `week_days` is never referenced on this page — presumably
# boilerplate shared across dashboard pages; confirm before removing.
week_days = ['Monday', 'Tuesday', 'Wednesday',
             'Thursday', 'Friday', 'Saturday', 'Sunday']

# Layout
st.set_page_config(page_title='Transactions - The Whales of Near',
                   page_icon=':bar_chart:📈', layout='wide')
st.title('🌌Transactions')

# Style
# Inject the shared stylesheet inline into the rendered page.
with open('style.css')as f:
    st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
# Data Sources
@st.cache()
def get_data(query):
    """Fetch a named Flipside Crypto query result as a DataFrame (cached).

    Returns None when `query` is not a known dataset name.
    """
    urls = {
        'TransactionType_each_Wallet': 'https://node-api.flipsidecrypto.com/api/v2/queries/a89edfff-1085-4954-a859-6f2abd0b639f/data/latest',
        'Daily_TX_Type': 'https://api.flipsidecrypto.com/api/v2/queries/999b8676-c867-4638-8a0b-6833dbef705c/data/latest',
        'Daily_TX_FEE_Type': 'https://node-api.flipsidecrypto.com/api/v2/queries/7232fce4-57e1-41e9-a356-2a3b857e4687/data/latest',
        'Total_Transaction_Comparison': 'https://node-api.flipsidecrypto.com/api/v2/queries/fb5217ce-4ab7-44ce-83e5-8aa7b9a79e96/data/latest',
        'Top10_Platforms': 'https://api.flipsidecrypto.com/api/v2/queries/934aaafb-dde1-41b4-ad66-eb7a15f30e5c/data/latest',
        'Top10_TransactionType': 'https://api.flipsidecrypto.com/api/v2/queries/e54eaa63-ab14-4313-af7a-fdff2d8abecc/data/latest',
        'TX_SUCC_Fail': 'https://api.flipsidecrypto.com/api/v2/queries/a21367b3-6df5-447c-996e-919d2f89b6e7/data/latest',
        'D_TX_type': 'https://api.flipsidecrypto.com/api/v2/queries/645f8b4c-f300-49a5-b37a-dc3a5ab7da59/data/latest',
        'D_Fee_Type': 'https://api.flipsidecrypto.com/api/v2/queries/4b0ac171-828a-4afa-ae66-0758801aa3d4/data/latest',
    }
    url = urls.get(query)
    return pd.read_json(url) if url is not None else None
TransactionType_each_Wallet = get_data('TransactionType_each_Wallet')
Daily_TX_Type = get_data('Daily_TX_Type')
Daily_TX_FEE_Type = get_data('Daily_TX_FEE_Type')
Total_Transaction_Comparison = get_data('Total_Transaction_Comparison')
Top10_Platforms = get_data('Top10_Platforms')
Top10_TransactionType = get_data('Top10_TransactionType')
TX_SUCC_Fail = get_data('TX_SUCC_Fail')
D_TX_type = get_data('D_TX_type')
D_Fee_Type = get_data('D_Fee_Type')

# Short aliases used by the chart code below.
df = TransactionType_each_Wallet
df2 = Daily_TX_Type
df3 = Daily_TX_FEE_Type
df4 = Total_Transaction_Comparison
df5 = Top10_Platforms
# NOTE(review): `df6` is bound twice — the Top10_TransactionType frame is
# immediately shadowed by TX_SUCC_Fail, so Top10_TransactionType is never
# used, and every `df6` chart below draws from TX_SUCC_Fail instead.
# The second assignment likely should have introduced a new name (df9?);
# verify which frame each downstream chart is meant to use.
df6 = Top10_TransactionType
df6 = TX_SUCC_Fail
df7 = D_TX_type
df8 = D_Fee_Type
######################################################################################################################
st.write(""" ### Transaction Concept ## """)
# User-facing copy; fixed typos ("A Simply put" -> "Simply put",
# "Prespective" -> "Perspective").
st.write("""
Simply put, cryptocurrency transaction is a transfer of information made between blockchain addresses. These transfers have to be signed with a private key that corresponds to its address. Signed transactions are broadcast to the network of nodes, active computers that follow a specific set of rules to validate transactions and blocks. Valid transactions need to be confirmed by being included in blocks through the process of mining.[[4]](https://www.bitstamp.net/learn/crypto-101/how-do-cryptocurrency-transactions-work/) """)
st.info(""" ##### In This Transaction Section you can find: ####
* Whales Different Type of Transactions Activity
* Whales Transactions Compare to other Users
* Whales Weekly Transaction Perspective view (Last 12 Month)
* Whales Daily Transaction Zoom in (Last 3 Month)
""")
#####################################################################################
st.text(" \n")
st.text(" \n")
st.write(""" ## Whales Transaction Activity """)
st.write(""" 95.1% percent of whale transactions were transferring, while Staking was ranked second-to-last before deleted accounts. What is really shocking here is that there was no NFT trading or mint transaction during the 12-month period (Checked with another query), and the number of swap transactions was relatively low. While four of the top twenty whales had no transaction during the last 12 months, "5c33c6218d47e00ef229f60da78d0897e1ee9665312550b8afd5f9c7bc6957d2" ranked first with 147,334 transactions which all of them is transferring. The user seems to be a bot due to its more than ten transactions per minute performance. "d73888a2619c7761735f23c798536145dfa87f9306b5f21275eb4b1a7ba971b9" Ranked third with 20,284 number of transactions we discuss these two whales more in CEX exchange section. "token.sweat" stood first among platforms with a significant difference from others (with 20,834 whale usage in one year). """)

# Transaction Type In Each Wallet
# Stacked bars: one bar per whale address, stacked by action type (log y).
fig = px.bar(df.sort_values(["TRADER", "Number of Action Type"], ascending=[
             True, False]), x="TRADER", y="Number of Action Type", color="Action Type", title='Whales Type of Transactions [Log Scale]', log_y=True)
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Whales Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# NOTE(review): because of the duplicate `df6 =` assignments above, the two
# charts below draw from TX_SUCC_Fail, not Top10_TransactionType — verify.
c1, c2 = st.columns(2)
with c1:
    fig = px.pie(df6, values="Number of Action Type",
                 names="Action Type", title='Share of each Transaction Type Used by Whales [Percentage]', hole=0.4)
    fig.update_layout(legend_title=None, legend_y=0.5)
    fig.update_traces(textinfo='percent+value', textposition='inside')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
with c2:
    # Top Transaction Type Used Whales [Log Value]
    fig = px.bar(df6, x="Action Type", y="Number of Action Type",
                 color="Action Type", title='Top Transaction Type Used by Whales [Log Scale]', log_y=True)
    fig.update_layout(showlegend=True, xaxis_title=None,
                      yaxis_title='Number of Transaction')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Top 10 Platforms Whales Used
fig = px.bar(df5, x="PLATFORM", y="Platforms usage",
             color="PLATFORM", title='Top 10 Platforms Whales Used in Number of Transactions')
fig.update_layout(showlegend=True, xaxis_title=None,
                  yaxis_title='Platform Usage')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
########################################################################################################################
st.text(" \n")
st.text(" \n")
st.write(""" ## Whale Compare to Other Users """)
st.write(""" Although the share of the top 20 whales in the number of total transactions was less than 1 percent, the average transaction per user among whales was 473.125 during 12 months, which was more than 13 times as high as this average among other users. In contrast, the average transaction fees paid by whales were lower than by regular users (0.0603 and 0.06564 Near, respectively). """)

# Row 1: transaction counts — totals, share, and per-user average (df4).
c1, c2, c3 = st.columns(3)
with c1:
    # Whales Compare to Other Users Total Transactions
    fig = px.bar(df4, x="STATUS", y="Number of Transactions",
                 color="STATUS", title='Whales Compare to Other Users Total Transactions [Log Scale]', log_y=True)
    fig.update_layout(showlegend=True, xaxis_title=None,
                      yaxis_title='Number of Transactions')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
with c2:
    # Whales Compare to Other Users Percentage of Transactions
    fig = px.pie(df4, values="Number of Transactions",
                 names="STATUS", title='Whales Compare to Other Users Percentage of Transactions', hole=0.4)
    fig.update_layout(legend_title=None, legend_y=0.5)
    fig.update_traces(textinfo='percent', textposition='inside')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
with c3:
    # Whales Compare to Other Users Average Transactions per User
    fig = px.bar(df4, x="STATUS", y="Average Transaction Per User",
                 color="STATUS", title='Whales Compare to Other Users Average Transactions per User [Log Scale]', log_y=True)
    fig.update_layout(showlegend=True, xaxis_title=None,
                      yaxis_title='Average Number of Transactions')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Row 2: transaction fees — totals, share, and per-user average (df4).
c1, c2, c3 = st.columns(3)
with c1:
    # Whales Compare to Other Users Total Transactions Fees
    fig = px.bar(df4, x="STATUS", y="TOTAL_TX_FEE",
                 color="STATUS", title='Whales Compare to Other Users Total Transactions Fees [Near-Log Scale]', log_y=True)
    fig.update_layout(showlegend=True, xaxis_title=None,
                      yaxis_title='Total Transaction Fees')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
with c2:
    # Whales Compare to Other Users Percentage of Transactions Fees
    fig = px.pie(df4, values="TOTAL_TX_FEE",
                 names="STATUS", title='Whales Compare to Other Users Percentage of Transactions Fees', hole=0.4)
    fig.update_layout(legend_title=None, legend_y=0.5)
    fig.update_traces(textinfo='percent', textposition='inside')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
with c3:
    # Whales Compare to Other Users Average Transaction Fees paid per Users
    fig = px.bar(df4, x="STATUS", y="Average Transaction Fee Per Users",
                 color="STATUS", title='Whales Compare to Other Users Average Transaction Fees paid per Users [Near-Log Scale ]', log_y=True)
    fig.update_layout(showlegend=True, xaxis_title=None,
                      yaxis_title='Average Transaction Fees')
    st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
##################################################################################################################
st.text(" \n")
st.text(" \n")
st.write(""" ## Whale Weekly Transaction and Transaction Fees """)
st.write(""" While the first six months of 2022 experienced fluctuation of around 2k transactions per week, the second half showed a significant rise and stood at 8000 transactions in a week. The first week of November 2022 had the highest transaction, with more than 14K. As previously mentioned, most of these
transactions were transferring, but the share of function-call rose significantly in the second half of 2022. """)

# Weekly Transactions Classified By Users
fig = px.bar(df2.sort_values(["DATE", "Number of Action Type"], ascending=[
             True, False]), x="DATE", y="Number of Action Type", color="TRADER", title='Weekly Whales Transactions Classified By Users')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Weekly Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Weekly Transactions Classified By Transaction Type
fig = px.bar(df2.sort_values(["DATE", "Number of Action Type"], ascending=[
             True, False]), x="DATE", y="Number of Action Type", color="Action Type", title='Weekly Whales Transactions Classified By Transaction Type')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Weekly Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Weekly Transactions Success and Fails
# (df6 currently holds the TX_SUCC_Fail frame — see the alias NOTE above.)
fig = px.bar(df6.sort_values(["DATE", "Number of Action Type"], ascending=[
             True, False]), x="DATE", y="Number of Action Type", color="TX_STATUS", title='Weekly Whales Transactions Success and Fails')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Weekly Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

#############################################################################################################
# Weekly Transaction Fee Classified by Transaction Type
fig = px.bar(df3.sort_values(["DATE", "Weekly Transaction Fee"], ascending=[
             True, False]), x="DATE", y="Weekly Transaction Fee", color="Action Type", title='Weekly Transaction Fee Classified by Transaction Type')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Weekly Transaction Fees')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Weekly Transaction Fee Classified by Users
fig = px.bar(df3.sort_values(["DATE", "Weekly Transaction Fee"], ascending=[
             True, False]), x="DATE", y="Weekly Transaction Fee", color="TRADER", title='Weekly Transaction Fee Classified by Users')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Weekly Transaction Fees')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
##########################################################################################################
st.text(" \n")
st.text(" \n")
st.write(""" ## Whale Daily Transaction and Transaction Fees """)
st.write(""" Whale's daily transactions remained relatively unchanged in the last three months until Feb 14, 2023, which became more than three times as high as its average. While most of these transactions were committed by one user(2800 out of a total of 3300), half of these transactions were transferring, and almost half of it was function-call. It is interesting to know what happened on that day ?!. """)

# Zoom-in: per-day view (df7 = daily tx types, df8 = daily fees).
# Daily Transactions Classified By Users
fig = px.bar(df7.sort_values(["DATE", "Number of Action Type"], ascending=[
             True, False]), x="DATE", y="Number of Action Type", color="TRADER", title='Daily Whales Transactions Classified By Users')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Daily Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Daily Transactions Classified By Transaction Type
fig = px.bar(df7.sort_values(["DATE", "Number of Action Type"], ascending=[
             True, False]), x="DATE", y="Number of Action Type", color="Action Type", title='Daily Whales Transactions Classified By Transaction Type')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Daily Transaction')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Daily Transaction Fee Classified by Transaction Type
fig = px.bar(df8.sort_values(["DATE", "Daily Transaction Fee"], ascending=[
             True, False]), x="DATE", y="Daily Transaction Fee", color="Action Type", title='Daily Transaction Fee Classified by Transaction Type')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Daily Transaction Fees')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)

# Daily Transaction Fee Classified by Users
fig = px.bar(df8.sort_values(["DATE", "Daily Transaction Fee"], ascending=[
             True, False]), x="DATE", y="Daily Transaction Fee", color="TRADER", title='Daily Transaction Fee Classified by Users')
fig.update_layout(legend_title=None, xaxis_title=None,
                  yaxis_title='Daily Transaction Fees')
st.plotly_chart(fig, use_container_width=True, theme=theme_plotly)
############################################################################################
st.text(" \n")
# Fixed user-facing typos: "Coclusion" -> "Conclusion"; the last bullet's year
# now matches the Feb 14, 2023 spike described in the daily section above.
st.info(""" #### Conclusion: ####
* 95 percent of whale transactions were transferring- No NFT traded- relatively low stake and swaps
* Although whales accounted for less than 1 percent of Total transactions, they had average transaction figures 12 times higher than regular users
* Number of Whales Transactions rose significantly in the second half of 2022
* on 14 Feb 2023, the number of transactions rose more than three times its average
""")
| Kaizen-Step/The_Whales_of_Near | pages/2_🌌_Transaction.py | 2_🌌_Transaction.py | py | 15,724 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.r... |
33881074633 | from euclide import solve_chinese_remainders
from utils import timeit
@timeit
def get_data():
    """Read input.txt: the earliest departure timestamp on line 1 and the
    comma-separated bus list (numeric ids or 'x' placeholders) on line 2."""
    with open('input.txt') as input_file:
        timestamp = int(input_file.readline())
        buses = input_file.readline().strip().split(',')
    return timestamp, buses
def get_time(timestamp, bus):
    """Minutes to wait after `timestamp` for the next departure of `bus`.

    Zero when `timestamp` is itself a multiple of the bus period.
    """
    return (bus - timestamp % bus) % bus
@timeit
def part_1(data):
    """Return wait_time * bus_id for the bus with the shortest wait."""
    timestamp, buses = data
    active = [int(b) for b in buses if b != 'x']
    # min() keeps the first bus on ties, matching a strict "<" scan.
    best_bus = min(active, key=lambda b: get_time(timestamp, b))
    return get_time(timestamp, best_bus) * best_bus
@timeit
def part_2(data):
    """Earliest timestamp t where the bus at offset i departs at t + i,
    solved via the Chinese Remainder Theorem.

    The bus at offset i must satisfy t ≡ -i (mod bus), encoded here as the
    pair (bus - i, bus); the bus in slot 0 contributes (0, bus).
    """
    timestamp, buses = data
    remainders = [] if buses[0] == 'x' else [(0, int(buses[0]))]
    for i, bus in enumerate(buses[1:], 1):
        if bus != 'x':
            bus = int(bus)
            remainders.append((bus - i, bus))
    timestamp = solve_chinese_remainders(remainders)
    # Sanity check: every listed bus leaves exactly i minutes after `timestamp`.
    assert all(
        get_time(timestamp, int(bus)) % int(bus) == i % int(bus) for i, bus in enumerate(buses) if bus != 'x')
    print([bus if bus == 'x' else get_time(timestamp, int(bus)) for bus in buses])
    return timestamp
def main():
    """Entry point: load the puzzle input and run both (timed) parts."""
    data = get_data()
    part_1(data)
    part_2(data)


if __name__ == "__main__":
    main()
| bdaene/advent-of-code | 2020/day13/solve.py | solve.py | py | 1,351 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "utils.timeit",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "utils.timeit",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "euclide.solve_chinese_remainders",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "utils.timei... |
33578016636 | import numpy as np
import os, random
import cv2
import colorsys
from PIL import Image, ImageDraw, ImageFont
from pipeline.bbox import Box
def get_colors_for_classes(num_classes):
    """Return `num_classes` visually distinct RGB tuples (values 0-255).

    Colors are evenly spaced hues in HSV converted to RGB, then shuffled with
    a fixed seed so adjacent class ids do not get near-identical hues while
    the assignment stays stable across runs. The result is memoized on the
    function object, keyed by the class count.
    """
    if (hasattr(get_colors_for_classes, "colors") and
            len(get_colors_for_classes.colors) == num_classes):
        return get_colors_for_classes.colors

    hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = [colorsys.hsv_to_rgb(*hsv) for hsv in hsv_tuples]
    colors = [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in colors]
    # Use a private RNG: the shuffle must be reproducible (fixed seed) without
    # clobbering the global `random` module state of the rest of the program
    # (the previous seed/shuffle/seed(None) dance reset global randomness).
    random.Random(10101).shuffle(colors)

    get_colors_for_classes.colors = colors
    return colors
def draw(image, boxes, classes):
    """Render labelled bounding boxes onto `image` and return it as float32.

    Parameters:
        image: numpy array, either float in [0, 1] or already 0-255 valued.
        boxes: iterable of Box objects exposing x0/y0/x1/y1, `label`
               (class index) and an optional `score` attribute.
        classes: sequence of class names indexed by `box.label`.

    Returns a float32 array, rescaled back into [0, 1] when values are >= 1.
    """
    # Normalize to a uint8 PIL image regardless of the input value range.
    if image.max() <= 1.0:
        image = Image.fromarray(np.floor(image*255.+0.5).astype('uint8'))
    else:
        image = Image.fromarray(image.astype('uint8'))
    # NOTE(review): hard-coded font path under the user's home directory —
    # breaks on machines without ~/utils/draw/font; consider bundling the font.
    font_dir = os.path.expanduser('~/utils/draw/font/FiraMono-Medium.otf')
    font = ImageFont.truetype(
        font = font_dir,
        size=np.floor(3e-2*image.size[1]+0.5).astype('int32'))
    # Outline thickness scales with image size.
    thickness = (image.size[0] + image.size[1])//300

    colors = get_colors_for_classes(len(classes))
    for box in boxes:
        box_class = classes[box.label]
        if box.score is not None:
            score = box.score
            label = '{} {:.2f}'.format(box_class,score)
        else:
            label = '{}'.format(box_class)
        draw = ImageDraw.Draw(image)
        # NOTE(review): `textsize` was removed in Pillow 10 — would need
        # `textbbox`/`textlength` there; confirm the pinned Pillow version.
        label_size = draw.textsize(label,font)
        top, left, bottom, right = box.y0, box.x0, box.y1, box.x1
        # Round and clamp the box to image bounds (PIL size is (width, height)).
        top = max(0,np.floor(top+0.5).astype('int32'))
        left = max(0,np.floor(left+0.5).astype('int32'))
        bottom = min(image.size[1],np.floor(bottom+0.5).astype('int32'))
        right = min(image.size[0],np.floor(right+0.5).astype('int32'))
        # print(label, (left, top), (right, bottom))
        # Put the label above the box when it fits, else just inside the top.
        if top - label_size[1] >=0:
            text_origin = np.array([left,top-label_size[1]])
        else:
            text_origin = np.array([left, top+1])
        # Draw `thickness` nested rectangles to fake a thick outline.
        for i in range(thickness):
            draw.rectangle(
                [left+i, top+i, right-i, bottom-i],
                outline = colors[box.label])
        draw.rectangle(
            [tuple(text_origin),tuple(text_origin+label_size)],
            fill=colors[box.label])
        draw.text(text_origin, label, fill=(0,0,0),font=font)
        del draw
    image = np.array(image,dtype=np.float32)
    image = image/255. if image.max() >= 1.0 else image
    return image
| 11mhg/utils | draw/draw.py | draw.py | py | 2,604 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "colorsys.hsv_to_rgb",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.seed",
"lin... |
31257724932 | """
Create a chart showing movie recommendation frequencies and save as
an Altair JSON for displaying on a webpage
"""
import pandas as pd
import altair as alt
from sql_tables import connect_to_db, read_tables
from sql_tables import HOST, PORT, USERNAME, PASSWORD, DB
def create_frequency_chart(engine,
                           input_filename="test_results_12032020_2140.csv"):
    """
    Create an Altair chart showing movie recommendation frequencies
    __________
    Parameters:

    engine (sqlalchemy connection engine): link to postgres database
                                           containing the movie ratings
    input_filename (string): path to a csv with columns 'iteration',
                             'user_selections', 'interpretations'
                             and 'recommendations', like that created by
                             the 'results_tester' script.
    Returns:

    Altair interactive chart object
    """
    results = pd.read_csv(input_filename, index_col=0)
    total_recs = results['recommendations'].value_counts().sum()

    # Per-title average rating / rating count, joined in for the tooltip.
    data = read_tables(engine,
                       table_names=['ratings', 'movies'])
    ratings = data['ratings']
    movies = data['movies']
    mean_ratings = ratings.merge(movies, on="movieId").groupby("title")\
        .agg({"rating": "mean", "userId": "count",
              "movieId": "first", "genres": "first"})
    mean_ratings = mean_ratings.rename(columns={"rating": "Average rating",
                                                "userId": "# ratings",
                                                "genres": "Genres"})

    # Rank movies by how often they were recommended.
    source = results.groupby('recommendations')['iteration'].count()\
        .sort_values(ascending=False).reset_index()
    source = source.reset_index().rename(columns={'index': '#',
                                                  'iteration': 'Count',
                                                  'recommendations': 'Movie'})
    source['Frequency (%)'] = source['Count'] / total_recs * 100
    source = source.join(mean_ratings, on="Movie")

    # Release year is parsed from the trailing "(YYYY)" in the title.
    source["Year"] = source['Movie'].str.extract(r"((?<=\()[0-9]+(?=\)\s*$))")\
        .astype(int)
    # Bugfix: `round(-1)` rounded to the NEAREST ten (e.g. 1995 -> 2000),
    # mislabelling half the titles; floor-division gives the true decade.
    source["Decade"] = source['Year'] // 10 * 10

    chart = alt.Chart(source, background="#ffffff66")\
        .mark_circle(size=60)\
        .encode(
            x='#',
            y='Frequency (%)',
            color=alt.Color('Decade:O',
                            scale=alt.Scale(scheme='darkred')),
            tooltip=['Movie', "Frequency (%)", "Average rating",
                     "# ratings", "Genres"]
        ).interactive()
    return chart
if __name__ == "__main__":
CONN_STRING = f'postgres://{USERNAME}:{PASSWORD}@{HOST}:{PORT}/{DB}'
ENGINE, SESSION = connect_to_db(CONN_STRING, verbose=False)
CHART = create_frequency_chart(ENGINE)
CHART_FILENAME = "static/recommendation_frequencies.json"
CHART.save(CHART_FILENAME)
| soil55/flannflix | frequency_chart.py | frequency_chart.py | py | 3,101 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sql_tables.read_tables",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "altair.Chart",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "altair.Color",
... |
2313246928 | from API_request import get_price
import telebot
# CoinMarketCap "listings/latest" endpoint and request arguments.
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
parameters = {
    'start': '1',
    'limit': '5000',
    'convert': 'USD'
}
headers = {
    'Accepts': 'application/json',
    # SECURITY: real CMC API keys must not be committed to source control —
    # load them from an environment variable or a config file instead.
    'X-CMC_PRO_API_KEY': 'CMC api-key',
}

# SECURITY: same applies to the bot token below.
bot = telebot.TeleBot("telegram API-key")


@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    # Usage hint shown for /start and /help.
    bot.reply_to(message, "Write /bitcoin to get a current bitcoin exchange rate(Provided by CoinMarketCap)")


@bot.message_handler(commands=['bitcoin'])
def send_xr(message):
    # Fetch the current BTC price and echo it, rounded to whole USD.
    response = get_price(url, parameters, headers)
    bot.reply_to(message, '1 BTC = ' + str(round(float(response))) + ' USD')


# Blocking long-poll loop; keeps the process alive serving updates.
bot.polling()
| axyzz/exrbot | main.py | main.py | py | 765 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "telebot.TeleBot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "API_request.get_price",
"line_number": 26,
"usage_type": "call"
}
] |
74584190434 | #-*- coding:utf-8 -*-
from torch.utils.tensorboard import SummaryWriter
from network import Generator, Discriminator
from toolbox import Train_Handler, parse
from dataset import Real_Data_Generator
from torch.utils.data import DataLoader
import torchvision.transforms as T
import torch.optim as optim
import numpy as np
import argparse
import torch
import os
# Command-line configuration for a StyleGAN2 training run.
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epochs', default=5, type=int)
parser.add_argument('-b', '--batchsize', default=16, type=int)
parser.add_argument('-c', '--configfile', default='./configs/stylegan2_tiny.conf', type=str)
parser.add_argument('-d', '--device', default='cuda', type=str)
parser.add_argument('-l', '--limit', default=10000, type=int)  # cap on samples taken from the data folder
parser.add_argument('--datafolder', type=str)
parser.add_argument('--dataset_name', default='celebA', type=str)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--ngpu', default=1, type=int)
parser.add_argument('--weight_folder', default='./weights', type=str)
parser.add_argument('--result_folder', default='./results', type=str)
parser.add_argument('--save_per_epoch', default=1, type=int)
parser.add_argument('--print_per_iteration', default=1, type=int)
parser.add_argument('--plot_per_iteration', default=1, type=int)
parser.add_argument('--seed', default=4546, type=int)
parser.add_argument('--size', default=256, type=int)  # resize target used by the RGB transform path
args = parser.parse_args()

config = parse(args.configfile)
device = args.device
seed = args.seed
def worker_init_fn(worker_id):
    # Give each DataLoader worker its own deterministic numpy seed so
    # augmentation randomness is reproducible but differs per worker.
    np.random.seed(worker_id+seed)

torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)

# RGB data: resize/flip/rotate augmentation plus [-1, 1] normalization;
# otherwise only horizontal flip + tensor conversion.
if int(config['rgb']):
    composed = T.Compose([T.Resize(args.size), T.RandomHorizontalFlip(), T.RandomRotation(90), T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)])
else:
    composed = T.Compose([T.RandomHorizontalFlip(), T.ToTensor()])

dataset = Real_Data_Generator(args.datafolder, transforms=composed, limit=args.limit)
dataloader = DataLoader(dataset, batch_size=args.batchsize, num_workers=4*args.ngpu, worker_init_fn=worker_init_fn)
print("Training Datas:", len(dataset))
# Create output directories up front. makedirs with exist_ok avoids the
# check-then-create race of exists()+mkdir and also handles nested paths.
os.makedirs(args.weight_folder, exist_ok=True)
os.makedirs(args.result_folder, exist_ok=True)
# Generator/discriminator pair plus Adam optimizers (beta1=0.5, beta2=0.9).
G = Generator(config)
D = Discriminator(config)
G_optim = optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.9))
D_optim = optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.9))
writer = SummaryWriter()

print("Running On:", device)
# The Train_Handler owns the training loop: checkpointing, logging to
# TensorBoard, and periodic sample plotting, per the cadence args below.
train_handler = Train_Handler(args.dataset_name,
                              args.epochs,
                              G_optim,
                              D_optim,
                              dataloader,
                              device = device,
                              writer = writer,
                              save_per_epoch = args.save_per_epoch,
                              print_per_iteration = args.print_per_iteration,
                              plot_per_iteration = args.plot_per_iteration,
                              weight_folder=args.weight_folder,
                              result_folder=args.result_folder
                              )
train_handler.run(G, D, config)
| galaxygliese/Simple-Implementation-of-StyleGAN2-PyTorch | train.py | train.py | py | 3,100 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "toolbox.parse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
29637290452 | import re
import types
from docutils import nodes
from sphinx.util.docutils import SphinxDirective
from . import xnodes
class UserRepository:
    """Aggregates per-page contributor data (collected from the build
    environment's `xcontributors` / `xchangelog` attributes) into ranked
    contribution scores. All derived data is computed lazily and cached."""

    def __init__(self):
        self._data = None       # lazily-built types.SimpleNamespace of aggregates
        self._env = None        # Sphinx build environment (set via use_env)
        self._fullnames = None  # lazily-built {username: fullname} map

    def clear(self):
        """Drop cached aggregates; they are rebuilt on next access."""
        self._data = None
        self._fullnames = None

    def use_env(self, env):
        """Attach the build environment used as the data source."""
        self._env = env

    @property
    def data(self):
        """Namespace with version strings, per-user scores and aggregates,
        computed on first access."""
        if not self._data:
            self._data = types.SimpleNamespace()
            self._compute_version_strings(maxsize=7,
                                          newname="next",
                                          oldname="past")
            self._compute_user_scores()
            self._aggregate_scores()
        return self._data

    @property
    def fullnames(self):
        """{username: fullname} map, computed on first access."""
        if not self._fullnames:
            self._compute_fullnames()
        return self._fullnames

    def _compute_version_strings(self, maxsize, newname, oldname):
        """
        self._data.sorted_versions = [<version>]  # list of strings
        self._data.doc_versions = {<docname>: <version>}

        Versions are collapsed to "major.minor"; at most `maxsize` entries
        are kept, with everything older lumped under `oldname` and a major
        of 999 treated as the unreleased `newname` version.
        """
        env = self._env
        assert env and isinstance(env.xchangelog, list)
        # Collect the distinct (major, minor) pairs across all changelogs.
        v2_set = set()
        for info in env.xchangelog:
            version_tuple = info["version"]
            v2 = version_tuple[:2]
            v2_set.add(v2)
        # Newest first; major 999 sorts to the front.
        short_versions = sorted(v2_set, reverse=True)
        sorted_versions = []
        v2_to_vstr = {}
        for i, v2 in enumerate(short_versions):
            if v2[0] == 999:
                vstr = newname
            elif len(short_versions) > maxsize and i >= maxsize - 1:
                vstr = oldname
            else:
                vstr = "%d.%d" % v2
            v2_to_vstr[v2] = vstr
            # `oldname` is appended only once (at i == maxsize - 1).
            if i < maxsize:
                sorted_versions.append(vstr)
        # Map each changelog document to its version string.
        doc_versions = {}
        for info in env.xchangelog:
            docname = info["doc"]
            version_tuple = info["version"]
            vstr = v2_to_vstr[version_tuple[:2]]
            doc_versions[docname] = vstr
        self._data.sorted_versions = sorted_versions
        self._data.doc_versions = doc_versions

    def _compute_fullnames(self):
        """
        self._fullnames = {<username>: <fullname>}

        Raises ValueError when the same username appears with two different
        full names on any pages.
        """
        env = self._env
        assert env and env.xcontributors
        fullnames = {}
        for page, pagedata in env.xcontributors.items():
            for kind in ["PRs", "issues"]:
                assert kind in pagedata
                for username, userdata in pagedata[kind].items():
                    _, fullname = userdata
                    if fullname:
                        known_fullname = fullnames.get(username)
                        if known_fullname and fullname != known_fullname:
                            raise ValueError(
                                "User `%s` has different names: `%s` and `%s`"
                                % (username, fullname, known_fullname))
                        fullnames[username] = fullname
        self._fullnames = fullnames

    def _compute_user_scores(self):
        """
        self._data.scores = {<username>:
                                {<version>:
                                    {"PRs": <score>, "issues": <score>}}}
        """
        env = self._env
        assert env and env.xcontributors
        scores = {}
        for page, pagedata in env.xcontributors.items():
            ver = self._data.doc_versions[page]
            for kind in ["PRs", "issues"]:
                assert kind in pagedata
                for username, userdata in pagedata[kind].items():
                    score, _ = userdata
                    if username not in scores:
                        scores[username] = {}
                    if ver not in scores[username]:
                        scores[username][ver] = {"PRs": 0, "issues": 0}
                    scores[username][ver][kind] += score
        self._data.scores = scores

    def _aggregate_scores(self, decay_factor=0.85, issue_weight=0.25):
        """
        self._data.aggregate_scores = {<username>: <total_score>}

        Newer versions weigh more: version i in sorted_versions (newest
        first) gets weight decay_factor**i; issues count at `issue_weight`
        of a PR.
        """
        version_weights = {}
        for i, version in enumerate(self._data.sorted_versions):
            version_weights[version] = decay_factor ** i
        total_scores = {}
        for username, userinfo in self._data.scores.items():
            user_score = 0
            for version, scores in userinfo.items():
                assert isinstance(version, str)
                assert version in version_weights
                weight = version_weights[version]
                version_score = scores["PRs"] + issue_weight * scores["issues"]
                user_score += version_score * weight
            total_scores[username] = user_score
        self._data.aggregate_scores = total_scores

    def get_user_list(self):
        """Usernames sorted by descending aggregate score, ties alphabetical."""
        users = self.data.aggregate_scores
        return sorted(users.keys(), key=lambda u: (-users[u], u))

    def get_version_list(self):
        return self.data.sorted_versions

    def get_full_name(self, username):
        """Full name for `username`, falling back to the username itself."""
        return self.fullnames.get(username, username)

    def get_user_score(self, username):
        return self.data.aggregate_scores.get(username, 0)

    def get_user_score_in_version(self, username, version):
        """(PR score, issue score) for one user in one version string.

        NOTE(review): unlike get_user_score, this raises KeyError for an
        unknown username — confirm whether callers guarantee the user exists.
        """
        scores = self.data.scores[username].get(version, None)
        if scores:
            return (scores["PRs"], scores["issues"])
        else:
            return (0, 0)


# Singleton instance
users = UserRepository()
#-------------------------------------------------------------------------------
# .. contributors
#-------------------------------------------------------------------------------
class XContributorsDirective(SphinxDirective):
    """Implements the `..contributors` directive: parses the contributor
    list from the directive body and stashes it in the build environment
    for later rendering by the placeholder node."""
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    option_spec = {}

    def run(self):
        """Parse the body, record it in the env, and emit a placeholder
        node that is resolved into real content later."""
        self._parse(self.content.data)
        self._store_env()
        return [contributors_placeholder_node(self.env.docname)]

    def _parse(self, lines):
        """Parse body lines of the form "[count] [@]username [<Full Name>]".

        A line of dashes switches from the "PRs" section to the "issues"
        section. Populates self.people = {"PRs": {...}, "issues": {...}}
        mapping username -> (count, fullname-or-None).
        """
        rx_separator = re.compile(r"\-+")
        rx_contributor = re.compile(r"(?:(\d+)\s+)?"  # contribution count
                                    r"@?([\w\-]+)"  # username
                                    r"(?:\s+<([^<>]*)>)?")  # full name
        self.people = {"PRs": {}, "issues": {}}
        mode = "PRs"
        for line in lines:
            if not line:
                continue
            if re.fullmatch(rx_separator, line):
                mode = "issues"
                continue
            mm = re.fullmatch(rx_contributor, line)
            if not mm:
                raise self.error("Invalid contributor %r" % line)
            amount = int(mm.group(1)) if mm.group(1) else 1  # count defaults to 1
            username = mm.group(2)
            fullname = mm.group(3)
            if username in self.people[mode]:
                raise self.error("Duplicate user %s in ..contributors "
                                 "directive" % username)
            self.people[mode][username] = (amount, fullname)
        if not self.people["PRs"]:
            raise self.error("Missing code contributors")

    def _store_env(self):
        """Validate and save self.people into env.xcontributors[docname].

        Requires that a ..changelog directive already registered a version
        tuple for this document, and that this is the page's only
        ..contributors directive.
        """
        env = self.env
        docname = env.docname
        if not hasattr(env, "xchangelog"):
            env.xchangelog = []
        version = None
        for entry in env.xchangelog:
            if entry["doc"] == docname:
                version = entry["version"]
                assert isinstance(version, tuple)
        if not version:
            raise self.error("..contributors directive must be used in "
                             "conjunction with a ..changelog directive")
        if not hasattr(env, "xcontributors"):
            env.xcontributors = dict()
        if docname in env.xcontributors:
            raise self.error("Only single ..contributors directive is "
                             "allowed on a page")
        env.xcontributors[docname] = self.people
# This node will get replaced with actual content
class contributors_placeholder_node(nodes.Element, nodes.General):
    """
    Temporary node that will be replaced with actual content before
    the page is rendered.
    """

    def __init__(self, docname):
        super().__init__()
        # Remember which document this placeholder belongs to, so that
        # resolve() can look up the matching env.xcontributors record.
        self._docname = docname

    def resolve(self, env):
        """Replace this placeholder with a rendered "Contributors" section.

        The section is inserted as a sibling immediately after this node's
        parent, and the placeholder itself is removed from the tree.
        """
        record = env.xcontributors[self._docname]
        sect = nodes.section(ids=["contributors"], classes=["contributors"])
        sect += nodes.title("", "Contributors")
        sect += nodes.paragraph("", self._prepare_text(record))
        if record["PRs"]:
            sect += nodes.paragraph("", "",
                nodes.strong("", "Code & documentation contributors:"))
            sect += self._render_contributors(record["PRs"])
        if record["issues"]:
            sect += nodes.paragraph("", "",
                nodes.strong("", "Issues contributors:"))
            sect += self._render_contributors(record["issues"])
        if self.parent.parent is None:
            raise ValueError(
                "..contributors directive should be inside ..changelog directive")
        # Insert the new section right after our parent, then drop the
        # placeholder node itself.
        j = self.parent.parent.children.index(self.parent)
        self.parent.parent.children.insert(j + 1, sect)
        self.replace_self([])

    def _prepare_text(self, record):
        """Build the lead-in sentence summarizing contributor counts."""
        n_contributors_prs = len(record["PRs"])
        n_contributors_issues = len(record["issues"])
        assert n_contributors_prs
        if n_contributors_prs == 1:
            people_prs = "1 person"
        else:
            people_prs = "%d people" % n_contributors_prs
        if n_contributors_issues == 1:
            people_issues = "1 more person"
        else:
            people_issues = "%d more people" % n_contributors_issues
        text = ("This release was created with the help of %s who contributed "
                "code and documentation" % people_prs)
        if n_contributors_issues:
            text += (", and %s who submitted bug reports and feature requests"
                     % people_issues)
        text += "."
        return text

    def _render_contributors(self, people):
        """Render *people* (dict keyed by username) as a bullet list of
        GitHub profile links."""
        ul = nodes.bullet_list(classes=["changelog-list", "simple"])
        for username in people.keys():
            fullname = users.get_full_name(username)
            url = "https://github.com/" + username
            link = nodes.reference("", fullname, refuri=url, internal=False)
            li = nodes.list_item(classes=["changelog-list-item", "gh"])
            li += nodes.inline("", "", link)
            ul += li
        return ul
#-------------------------------------------------------------------------------
# .. contributors-grid
#-------------------------------------------------------------------------------
class XContributorsGridDirective(SphinxDirective):
    """Implements the ``..contributors-grid`` directive.

    Takes no body, arguments or options; it simply drops a placeholder node
    that is turned into the full contributor/version grid once all documents
    have been read.
    """

    has_content = False
    required_arguments = 0
    optional_arguments = 0
    option_spec = {}

    def run(self):
        placeholder = contributors_grid_placeholder_node()
        return [placeholder]
class contributors_grid_placeholder_node(nodes.Element, nodes.General):
    """Placeholder for the contributors grid; resolved after all docs read."""

    def resolve(self):
        """Replace this node with a table: one row per user, one column per
        version, marking PR/issue activity per cell."""
        out = xnodes.table(classes=["contributors-grid"])
        versions = users.get_version_list()
        # Header row: an empty corner cell, then one cell per version.
        row = xnodes.tr(classes=["versions"])
        row += xnodes.td()
        for version in versions:
            row += xnodes.td(version, classes=["version"])
        out += row
        for username in users.get_user_list():
            fullname = users.get_full_name(username)
            score = users.get_user_score(username)
            url = "https://github.com/" + username
            # The tooltip on the name link shows the aggregate score.
            link = nodes.reference("", fullname, refuri=url, internal=False,
                                   reftitle="%.2f" % score)
            row = xnodes.tr()
            row += xnodes.td(nodes.inline("", "", link), classes=["name"])
            for version in versions:
                scores = users.get_user_score_in_version(username, version)
                nprs, nissues = scores
                # Filled dot when the user has PRs for this version, small
                # bullet when issues only, empty cell otherwise.
                content = "\u25cf" if nprs else \
                          "\u2022" if nissues else \
                          ""
                classes = ["prs"] if nprs else \
                          ["issues"] if nissues else \
                          []
                # Human-readable cell tooltip, e.g. "2 pull requests + 1 issue".
                details = ""
                if nprs:
                    details += "%d pull request" % nprs
                    if nprs != 1:
                        details += "s"
                if nissues:
                    if details:
                        details += " + "
                    details += "%d issue" % nissues
                    if nissues != 1:
                        details += "s"
                row += xnodes.td(content, title=details, classes=classes)
            out += row
        self.replace_self([out])
#-------------------------------------------------------------------------------
# Event handlers
#-------------------------------------------------------------------------------
# This event is emitted when a source file is removed from the
# environment (including just before it is freshly read), and
# extensions are expected to purge their info about that file.
#
def on_env_purge_doc(app, env, docname):
    """Sphinx ``env-purge-doc`` handler: drop the contributor data stored
    for *docname* and reset the shared user repository."""
    if hasattr(env, "xcontributors"):
        env.xcontributors.pop(docname, None)
    # Reset the singleton's cached state; presumably it is rebuilt lazily on
    # next access -- see UserRepository.clear (defined above this chunk).
    users.clear()
# This event is only emitted when parallel reading of documents is
# enabled. It is emitted once for every subprocess that has read some
# documents.
#
# You must handle this event in an extension that stores data in the
# environment in a custom location. Otherwise the environment in the
# main process will not be aware of the information stored in the
# subprocess.
#
def on_env_merge_info(app, env, docnames, other):
    """Sphinx ``env-merge-info`` handler: fold the ``xcontributors`` data
    gathered by a parallel reader subprocess (*other*) into the main
    build environment (*env*)."""
    incoming = getattr(other, "xcontributors", None)
    if incoming is None:
        return
    if hasattr(env, "xcontributors"):
        env.xcontributors.update(incoming)
    else:
        env.xcontributors = incoming
# Emitted when a doctree has been “resolved” by the environment, that
# is, all references have been resolved and TOCs have been inserted.
# The doctree can be modified in place.
#
def on_doctree_resolved(app, doctree, docname):
    """Sphinx ``doctree-resolved`` handler: swap every placeholder node in
    *doctree* for its rendered content."""
    env = app.builder.env
    if not hasattr(env, "xcontributors"):
        return
    # Point the singleton repository at this build's environment before any
    # placeholder asks it for names/scores.
    users.use_env(env)
    for node in doctree.traverse(contributors_placeholder_node):
        node.resolve(env)
    for node in doctree.traverse(contributors_grid_placeholder_node):
        node.resolve()
#-------------------------------------------------------------------------------
# Extension setup
#-------------------------------------------------------------------------------
def setup(app):
    """Entry point: register the directives and event hooks, and declare
    this extension safe for parallel reads/writes."""
    app.setup_extension("_ext.xnodes")
    for name, directive in [("contributors", XContributorsDirective),
                            ("contributors-grid", XContributorsGridDirective)]:
        app.add_directive(name, directive)
    for event, handler in [("env-purge-doc", on_env_purge_doc),
                           ("env-merge-info", on_env_merge_info),
                           ("doctree-resolved", on_doctree_resolved)]:
        app.connect(event, handler)
    return {
        "parallel_read_safe": True,
        "parallel_write_safe": True,
        "env_version": 1,
    }
| h2oai/datatable | docs/_ext/xcontributors.py | xcontributors.py | py | 15,129 | python | en | code | 1,763 | github-code | 1 | [
{
"api_name": "types.SimpleNamespace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sphinx.util.docutils.SphinxDirective",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 186,
"usage_type": "call"
},
{
"api_name"... |
72490710755 | from collections import deque
class Solution:
    @staticmethod
    def validUtf8(data: 'list[int]') -> bool:
        """Return True iff *data* (one int per byte; only the low 8 bits
        matter) is a valid UTF-8 byte sequence.

        UTF-8 characters are 1-4 bytes long: a 1-byte char starts with a
        0 bit; an n-byte char starts with n leading 1 bits followed by a
        0 bit, and every continuation byte starts with ``10``.
        """
        # A deque gives O(1) popleft so bytes are consumed left to right.
        queue = deque(data)
        try:
            while queue:
                byte = queue.popleft() & 0xFF  # keep only the low 8 bits
                if not (byte & 0x80):
                    continue  # single-byte (ASCII) character
                # Leading byte: its prefix encodes the number of
                # continuation bytes that must follow.
                if (byte & 0b11100000) == 0b11000000:
                    remaining = 1
                elif (byte & 0b11110000) == 0b11100000:
                    remaining = 2
                elif (byte & 0b11111000) == 0b11110000:
                    remaining = 3
                else:
                    # 0b10xxxxxx here is a stray continuation byte;
                    # 0b11111xxx is longer than UTF-8 allows.
                    return False
                for _ in range(remaining):
                    follower = queue.popleft() & 0xFF
                    if (follower & 0b11000000) != 0b10000000:
                        return False
            return True
        except IndexError:
            # popleft() on an empty deque: the data ended in the middle of
            # a multi-byte character.  (Previously a bare ``except`` that
            # could also mask genuine bugs.)
            return False
# Quick manual checks (expected output: True, False, False).
# The method lives on Solution, so it must be called through the class --
# the previous bare ``validUtf8(...)`` calls raised NameError at runtime.
if __name__ == "__main__":
    print([197, 130, 1])
    print(Solution.validUtf8([197, 130, 1]))
    print([235, 140, 5])
    print(Solution.validUtf8([235, 140, 5]))
    print([240, 162, 138, 147, 145])
    print(Solution.validUtf8([240, 162, 138, 147, 145]))
| lucasrouchy/validUTF | validUTF.py | validUTF.py | py | 2,379 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
71942633953 | # kdc server
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
# Hard-coded mock keys for the toy Kerberos exchange; a real KDC would load
# these from a secure key store, never from source code.
CLIENT_KEY="CLIENT_KEY"
TGS_KEY="TGS_KEY"
SERVER_KEY="SERVER_KEY"
CT_SK="CT_SK"    # client <-> TGS session key
CS_SK="CS_SK"    # client <-> data-server session key
DATA_SERVER='http://localhost:8002'  # the only service this KDC issues tickets for
class MyHandler(BaseHTTPRequestHandler):
    """Minimal mock KDC: answers Kerberos-style AS and TGS requests over
    plain HTTP/JSON.  All "encryption" is simulated with the constant key
    strings defined above, so everything travels in cleartext -- demo only.
    """

    def SendRep(self, data):
        """Serialize *data* as JSON and send it back with a 200 status."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        response = data
        self.wfile.write(bytes(json.dumps(response), "utf8"))
        return

    def AS_module(self, data):
        """Authentication Server step: hand out the TGT plus the
        client/TGS session key.

        NOTE(review): the client's 'basic' field is read but never
        verified before keys are issued -- confirm this is intentional
        for the demo.
        """
        basic = data['basic']
        # query client info from db
        # Encry info with client key
        rep={'method':'AS', 'basic':CLIENT_KEY,'TGT': TGS_KEY, "CT_SK":CT_SK}
        self.SendRep(rep)
        return

    def TGS_module(self, data):
        """Ticket Granting step: validate the request and issue the service
        ticket (ST) plus the client/server session key.

        On any validation failure the method returns without sending any
        HTTP response, so the client will observe a hang/reset rather than
        an error status.
        """
        # query from db to check if support the server
        server = data['server']
        if(server!= DATA_SERVER):
            return
        # verify client basic info
        basic = data['basic']
        if(basic != CT_SK):
            return
        TGT = data['TGT']
        if(TGT != TGS_KEY):
            return
        rep={'method':'TGS', 'basic':CT_SK,'ST':SERVER_KEY , "CS_SK":CS_SK}
        self.SendRep(rep)
        return

    def handle_data(self, data):
        """Dispatch on the JSON ``method`` field to the AS or TGS handler."""
        method = data['method']
        if(method=="AS"):
            self.AS_module(data)
        elif(method=="TGS"):
            self.TGS_module(data)
        else:
            # Unknown method: log locally; no response is sent back.
            print("Can not support the method:", method)
        return

    def do_POST(self):
        """Read the JSON request body and route it through handle_data()."""
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        data = json.loads(post_data)
        print('Received data from client:', data)
        self.handle_data(data)
        # self.send_response(200)
        # self.send_header('Content-type', 'application/json')
        # self.end_headers()
        # response = {'message': 'Received data: ' + str(data)}
        # self.wfile.write(bytes(json.dumps(response), "utf8"))
        return
def run(server_class=HTTPServer, handler_class=MyHandler):
    """Bind the mock KDC to port 8001 on all interfaces and serve forever."""
    httpd = server_class(('', 8001), handler_class)
    print('Starting httpd...')
    httpd.serve_forever()
if __name__ == "__main__":
    # Start the server only when executed directly, not on import.
    run()
| WangWeiPengHappy/simple_kerberos | source/kdc.py | kdc.py | py | 2,319 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "http.server.... |
19031021092 | import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier as RFC
from src.icll import ICLL
# Minimal usage example: fit ICLL on a synthetic binary problem.
X, y = make_classification(n_samples=500, n_features=5, n_informative=3)
X = pd.DataFrame(X)

icll = ICLL(model_l1=RFC(), model_l2=RFC())
icll.fit(X, y)

probs = icll.predict_proba(X)

# resampling alternative
from sklearn.datasets import make_classification
from imblearn.over_sampling import SMOTE, ADASYN

X_train, y_train = make_classification(n_samples=500, n_features=5, n_informative=3)
X_res, y_res = SMOTE().fit_resample(X_train, y_train)

# comparisons
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score
from plotnine import *

# loading diabetes dataset https://github.com/vcerqueira/blog/tree/main/data
data = pd.read_csv('data/pima.csv')

# splitting target variable from explanatory variables
X, y = data.drop('target', axis=1), data['target']
# mean-impute missing values (simple baseline imputation)
X = X.fillna(X.mean())

# train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# doing resampling with smote
X_res, y_res = SMOTE().fit_resample(X_train, y_train)

# Three competitors: plain RF, RF trained on SMOTE-resampled data, and ICLL.
rf = RFC()
smote = RFC()
icll = ICLL(model_l1=RFC(), model_l2=RFC())

rf.fit(X_train, y_train)
smote.fit(X_res, y_res)
icll.fit(X_train, y_train)

# Predicted probabilities on the held-out test set.  Column 1 is the
# positive-class probability for the sklearn models; icll_probs is used
# as-is, without selecting a column.
rf_probs = rf.predict_proba(X_test)
smote_probs = smote.predict_proba(X_test)
icll_probs = icll.predict_proba(X_test)

# AUC comparison printed to stdout.
print(roc_auc_score(y_test, rf_probs[:, 1]))
print(roc_auc_score(y_test, smote_probs[:, 1]))
print(roc_auc_score(y_test, icll_probs))

# ROC curves for plotting.
fpr_rf, tpr_rf, _ = roc_curve(y_test, rf_probs[:, 1])
fpr_sm, tpr_sm, _ = roc_curve(y_test, smote_probs[:, 1])
fpr_ic, tpr_ic, _ = roc_curve(y_test, icll_probs)

roc_rf = pd.DataFrame({'fpr': fpr_rf, 'tpr': tpr_rf})
roc_sm = pd.DataFrame({'fpr': fpr_sm, 'tpr': tpr_sm})
roc_icll = pd.DataFrame({'fpr': fpr_ic, 'tpr': tpr_ic})
roc_rf['Model'] = 'RF'
roc_sm['Model'] = 'SMOTE'
roc_icll['Model'] = 'ICLL'

# NOTE(review): only the SMOTE and ICLL curves are plotted; roc_rf is
# computed and labelled but never added to the figure -- confirm intentional.
df = pd.concat([roc_sm, roc_icll], axis=0)

roc_plt = ggplot(df) + \
    aes(x='fpr', y='tpr', group='Model', color='Model') + \
    theme_classic(base_family='Palatino', base_size=12) + \
    theme(plot_margin=.125,
          axis_text=element_text(size=10),
          legend_title=element_blank(),
          legend_position='top') + \
    geom_line(size=1.7) + \
    xlab('False Positive Rate') + \
    ylab('True Positive Rate') + \
    ylim(0, 1) + xlim(0, 1) + \
    ggtitle('') + \
    geom_abline(intercept=0,
                slope=1,
                size=1,
                color='black',
                linetype='dashed')

print(roc_plt)
# roc_plt.save(f'{output_dir}/dist_plot.pdf', height=5, width=8)
| vcerqueira/blog | posts/class_imbalance_icll.py | class_imbalance_icll.py | py | 2,808 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "src.icll.ICLL",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "skl... |
9378055793 | import os
import json
import time
import requests
from math import floor
import vlc
def download():
    """Fetch the radio-station database and save it to the file ``stations``.

    Terminates the program when the remote service does not answer with
    HTTP 200, because nothing else in this script can work without the
    station list.
    """
    print('We need to download data from the web...one moment')
    URL = "http://91.132.145.114/json/stations"
    response = requests.get(URL)
    if response.status_code == 200:
        # Context manager guarantees the file is flushed and closed
        # (the previous code leaked the file handle).
        with open("stations", "wb") as f:
            f.write(response.content)
    else:
        print('strona z bazą stacji radiowych jest nieaktywna..')
        # The original had a bare ``exit`` expression, which is a no-op;
        # actually terminate so the script doesn't continue without data.
        raise SystemExit(1)
# check if the file exists and is not too old, alternatively download
filename = 'stations'
days = 30  # maximum accepted age of the cached station list
if os.path.exists(filename):
    file_time = os.path.getmtime(filename)
    current_time = time.time()
    time_diff = current_time - file_time
    days_diff = floor(time_diff / (24 * 3600))  # cache age in whole days
    if days_diff > days:
        download()
    else:
        print(f'loading data from a file... it\'s only {days_diff} days old')
else:
    download()

# Parse the cached JSON list of station records.
with open('stations', 'r') as st:
    stacje = json.load(st)

# Keep asking until the search term matches at least one station name.
po_wyb = []
while len(po_wyb) == 0:
    name_station = input('Enter the name of the station: ').lower()
    for stacja in stacje:
        if name_station in stacja.get('name').lower():
            # print(stacja)
            # Keep (name, stream url, votes, bitrate) for each match.
            c = stacja.get('name'), stacja.get('url'), stacja.get('votes'), stacja.get('bitrate')
            po_wyb.append(c)

# Show the matches as a numbered table and let the user pick one (1-based).
print('\nnr vote bitrate name')
for i, y in enumerate(po_wyb):
    print(str(i+1).ljust(5, ' ') + (str(y[2]).ljust(10, ' ') + str(y[3]).ljust(13, ' ') + str(y[0])))

ktora = int(input('\nWhich one you want? (enter the nr): '))
urls = po_wyb[ktora-1][1]  # stream URL of the chosen station
print(f'stacja: {urls}\n')

# zapisac = input('Write your stream to a file? y/n ').lower()
# if zapisac == 'y':
#     extension = int(time.monotonic())
#     chosen = (po_wyb[ktora-1][0]).replace(' ', '_')#.replace('\'','')
#     filling = (f'/home/maco/Pulpit/{chosen}_{extension} -i {urls} &') or use $PWD
#     filling = (f'/home/maco/Pulpit/{chosen} -i {urls} &') # or use $PWD
#     print(chosen)
#     os.system('ffmpeg -f ogg '+ filling)
def getData(url):
    """Play the stream at *url* with VLC and print the track title every
    time the stream's "NowPlaying" metadata changes.

    NOTE(review): the ``while True`` loop never breaks, so the final
    ``return`` is unreachable; the function effectively runs until the
    process is interrupted.
    """
    Instance = vlc.Instance()
    player = Instance.media_player_new()
    Media = Instance.media_new(url)
    Media.get_mrl()
    player.set_media(Media)
    player.play()
    prev = ""
    while True:
        time.sleep(1)  # poll the stream metadata once per second
        m = Media.get_meta(12) # vlc.Meta 12: 'NowPlaying',
        if m != prev:
            # Only print when the title actually changed.
            print(m)
            prev = m
            # with open(f'/home/maco/Pulpit/{chosen}.txt','a') as file:
            #     file.write(f'{m}\n')
    return player.audio_get_track_description()
print(getData(urls))
| maccu71/projects | stacje.py | stacje.py | py | 2,542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.getmtime",
"line... |
72556059235 | #!/usr/bin/python3
"""A Base class"""
import json
import turtle
import csv
class Base:
    """Base model for all shapes.

    Manages automatic ``id`` assignment and provides JSON/CSV
    (de)serialization plus turtle drawing for its subclasses
    (Rectangle and Square).
    """

    # Counter of auto-assigned ids, shared by every instance.
    __nb_objects = 0

    def __init__(self, id=None):
        """Initialize the instance id.

        Args:
            id (int): explicit id to use; when None, the next
                auto-incremented value is assigned instead.
        """
        if id is None:
            Base.__nb_objects += 1
            self.id = Base.__nb_objects
        else:
            self.id = id

    @staticmethod
    def to_json_string(list_dictionaries):
        """Return the JSON string representation of a list of dicts
        ("[]" for None or an empty list)."""
        if not list_dictionaries:
            return "[]"
        return json.dumps(list_dictionaries)

    @classmethod
    def save_to_file(cls, list_objs):
        """Write the JSON representation of *list_objs* to
        ``<ClassName>.json`` ("[]" when *list_objs* is None)."""
        filename = "{}.json".format(cls.__name__)
        with open(filename, "w") as jsonfile:
            if list_objs is None:
                jsonfile.write("[]")
            else:
                dicts = [obj.to_dictionary() for obj in list_objs]
                jsonfile.write(Base.to_json_string(dicts))

    @staticmethod
    def from_json_string(json_string):
        """Return the Python list represented by a JSON string
        (empty list for None or "[]")."""
        if json_string is None or json_string == "[]":
            return []
        return json.loads(json_string)

    @classmethod
    def create(cls, **dictionary):
        """Return an instance whose attributes are set from *dictionary*
        (None when the dictionary is empty)."""
        if not dictionary:
            return None
        # Build a throwaway instance with dummy dimensions, then overwrite
        # its attributes via the subclass' update().
        dummy = cls(1, 1) if cls.__name__ == "Rectangle" else cls(1)
        dummy.update(**dictionary)
        return dummy

    @classmethod
    def load_from_file(cls):
        """Return a list of instances loaded from ``<ClassName>.json``
        (empty list when the file does not exist)."""
        filename = str(cls.__name__) + ".json"
        try:
            with open(filename, "r") as jsonfile:
                dicts = Base.from_json_string(jsonfile.read())
        except IOError:
            return []
        return [cls.create(**d) for d in dicts]

    @classmethod
    def save_to_file_csv(cls, list_objs):
        """Serialize *list_objs* to ``<ClassName>.csv``.

        Writes the literal "[]" when the list is None or empty, mirroring
        save_to_file's behavior.
        """
        filename = cls.__name__ + ".csv"
        with open(filename, "w", newline="") as csvfile:
            if not list_objs:
                csvfile.write("[]")
                return
            if cls.__name__ == "Rectangle":
                fieldnames = ["id", "width", "height", "x", "y"]
            else:
                fieldnames = ["id", "size", "x", "y"]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            for obj in list_objs:
                writer.writerow(obj.to_dictionary())

    @classmethod
    def load_from_file_csv(cls):
        """Return a list of instances loaded from ``<ClassName>.csv``
        (empty list when the file does not exist)."""
        filename = cls.__name__ + ".csv"
        if cls.__name__ == "Rectangle":
            fieldnames = ["id", "width", "height", "x", "y"]
        else:
            fieldnames = ["id", "size", "x", "y"]
        try:
            with open(filename, "r", newline="") as csvfile:
                reader = csv.DictReader(csvfile, fieldnames=fieldnames)
                # CSV yields strings; every stored field is an int.
                rows = [{key: int(value) for key, value in row.items()}
                        for row in reader]
        except IOError:
            return []
        return [cls.create(**row) for row in rows]

    @staticmethod
    def draw(list_rectangles, list_squares):
        """Draw every Rectangle and Square with the turtle module, then
        wait for a click to close the window."""
        pen = turtle.Turtle()
        pen.screen.bgcolor("#b7312c")
        pen.pensize(3)
        pen.shape("turtle")

        for rect in list_rectangles:
            pen.showturtle()
            pen.up()
            pen.goto(rect.x, rect.y)
            pen.down()
            # Two passes of (width, turn, height, turn) trace the outline.
            for _ in range(2):
                pen.forward(rect.width)
                pen.left(90)
                pen.forward(rect.height)
                pen.left(90)
            pen.hideturtle()

        pen.color("#b5e3d8")
        for sq in list_squares:
            pen.showturtle()
            pen.up()
            pen.goto(sq.x, sq.y)
            pen.down()
            for _ in range(2):
                pen.forward(sq.width)
                pen.left(90)
                pen.forward(sq.height)
                pen.left(90)
            pen.hideturtle()

        turtle.exitonclick()
| Martin-do/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/base.py | base.py | py | 5,721 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_numb... |
30171935940 | from objects.anyPage import anyPage
from objects.customParser import parser
from os.path import dirname, abspath
from configparser import ConfigParser
import json
class configurator:
    """Builds the crawler's per-section page list.

    Reads ``config/config.ini``, walks the site's "more" pagination links
    for the requested section, and writes the discovered page ids back into
    the config file.
    """

    def __init__(self, value):
        """Load config.ini and cache the settings for section *value*.

        When the section is missing, a message listing the supported
        sections is printed and the instance is left unconfigured.
        """
        configParser = ConfigParser()
        self._configPath = dirname(dirname(abspath(__file__))) + "/config/config.ini"
        configParser.read(self._configPath)
        self._config = configParser
        if (not self._config.has_section(value)):
            print(f"No page defined for {value}")
            # BUG FIX: this previously referenced an undefined name
            # ``config`` (NameError); use the parser created above.
            # [1:] skips the leading 'global' section.
            supportedSections = configParser.sections()[1:]
            print(f"Page config could be defined for {supportedSections}")
            return
        self._domain = self._config.get('global', 'domain')
        self._proxies = json.loads(self._config.get('global', 'proxyList'))
        self._proxyAllowed = self._config.get('global', 'proxyAllowed')
        self._startPage = self._config.get(value, 'start')
        self._link = self._config.get(value, 'link')
        self._clicksAllowed = self._config.get(value, 'clicksAllowed')
        self._parserObject = parser()
        self._siteSection = value

    def _getMoreValue(self, pageNo):
        """Fetch page *pageNo* and return the id carried by its "more"
        link, i.e. the next page to crawl."""
        domain = self._domain
        proxies = self._proxies
        proxyAllowed = self._proxyAllowed
        startPage = self._startPage
        link = self._link
        page = anyPage(domain, startPage, link, proxies, proxyAllowed)
        page.startOnly(0)
        page.setPageNo(pageNo)
        # NOTE(review): return value unused; kept in case getURL() has
        # side effects on the page object -- confirm.
        pageURL = page.getURL()
        parsePage = self._parserObject
        moreButton = parsePage.getElementAttribute(page.getData(), "//a[@class='morelink']", "href")
        # The link looks like "...?<param>=<id>"; keep only the id.
        moreButtonId = moreButton.split("?")[1].split("=")[1]
        print(f"__Obtained pageid value {moreButtonId}")
        return moreButtonId

    def generateConfigList(self):
        """Discover the section's page-id chain and persist it to
        config.ini under the ``startPage`` and ``pagesList`` keys."""
        returned = []
        domain = self._domain
        startPage = self._startPage
        link = self._link
        proxies = self._proxies
        proxyAllowed = self._proxyAllowed
        page = anyPage(domain, startPage, link, proxies, proxyAllowed)
        page.startOnly(1)
        # NOTE(review): return value unused; kept in case getURL() has
        # side effects on the page object -- confirm.
        pageURL = page.getURL()
        siteSection = self._siteSection
        parsePage = self._parserObject
        print(f"_Setting up start page value for {siteSection}")
        if (siteSection == "main"):  # exception if site section is main
            startPageId = "1"  # value is 1
        else:  # others have first value stored on corresponding pages
            startPageId = parsePage.getElementAttribute(page.getData(), "//tr[@class='athing']", "id")
        self._config[siteSection]["startPage"] = startPageId
        moreLink = startPageId  # seed for the pagination walk below
        returned.append(startPageId)
        print(f"_Start page value for {siteSection} has been set as {startPageId}")
        clicksCount = self._clicksAllowed
        for moreButtonClick in range(1, int(clicksCount) + 1):
            moreLink = self._getMoreValue(moreLink)
            returned.append(moreLink)
        self._config[siteSection]["pagesList"] = json.dumps(returned)
        # The context manager closes the file; the old explicit close()
        # call inside the with-block was redundant and has been removed.
        with open(self._configPath, 'w') as configfile:
            self._config.write(configfile)
| giantpanda9/codesamples-python3-gevent-site-parser | objects/customCrawlerConfigurator.py | customCrawlerConfigurator.py | py | 2,833 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",... |
72780642275 | import os
import smtpd
import sys
import asyncore
import email
from email.header import decode_header
########################################################################
#
#
# IF YOU CHANGE THIS FILE, YOU HAVE TO REBUILD THE DOCKER CONTAINER
# THE CURRENT manual_run.py will not detect changes
#
#
########################################################################
class MySMTPD(smtpd.DebuggingServer):
"""
A mock email server to trap email in files. Each email is saved to
a file with the username and an integer (starting at 1), e.g.:
colemanb_1.txt
"""
def __init__(self, port, directory):
"""
Initialize the mock server.
:param port: the port to listen on
:param directory: the directory to save files
:return: None
"""
smtpd.DebuggingServer.__init__(self, ('localhost', port), None)
self.directory = directory
self.users = {}
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Process one message (save it to the appropriate file
:param peer: the name of the sending server, string
:param mailfrom: the from field, string
:param rcpttos: the recipient(s), a list of string
:param data: the message
:return: None
"""
# we only send to 1 user at a time, so this should be safe
# take only the username so we don't have a @ in the filename
user = rcpttos[0].split("@")[0]
if user not in self.users:
self.users[user] = 1
filename = '{}_{:0>3}.txt'.format(user, self.users[user])
self.users[user] += 1
with open(os.path.join(self.directory, filename), 'w') as f:
msg = email.message_from_string(data)
f.write('From: ' + mailfrom + '\n')
# The subject line is encoded. Read the docs for email.header
# to understand the double [0]
f.write('Subject: ' + decode_header(msg['Subject'])[0][0].decode() +
'\n\n')
if msg.is_multipart():
for payload in msg.get_payload():
f.write(payload.get_payload(decode=True).decode('utf-8'))
else:
f.write(msg.get_payload(decode=True).decode('utf-8'))
def main():
if len(sys.argv) != 3:
print("provide port and directory")
return
MySMTPD(int(sys.argv[1]), sys.argv[2])
asyncore.loop()
main() | bjcoleman/katacoda-scenarios | git-keeper-tutorial/assets/mysmtpd.py | mysmtpd.py | py | 2,486 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "smtpd.DebuggingServer",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "smtpd.DebuggingServer.__init__",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "smtpd.DebuggingServer",
"line_number": 37,
"usage_type": "attribute"
},
{
... |
988817178 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 17:18:39 2021
@author: Dell
"""
import pandas as pd
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score, cross_validate, cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.tree import export_graphviz
import matplotlib as mpl
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
import numpy as np
from sklearn import linear_model
from math import sqrt as sq
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
# Load the optimisation results of each electrification scenario.
Ref = pd.read_csv('REf/Bolivia/Microgrids_results.csv', index_col=0)
sc1 = pd.read_csv('SC1/Bolivia/Microgrids_results.csv', index_col=0)
sc2 = pd.read_csv('SC2/Bolivia/Microgrids_results.csv', index_col=0)
sc3 = pd.read_csv('SC3/Bolivia/Microgrids_results.csv', index_col=0)
sc4 = pd.read_csv('SC4/Bolivia/Microgrids_results.csv', index_col=0)

print('The number of microgrids in the ref scenario is ' + str(len(Ref)) + '.')
print('The number of microgrids in the SC1 scenario is ' + str(len(sc1)) + '.')
print('The number of microgrids in the SC2 scenario is ' + str(len(sc2)) + '.')
print('The number of microgrids in the SC3 scenario is ' + str(len(sc3)) + '.')
print('The number of microgrids in the SC4 scenario is ' + str(len(sc4)) + '.')

# Clamp negative PV capacities in SC3 to zero (presumably solver noise --
# TODO confirm against the optimisation output).
for i in sc3.index:
    if sc3['PVcapacity2025'][i] < 0:
        sc3.loc[i, 'PVcapacity2025'] = 0

# Global matplotlib styling for the figure below.
title_size = 70
pad = 30
tick_size = 40
fontsize = '30'
mpl.rcParams['xtick.labelsize'] = tick_size
mpl.rcParams['ytick.labelsize'] = tick_size
fig = plt.figure(figsize=(40, 30))
size = [40, 40]
label_size = 40
tick_size = 35  # NOTE(review): reassigned but not used afterwards -- confirm

# LCOE plot
ax1 = fig.add_subplot(221)
LCOE = []
LCOE.append(Ref['MinimumOverallLCOE2025'])
LCOE.append(sc1['MinimumOverallLCOE2025'])
LCOE.append(sc2['MinimumOverallLCOE2025'])
LCOE.append(sc3['MinimumOverallLCOE2025'])
LCOE.append(sc4['MinimumOverallLCOE2025'])
ax1.boxplot(LCOE)
ax1.set_xlabel('Scenario', size=label_size)
ax1.set_ylabel('LCOE (USD/kWh)', size=label_size)
ax1.set_title('LCOE', size=title_size, pad=pad)
ax1.set_xticklabels(['Ref', 'SC1', 'SC2', 'SC3', 'SC4'])

# NPC plot (converted from USD to thousands of USD)
ax2 = fig.add_subplot(222)
NPC = []
NPC.append(Ref['NPC2025']/1000)
NPC.append(sc1['NPC2025']/1000)
NPC.append(sc2['NPC2025']/1000)
NPC.append(sc3['NPC2025']/1000)
NPC.append(sc4['NPC2025']/1000)
ax2.boxplot(NPC)
ax2.set_xlabel('Scenario', size=label_size)
ax2.set_ylabel('NPC (Thousands of USD)', size=label_size)
ax2.set_title('NPC', size=title_size, pad=pad)
ax2.set_xticklabels(['Ref', 'SC1', 'SC2', 'SC3', 'SC4'])

# PV plot (full-width bottom panel; SC4 has no PV, hence the zero list)
ax3 = fig.add_subplot(212)
PV = []
PV.append(Ref['PVcapacity2025'])
PV.append(sc1['PVcapacity2025'])
PV.append(sc2['PVcapacity2025'])
PV.append(sc3['PVcapacity2025'])
PV.append([0, 0, 0])
ax3.boxplot(PV)
ax3.set_xlabel('Scenario', size=label_size)
ax3.set_ylabel('PV (kW)', size=label_size)
ax3.set_title('PV installed capacity', size=title_size, pad=pad)
ax3.set_xticklabels(['Ref', 'SC1', 'SC2', 'SC3', 'SC4'])

plt.savefig('Scenarios_Results.png', bbox_inches='tight')
plt.show()

# Console output recorded from a previous run:
# The number of microgrids in the ref scenario is 221.
# The number of microgrids in the SC1 scenario is 259.
# The number of microgrids in the SC2 scenario is 163.
# The number of microgrids in the SC3 scenario is 345.
# The number of microgrids in the SC4 scenario is 701.
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
22025020858 | import networkx as nx
from random import randint
from math import exp
import numpy as np
from typing import List, Dict, FrozenSet, Iterator, Tuple
from pydantic import BaseModel
from .graph_utils import (
list_subsets_of_given_size,
pairs_of_sets,
)
class SubtreeData(BaseModel):
    """One aggregation step: an aggregate subtree absorbing an attached one."""
    agg_root: int  # root node of the aggregate (growing) subtree
    agg_size: int  # number of tree nodes already in the aggregate
    att_root: int  # root node of the subtree being attached
    att_size: int  # number of tree nodes in the attached subtree
class SubtreeAnalizer():
def __init__(self,
             root: int,
             size: int,
             children_count: List[int],
             bottom_up_order: List[int],
             tree: nx.Graph,
             graph: nx.Graph,
             graph_colors: List[int],
             colors: FrozenSet,
             memory_array: Dict[FrozenSet, np.ndarray]
             ):
    """Capture the inputs of the colorful subtree-embedding search.

    Args:
        root: root node of the pattern tree.
        size: number of nodes in the pattern tree.
        children_count: per tree node, the number of nodes below it
            (its subtree size minus one -- see _subtree_connections_asc).
        bottom_up_order: tree nodes ordered children-before-parents.
        tree: the pattern tree to embed.
        graph: the host graph searched for an embedding.
        graph_colors: color assigned to each graph node.
        colors: the full color set a complete match must cover.
        memory_array: DP table keyed by color subset; each value is a
            (tree node x graph node) boolean feasibility matrix.
    """
    self._root = root
    self._size = size
    self._children_count = children_count
    self._bottom_up_order = bottom_up_order
    self._tree = tree
    self._graph = graph
    self._graph_colors = graph_colors
    self._colors = colors
    self._memory_array = memory_array
    # For each feasible (colors, tree_root, graph_root) state, remember the
    # two sub-states it was combined from so a full mapping can be rebuilt.
    self._mapping_restore: Dict[Tuple[FrozenSet[int], int, int],
                                Tuple[Tuple[FrozenSet[int], int, int],
                                      Tuple[FrozenSet[int], int, int]]
                                ] = {}
def find_subtree(self):
    """Run the DP over subtree/color combinations and return a mapping
    (tree node -> graph node) for a full colorful embedding, or None.

    Fills self._memory_array bottom-up, then checks whether the tree node
    at index 0 (presumably the root -- TODO confirm) can be matched
    anywhere using the complete color set.
    """
    for con_data in self._subtree_connections_asc():
        # Iterating a pydantic model yields (field_name, value) pairs,
        # hence the [1] indexing below to extract the values.
        agg_subt, agg_size, att_subt, att_size = con_data
        for colors_data in self._subtree_colorings(
            agg_size[1], att_size[1]
        ):
            cols_subset, agg_cols, att_cols = colors_data
            self._check_mappings(
                cols_subset, agg_subt[1], agg_cols, att_subt[1], att_cols
            )
    try:
        g_root = list(self._memory_array[self._colors][0, :]).index(True)
        return self._restore_mapping(g_root)
    except (ValueError, KeyError):
        # ValueError: no feasible graph node in the root row;
        # KeyError: the full color set never became feasible.
        return None
def _restore_mapping(self, g_root):
mapping = [0] * len(self._tree)
self._traverse(self._colors, mapping, g_root, 0)
return mapping
def _traverse(self, colors, mapping, g_root, t_root):
mapping[t_root] = g_root
if len(colors) == 1:
return
(col1, tr1, gr1), (col2, tr2, gr2) = self._mapping_restore[
(colors, t_root, g_root)
]
self._traverse(col1, mapping, gr1, tr1)
self._traverse(col2, mapping, gr2, tr2)
def _check_mappings(self, cols_subset, agg_subt, agg_cols, att_subt,
att_cols):
for v in self._graph:
for vn in self._graph.neighbors(v):
is_first_colorable = self._memory_array[agg_cols][agg_subt, v]
is_sec_colorable = self._memory_array[att_cols][att_subt, vn]
if is_first_colorable and is_sec_colorable:
self._fill_mappings(agg_cols, agg_subt, v,
att_cols, att_subt, vn,
cols_subset)
def _fill_mappings(self, agg_cols, agg_subt, v,
att_cols, att_subt, vn,
cols_subset):
if cols_subset not in self._memory_array:
match_array = np.zeros((len(self._tree), len(self._graph)),
dtype=bool)
self._memory_array[cols_subset] = match_array
self._memory_array[cols_subset][agg_subt, v] = True
connected_subt = (cols_subset, agg_subt, v)
if connected_subt not in self._mapping_restore:
last_agg = (agg_cols, agg_subt, v)
last_att = (att_cols, att_subt, vn)
self._mapping_restore[connected_subt] = (last_agg, last_att)
def _subtree_connections_asc(self) -> Iterator[SubtreeData]:
for agg_subt in self._bottom_up_order:
agg_subt_size = 1
for att_subt in self._get_direct_children(agg_subt):
att_subt_size = self._children_count[att_subt] + 1
yield SubtreeData(
agg_root=agg_subt,
agg_size=agg_subt_size,
att_root=att_subt,
att_size=att_subt_size
)
agg_subt_size = agg_subt_size + att_subt_size
def _get_direct_children(self, node: int) -> Iterator[int]:
for v in self._tree.neighbors(node):
buo = self._bottom_up_order
if buo.index(node) > buo.index(v):
yield v
def _subtree_colorings(self, agg_size, att_size):
for cols_subset, agg_cols, att_cols in self._colors_subset_iterator(
agg_size, att_size
):
is_first_possible = agg_cols in self._memory_array
is_second_possible = att_cols in self._memory_array
if is_first_possible and is_second_possible:
yield cols_subset, agg_cols, att_cols
def _colors_subset_iterator(self, agg_size, att_size):
avail_cols = agg_size + att_size
for cols_subset in list_subsets_of_given_size(
self._colors, avail_cols
):
for agg_cols, att_cols in pairs_of_sets(
cols_subset, agg_size, att_size
):
yield cols_subset, agg_cols, att_cols
class SubtreeAnalizerFactory():
    """Precomputes tree-shape data (order, descendant counts) shared by all colorings.

    ``create`` then builds a :class:`SubtreeAnalizer` for one random coloring,
    so repeated color-coding attempts reuse the tree preprocessing.
    """
    def __init__(self, tree: nx.Graph, graph: nx.Graph):
        self._tree = tree
        self._graph = graph
        self._size = len(self._tree)
        # Tree vertex 0 is taken as the root throughout.
        self._nodes_order = list(self._bottom_up_order(0))
        self._children_count = self._count_all_children(self._nodes_order, 0)

    def create(self, graph_colors: List[int]) -> SubtreeAnalizer:
        """Build an analizer for one coloring; ``graph_colors[v]`` is vertex v's color."""
        if (len(graph_colors) != len(self._graph)):
            raise ValueError(
                "Graph colors mapping does not cover all graph vertices")
        colors = frozenset(graph_colors)
        mapping_restore = self._initialize_memory(graph_colors)
        return SubtreeAnalizer(
            0, self._size, self._children_count, self._nodes_order,
            self._tree, self._graph, graph_colors, colors, mapping_restore
        )

    def _bottom_up_order(self, root: int) -> Iterator[int]:
        """Yield tree vertices leaves-first (reverse BFS order from ``root``)."""
        top_down = nx.bfs_tree(self._tree, source=root)
        for g in reversed(list(top_down)):
            yield g

    def _count_all_children(self, nodes_order: List[int],
                            root: int) -> List[int]:
        """
        returns list of counts of descendants for every node in tree
        by the direction given by root node
        """
        nodes_order = list(nodes_order)
        children_count = [0]*len(self._tree)
        # Processing in bottom-up order guarantees children are counted before parents.
        for v in nodes_order:
            vo = v
            count = 0
            for nv in self._tree.neighbors(vo):
                if nodes_order.index(nv) < nodes_order.index(vo):
                    count = count + 1 + children_count[nv]
            children_count[vo] = count
        return children_count

    def _initialize_memory(self, graph_colors) -> Dict[FrozenSet, np.ndarray]:
        """Seed the DP table: any single tree vertex maps onto any graph vertex of color c."""
        iso_subtree = {}
        graph_size = len(self._graph)
        tree_size = len(self._tree)
        colors = set(graph_colors)
        for c in colors:
            match_array = np.zeros((tree_size, graph_size),
                                   dtype=bool)
            iso_subtree[frozenset([c])] = match_array
        for c in colors:
            for t in self._tree.nodes():
                for v in self._graph.nodes:
                    if graph_colors[v] == c:
                        iso_subtree[frozenset([c])][t, v] = True
        return iso_subtree
def color_coding_subtree(tree, graph):
    """Search for a copy of ``tree`` in ``graph`` via randomized color coding.

    Repeatedly colors the graph at random and looks for a colorful embedding,
    stopping at the first success or after ``int(exp(k + 2))`` attempts.

    Returns:
        (mapping, coloring): the tree->graph vertex mapping (or None) and the
        last random coloring that was tried.
    """
    k = len(tree)
    max_attempts = int(exp(k + 2))
    factory = SubtreeAnalizerFactory(tree, graph)
    mapping = None
    coloring = None
    attempt = 0
    while attempt < max_attempts and mapping is None:
        print(f"Attempt nr {attempt}", end='\r')
        coloring = [randint(0, k) for _ in range(len(graph))]
        mapping = factory.create(coloring).find_subtree()
        attempt += 1
    return mapping, coloring
| sowiks2711/color-coding-subtree-isomorphism | color_coding/time_optimised_alg.py | time_optimised_alg.py | py | 8,360 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "networkx.Graph",
"line... |
17145656675 | import json
import flask
from flask import request
from datetime import datetime
import psycopg2
from flask import make_response
from werkzeug import exceptions
import waitress
app = flask.Flask(__name__)
def query(sql, *args):
    """Execute *sql* against the nosp_walk database and return all rows as a list."""
    with psycopg2.connect("dbname=nosp_walk user=postgres") as conn, \
            conn.cursor() as cur:
        cur.execute(sql, *args)
        return list(cur)
@app.route("/")
def main_page():
    """Root endpoint: identifies the main page."""
    payload = {"page": "main"}
    return payload
@app.route("/sign_in", methods=["POST"])
def sign_in():
    """Authenticate by hashed key and set the ``user`` cookie.

    Expects a JSON body ``{"key": <key_hash>}``.  Responds 403 when the key is
    unknown; on success returns ``{"access": "granted"}`` with a ``user`` cookie.
    """
    json_data = json.loads(request.data)
    key_hash = json_data["key"]
    try:
        # Parameterized query: the previous f-string interpolation of the
        # request-supplied key into the SQL text allowed SQL injection.
        user = query(
            "select _id from users where key_hash = %s",
            (key_hash,),
        )[0][0]
    except Exception as e:
        # Any lookup failure (no row, DB error) is treated as a failed login.
        raise exceptions.Forbidden(e)
    if not user:
        raise exceptions.Forbidden()
    resp = make_response({"access": "granted"})
    resp.set_cookie("user", str(user))
    return resp
@app.route("/turn")
def turn():
    """Report whose turn it is: the user with the fewest recorded walks walks next."""
    user = request.cookies.get("user")
    if not user:
        print("no user")
        return {"error": "cannot resolve user"}, 400
    # First row of the ascending walk-count ranking = user with the fewest walks.
    # NOTE(review): a user with zero walks never appears in this GROUP BY result,
    # so a brand-new user can never be selected — confirm this is intended.
    walking_user_id = query(
        """
        select
        user_id,
        count(*) as walk_count
        from walks
        group by user_id
        order by walk_count
        """)[0][0]
    if str(walking_user_id) == user:
        return {"turn": "you"}
    return {"turn": "other"}
@app.route("/history")
def get_history():
    """Return walks between the ``from`` and ``to`` query parameters (unix timestamps).

    Fixes over the previous version: psycopg2 expects ``%s`` placeholders (not
    ``$1``/``$2``); parameters must be passed as one sequence; query-string
    values are strings and must be converted before ``fromtimestamp``; cursor
    rows are tuples and cannot be indexed by column name.
    """
    req_data = request.args
    from_ts = float(req_data["from"])
    to_ts = float(req_data["to"])
    # NOTE(review): columns selected explicitly — assumes the walks table has
    # (walker, walked_at) as used by the /walk insert; verify against the schema.
    rows = query(
        """
        select walker, walked_at from walks
        where walked_at between %s and %s
        """,
        (datetime.fromtimestamp(from_ts), datetime.fromtimestamp(to_ts)),
    )
    walks = [
        {
            "date": walked_at.date().isoformat(),
            "time": walked_at.time().isoformat(),
            "walker": walker,
        }
        for walker, walked_at in rows
    ]
    return walks
@app.route("/walk", methods=["POST"])
def walk():
    """Register a walk for the authenticated walker.

    Expects JSON ``{"timestamp": <unix ts>, "walker": "P"|"M"}``; the ``user``
    cookie must match the walker.  Fixes over the previous version: the VALUES
    order was swapped relative to the column list (the datetime was bound to
    the ``walker`` column and vice versa), ``$1``-style placeholders are not
    understood by psycopg2 (it expects ``%s``), parameters must be passed as a
    single sequence, and ``assert`` is unsuitable for input validation (it is
    stripped under ``-O``).
    """
    json_data = json.loads(request.data)
    walk_timestamp = float(json_data["timestamp"])
    walker = json_data["walker"]
    if walker not in ("P", "M"):
        raise exceptions.BadRequest("walker must be 'P' or 'M'")
    if request.cookies.get("user") != walker:
        raise exceptions.Forbidden()
    query(
        """
        insert into walks (walker, walked_at) values (%s, %s)
        """,
        (walker, datetime.fromtimestamp(walk_timestamp)),
    )
    return {"status": "walk_registered"}
| Nosp27/nosp-walk | backend/app_init.py | app_init.py | py | 2,329 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"lin... |
33577989736 | import os, sys
import torch
import torchvision as tv
import cv2
import numpy as np
from matplotlib import pyplot as plt
from dataset import coco_labels
def box_cxcywh_to_xyxy(box):
    """Convert bounding boxes from (cx, cy, w, h) to (x1, y1, x2, y2) format.

    :param box: tensor whose last dimension holds (cx, cy, w, h)
    :return: tensor of the same shape with (x1, y1, x2, y2) in the last dim
    """
    cx = box[..., 0]
    cy = box[..., 1]
    half_w = box[..., 2] * 0.5
    half_h = box[..., 3] * 0.5
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=-1)
def box_iou(b1, b2):
    """Pairwise IoU between two sets of boxes in xyxy format.

    Args:
        b1 (torch.Tensor): first set of boxes, shape [N, 4]
        b2 (torch.Tensor): second set of boxes, shape [M, 4]

    Returns:
        tuple(torch.Tensor, torch.Tensor): iou and union, each of shape [N, M]
    """
    def _area(boxes):
        return (boxes[..., 2] - boxes[..., 0]) * (boxes[..., 3] - boxes[..., 1])

    top_left = torch.max(b1[:, None, :2], b2[:, :2])      # [N, M, 2]
    bottom_right = torch.min(b1[:, None, 2:], b2[:, 2:])  # [N, M, 2]
    extent = (bottom_right - top_left).clamp(min=0)       # [N, M, 2]
    intersection = extent[..., 0] * extent[..., 1]        # [N, M]
    union = _area(b1)[:, None] + _area(b2) - intersection  # [N, M]
    # Small epsilon guards against division by zero for degenerate boxes.
    iou = intersection / (union + 1e-6)
    return iou, union
def box_giou(b1, b2):
    """Pairwise generalized IoU between two sets of boxes in xyxy format.

    Args:
        b1 (torch.Tensor): first set of boxes, shape [N, 4]
        b2 (torch.Tensor): second set of boxes, shape [M, 4]

    Returns:
        tuple(torch.Tensor, torch.Tensor): giou and iou, each of shape [N, M]
    """
    iou, union = box_iou(b1, b2)
    # Enclosing (convex hull) box of every pair.
    top_left = torch.min(b1[:, None, :2], b2[:, :2])      # [N, M, 2]
    bottom_right = torch.max(b1[:, None, 2:], b2[:, 2:])  # [N, M, 2]
    extent = (bottom_right - top_left).clamp(min=0)       # [N, M, 2]
    hull_area = extent[..., 0] * extent[..., 1]           # [N, M]
    giou = iou - (hull_area - union) / (hull_area + 1e-6)
    return giou, iou
{
"api_name": "torch.stack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 69,
... |
31267879602 | from rest_framework.fields import IntegerField
from rest_framework.serializers import ModelSerializer, Serializer
from post.models import Post, PostLike
class PostCreateSerializer(ModelSerializer):
    """Serializer for creating ``Post`` instances.

    ``author_id`` is accepted on input but never echoed back in responses
    (declared write-only below).
    """
    class Meta:
        model = Post
        fields = (
            'id',
            'text',
            'author_id',
        )
        extra_kwargs = {
            'author_id': {'write_only': True},
        }
class PostLikeSerializer(ModelSerializer):
    """Serializer for ``PostLike`` records.

    ``user_id`` is accepted on input but never echoed back in responses
    (declared write-only below).
    """
    class Meta:
        model = PostLike
        fields = (
            'id',
            'post',
            'user_id',
        )
        extra_kwargs = {
            'user_id': {'write_only': True},
        }
class PostLikeAnalyticsSerializer(Serializer):
    """Read-only serializer exposing an aggregated like count."""
    number_of_likes = IntegerField()
| RomanDemianenko/starnavi | post/api/serialzers.py | serialzers.py | py | 754 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "post.models.Post",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 20,
"usage_type": "... |
25645483609 | # coding = utf-8
import os
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset, Dataset)
class InputExample(object):
    """One prompt/response pair used for training or evaluation."""

    def __init__(self, guid, _input, _output=None):
        """Build a single example.

        Args:
            guid: unique example identifier, e.g. ``"train-1"``.
            _input: prompt text.
            _output: generated/target text, optional.
        """
        self.guid = guid
        self.input = _input
        self.output = _output
class DiaDataset(Dataset):
    """Thin ``torch`` Dataset wrapper around a pre-built list of feature dicts."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""
    def __init__(self, data_dir, dataset):
        # data_dir: directory containing train.txt / dev.txt / test.txt
        # dataset: dataset name (stored but not used by the methods below)
        self.data_dir = data_dir
        self.dataset = dataset

    def get_train_examples(self):
        """See base class."""
        return self._create_examples(
            self._read_txt(os.path.join(self.data_dir, "train.txt")), "train")

    def get_dev_examples(self):
        """See base class."""
        return self._create_examples(
            self._read_txt(os.path.join(self.data_dir, "dev.txt")), "dev")

    def get_test_examples(self):
        """See base class."""
        return self._create_examples(
            self._read_txt(os.path.join(self.data_dir, "test.txt")), "test")

    def _create_examples(self, lines, set_type):
        """Turn the ([inputs], [outputs]) pair from _read_txt into InputExample objects."""
        # NOTE(review): lines[0] raises IndexError when _read_txt returned [] —
        # callers with a missing/empty file will crash here; confirm intended.
        examples = []
        input_ = lines[0][0]
        output_ = lines[0][1]
        length = len(input_)
        for i in range(length):
            guid = "%s-%s" % (set_type, i+1)
            _input = input_[i]
            _output = output_[i]
            examples.append(InputExample(guid=guid, _input=_input, _output=_output))
        return examples

    @classmethod
    def _read_txt(cls, input_file):
        '''
        Read a tab-separated file of "input<TAB>output" lines.

        Returns [] when the file does not exist, otherwise a one-element list
        [(input_texts, output_texts)] with the two parallel column lists.
        '''
        if os.path.exists(input_file) is False:
            return []
        data = []
        input_text = []
        output_text = []
        with open(input_file, "r", encoding = "utf-8") as f:
            for line in f:
                # NOTE(review): lines from file iteration keep their '\n', so this
                # guard never fires; lines without a tab raise IndexError below.
                if len(line) == 0:
                    continue
                splits = line.strip().split('\t')
                input_text.append(splits[0])
                output_text.append(splits[1])
            if len(input_text) > 0:
                data.append((input_text, output_text))
        return data

    def convert_to_feature(self, tokenizer, examples, max_seq_length=16):
        """Tokenize, truncate and pad examples into tensor feature dicts.

        Each feature holds input/output token id tensors, zeroed segment ids,
        and "valid" masks marking only the first sub-token of each sequence.
        """
        # entities
        features = []
        for ex_index, example in enumerate(examples):
            labels = []  # NOTE(review): unused
            valid_ids1 = []
            valid_ids2 = []
            input_text = example.input
            output_text = example.output
            token1 = tokenizer.tokenize(input_text)
            token2 = tokenizer.tokenize(output_text)
            # Reserve two slots for [CLS] and [SEP].
            token1 = token1[:max_seq_length - 2]
            token2 = token2[:max_seq_length - 2]
            for m in range(len(token1)):
                if m == 0:
                    valid_ids1.append(1)
                else:
                    valid_ids1.append(0)
            for m in range(len(token2)):
                if m == 0:
                    valid_ids2.append(1)
                else:
                    valid_ids2.append(0)
            tokens1 = ["[CLS]"] + token1 + ["[SEP]"]
            tokens2 = ["[CLS]"] + token2 + ["[SEP]"]
            valid_ids1 = [1] + valid_ids1 + [1]
            valid_ids2 = [1] + valid_ids2 + [1]
            input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
            input_ids2 = tokenizer.convert_tokens_to_ids(tokens2)
            segment_ids = [0] * max_seq_length
            # Right-pad both sequences (and their valid masks) with zeros.
            if len(input_ids1) < max_seq_length:
                input_ids1 += [0] * (max_seq_length - len(input_ids1))
                valid_ids1 += [0] * (max_seq_length - len(valid_ids1))
            if len(input_ids2) < max_seq_length:
                input_ids2 += [0] * (max_seq_length - len(input_ids2))
                valid_ids2 += [0] * (max_seq_length - len(valid_ids2))
            assert len(input_ids1) == max_seq_length
            assert len(input_ids2) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(valid_ids1) == max_seq_length
            assert len(valid_ids2) == max_seq_length
            features.append({
                "input_ids": torch.tensor(input_ids1, dtype=torch.long),
                "output_ids": torch.tensor(input_ids2, dtype=torch.long),
                "segment_ids": torch.tensor(segment_ids, dtype=torch.long),
                "valid_ids1": torch.tensor(valid_ids1, dtype=torch.long),
                "valid_ids2": torch.tensor(valid_ids2, dtype=torch.long),
                "input_text": example.input,
                "output_text": example.output
            })
        return features

    def get_dataloader(self, features, batch_size, mode='train', rank=0, world_size=1):
        """Wrap features in a randomly-sampled DataLoader; shard by rank when training distributed."""
        if mode == "train" and world_size > 1:
            features = features[rank::world_size]
        data_set = DiaDataset(features)
        sampler = RandomSampler(data_set)
        return DataLoader(data_set, sampler=sampler, batch_size=batch_size)

    def get_all_dataloader(self, tokenizer, args):
        """Build train/dev/test dataloaders (dev and test use batch size 1)."""
        #train
        train_examples = self.get_train_examples()
        train_features = self.convert_to_feature(tokenizer, train_examples, args.max_seq_len)
        train_dataloader = self.get_dataloader(train_features, mode="train", rank=args.rank,
                                               world_size=args.world_size, batch_size=args.batch_size)
        #test
        test_examples = self.get_test_examples()
        test_features = self.convert_to_feature(tokenizer, test_examples, args.max_seq_len)
        test_dataloader = self.get_dataloader(test_features, mode="test", batch_size=1)
        #dev
        dev_examples = self.get_dev_examples()
        dev_features = self.convert_to_feature(tokenizer, dev_examples, args.max_seq_len)
        dev_dataloader = self.get_dataloader(dev_features, mode="dev", batch_size=1)
        return train_dataloader, dev_dataloader, test_dataloader
{
"api_name": "torch.utils.data.Dataset",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
30188716160 | import datetime
from util.util import CGreeks, CSingleOptHolding
from util.COption import COption
import pandas as pd
class COptHolding(object):
    """Option holdings container: per-option positions plus aggregated P&L and greeks."""
    def __init__(self, str_logfile_path=None):
        """
        Initialize holdings data and the aggregated greeks.
        class attributes:
            holdings: {'code': CSingleOptHolding}
            mode: position mode (e.g. gradual build-up, constant gamma)
            status: onposition=building, positioned=built, onliquidation=liquidating
            cashinflow: cash inflow (sell-to-open, sell-to-close)
            cashoutflow: cash outflow (buy-to-open, buy-to-close)
            commission: accumulated commission
            pandl: profit and loss
            capital: capital size
            gammaexposure: gamma exposure
            greeks: CGreeks instance (aggregated over all holdings)
            logger: logger (currently unused; messages go to logfilepath)
            logfilepath: path of the log file
        """
        self.holdings = {}
        self.mode = ''
        self.status = ''
        self.cashinflow = 0.0
        self.cashoutflow = 0.0
        self.commission = 0.0
        self.pandl = 0.0
        self.capital = 0.0
        self.gammaexposure = 0.0
        self.greeks = CGreeks()
        # self.logger = None
        self.logfilepath = str_logfile_path

    def load_holdings(self, holding_filename):
        """Load holdings from file; COption loads its own minute quotes on construction."""
        # Clear current holdings first.
        self.holdings = {}
        with open(holding_filename, 'rt') as f:
            bIsPandLSection = False
            bIsHoldingSection = False
            while True:
                strline = f.readline().strip('\n')
                if not strline:
                    break
                # Section markers toggle which parser branch handles the next lines.
                if strline == '[begin of P&L]':
                    bIsPandLSection = True
                    strline = f.readline().strip('\n')
                if strline == '[end of P&L]':
                    bIsPandLSection = False
                    strline = f.readline().strip('\n')
                if strline == '[begin of holdings]':
                    bIsHoldingSection = True
                    strline = f.readline().strip('\n')
                if strline == '[end of holdings]':
                    bIsHoldingSection = False
                if bIsPandLSection:
                    # P&L section: one "name=value" per line.
                    ind_name, ind_value = strline.split(sep='=')
                    if ind_name == 'mode':
                        self.mode = ind_value
                    elif ind_name == 'status':
                        # An in-flight liquidation is never overwritten by the file.
                        if self.status != 'onliquidation':
                            self.status = ind_value
                    elif ind_name == 'cashinflow':
                        self.cashinflow = float(ind_value)
                    elif ind_name == 'cashoutflow':
                        self.cashoutflow = float(ind_value)
                    elif ind_name == 'commission':
                        self.commission = float(ind_value)
                    elif ind_name == 'pandl':
                        self.pandl = float(ind_value)
                    elif ind_name == 'capital':
                        self.capital = float(ind_value)
                    elif ind_name == 'gammaexposure':
                        self.gammaexposure = float(ind_value)
                if bIsHoldingSection:
                    # Holdings section: "code=...|holdingside=...|holdingvol=..." per line.
                    holding_data = strline.split(sep='|')
                    dict_holding = {}
                    for data in holding_data:
                        ind_name, ind_value = data.split(sep='=')
                        dict_holding[ind_name] = ind_value
                    if dict_holding['code'] not in self.holdings:
                        holding_side = int(dict_holding['holdingside'])
                        holding_vol = int(dict_holding['holdingvol'])
                        holding_opt = COption(dict_holding['code'])
                        self.holdings[dict_holding['code']] = CSingleOptHolding(holding_side, holding_vol, holding_opt)
                    else:
                        strmsg = "holding data of option:%s in holding file: %s was duplicated.\n" % (dict_holding['code'], holding_filename)
                        # NOTE(review): 'as f' shadows the holdings file handle; after
                        # this block, f refers to the closed log file and the next
                        # f.readline() will fail — rename the inner handle.
                        with open(self.logfilepath, 'at') as f:
                            f.write(strmsg)

    def save_holdings(self, holding_filename):
        """Save holdings data to file in the same sectioned format load_holdings reads."""
        # If currently liquidating, persist the status as 'onposition'.
        if self.status == 'onliquidation':
            self.status = 'onposition'
        with open(holding_filename, 'wt') as f:
            f.write('[begin of P&L]\n')
            f.write('mode=%s\n' % self.mode)
            f.write('status=%s\n' % self.status)
            f.write('cashinflow=%0.2f\n' % self.cashinflow)
            f.write('cashoutflow=%0.2f\n' % self.cashoutflow)
            f.write('commission=%0.2f\n' % self.commission)
            f.write('pandl=%0.2f\n' % self.pandl)
            f.write('capital=%0.2f\n' % self.capital)
            f.write('gammaexposure=%0.2f\n' % self.gammaexposure)
            f.write('gamma_mv=%0.2f\n' % self.greeks.gamma_mv)
            f.write('delta_mv=%0.2f\n' % self.greeks.delta_mv)
            f.write('total_margin=%0.2f\n' % self.total_margin())
            f.write('[end of P&L]\n\n')
            f.write('[begin of holdings]\n')
            for opt_code, opt_holding in self.holdings.items():
                f.write('code=%s|holdingside=%d|holdingvol=%d\n' % (opt_code, opt_holding.holdingside, opt_holding.holdingvol))
            f.write('[end of holdings]\n')

    # NOTE(review): the default calc_datetime is evaluated once at import time
    # (mutable-default pitfall) — callers should always pass the time explicitly.
    def calc_greeks(self, underlying_price, risk_free, dividend_rate, vol, calc_datetime=datetime.datetime.now()):
        """Compute the aggregated greeks of the option holdings."""
        self.greeks.delta = 0.0
        self.greeks.gamma = 0.0
        self.greeks.vega = 0.0
        self.greeks.delta_mv = 0.0
        self.greeks.gamma_mv = 0.0
        # For each holding: compute the option's own greeks, then accumulate
        # scaled by multiplier, side (+1 long / -1 short) and volume.
        for code, holding in self.holdings.items():
            holding.COption.calc_greeks(underlying_price, risk_free, dividend_rate, vol, calc_datetime)
            self.greeks.delta += (holding.COption.greeks.delta * holding.COption.multiplier *
                                  holding.holdingside * holding.holdingvol)
            self.greeks.gamma += (holding.COption.greeks.gamma * holding.COption.multiplier *
                                  holding.holdingside * holding.holdingvol)
            self.greeks.vega += (holding.COption.greeks.vega * holding.COption.multiplier *
                                 holding.holdingside * holding.holdingvol)
            self.greeks.delta_mv += (holding.COption.greeks.delta_mv * holding.holdingside * holding.holdingvol)
            self.greeks.gamma_mv += (holding.COption.greeks.gamma_mv * holding.holdingside * holding.holdingvol)

    def calc_margin(self, trading_day):
        """
        Compute the opening margin for each held option.
        :param trading_day: date (type=datetime.date)
        :return:
        """
        # 1. Load the underlying's daily K-line series.
        # NOTE(review): DataFrame.ix is removed in modern pandas — migrate to .loc.
        underlying_quote = pd.read_csv('../data/underlying_daily_quote.csv', index_col=0, parse_dates=[0])
        underlying_pre_close = float(underlying_quote.ix[trading_day, 'pre_close'])
        # 2. Load the sample options' daily quotes for that day.
        strdate = trading_day.strftime('%Y-%m-%d')
        strfilepath = '../../opt_quote/' + strdate + '/50OptionDailyQuote.csv'
        opts_quote = pd.read_csv(strfilepath, usecols=range(1, 14), parse_dates=[0], encoding='gb18030', dtype={'option_code':str})
        opts_quote.set_index(keys='option_code', inplace=True)
        # 3. Compute each held option's opening margin; fall back to a flat
        #    3000.0 when the option has no quote row that day.
        for optcode, holding in self.holdings.items():
            if optcode in opts_quote.index:
                opt_pre_settle = float(opts_quote.ix[optcode, 'pre_settle'])
                holding.COption.calc_margin(opt_pre_settle, underlying_pre_close)
            else:
                holding.COption.margin = 3000.0

    def total_margin(self):
        """Return the total margin of the held options (short positions only)."""
        fmargin = 0.0
        for optcode, optholding in self.holdings.items():
            if optholding.holdingside == -1:
                fmargin += optholding.COption.margin * optholding.holdingvol
        return fmargin

    def margin_ratio(self):
        """Return the ratio of total margin to capital."""
        return self.total_margin() / self.capital

    def get_least_timevalue_opts(self, underlying_price, trading_datetime, exclusions=None):
        """
        Get the held call and put options with the smallest time value.
        :param underlying_price: latest underlying price
        :param trading_datetime: trading time (type=datetime.datetime)
        :param exclusions: list of option codes to skip
        :return: tuple(call COption, put COption); either may be None
        """
        if exclusions is None:
            exclusions = []
        opt_call = None
        opt_put = None
        for optcode, holding in self.holdings.items():
            if holding.holdingvol > 0 and optcode not in exclusions:
                if holding.COption.opt_type == "Call":
                    if opt_call is None:
                        opt_call = holding.COption
                    else:
                        if holding.COption.time_value(underlying_price, trading_datetime) < \
                                opt_call.time_value(underlying_price, trading_datetime):
                            opt_call = holding.COption
                else:
                    if opt_put is None:
                        opt_put = holding.COption
                    else:
                        if holding.COption.time_value(underlying_price, trading_datetime) < \
                                opt_put.time_value(underlying_price, trading_datetime):
                            opt_put = holding.COption
        return opt_call, opt_put

    def get_minmax_gamma_opts(self, exclusions=None):
        """
        Get the held call/put options with the largest and the smallest gamma.
        :param exclusions: list of option codes to skip
        :return: dict{'min'/'max': tuple(COption of call, COption of put)}
        """
        if exclusions is None:
            exclusions = []
        min_gamma_call = None
        min_gamma_put = None
        max_gamma_call = None
        max_gamma_put = None
        for optcode, holding in self.holdings.items():
            if holding.holdingvol > 0 and optcode not in exclusions:
                if holding.COption.opt_type == 'Call':
                    if min_gamma_call is None:
                        min_gamma_call = holding.COption
                    if max_gamma_call is None:
                        max_gamma_call = holding.COption
                    if holding.COption.greeks.gamma < min_gamma_call.greeks.gamma:
                        min_gamma_call = holding.COption
                    if holding.COption.greeks.gamma > max_gamma_call.greeks.gamma:
                        max_gamma_call = holding.COption
                else:
                    if min_gamma_put is None:
                        min_gamma_put = holding.COption
                    if max_gamma_put is None:
                        max_gamma_put = holding.COption
                    if holding.COption.greeks.gamma < min_gamma_put.greeks.gamma:
                        min_gamma_put = holding.COption
                    if holding.COption.greeks.gamma > max_gamma_put.greeks.gamma:
                        max_gamma_put = holding.COption
        return {'min': (min_gamma_call, min_gamma_put), 'max': (max_gamma_call, max_gamma_put)}

    def verify_update_tradedata(self, tradedata):
        """
        Validate one trade record and, if valid, apply it to the holdings.
        :param tradedata: a single option trade record
        :return: True when accepted; False when the trade conflicts with the
                 current position (NOTE(review): one branch below uses a bare
                 ``return``, yielding None rather than False — confirm intended).
        """
        if tradedata.code in self.holdings:
            if tradedata.tradeside == 'buy' and tradedata.openclose == 'open':
                if self.holdings[tradedata.code].holdingside == 1:
                    self.holdings[tradedata.code].holdingvol += tradedata.tradevol
                    self.cashoutflow += tradedata.tradevalue
                    self.commission += tradedata.commission
                    # return True
                else:
                    return False
            elif tradedata.tradeside == 'buy' and tradedata.openclose == 'close':
                if self.holdings[tradedata.code].holdingside == -1:
                    self.holdings[tradedata.code].holdingvol -= tradedata.tradevol
                    self.cashoutflow += tradedata.tradevalue
                    self.commission += tradedata.commission
                    # return True
                else:
                    return False
            elif tradedata.tradeside == 'sell' and tradedata.openclose == 'open':
                if self.holdings[tradedata.code].holdingside == -1:
                    self.holdings[tradedata.code].holdingvol += tradedata.tradevol
                    self.cashinflow += tradedata.tradevalue
                    self.commission += tradedata.commission
                    # return True
                else:
                    return False
            elif tradedata.tradeside == 'sell' and tradedata.openclose == 'close':
                if self.holdings[tradedata.code].holdingside == 1:
                    self.holdings[tradedata.code].holdingvol -= tradedata.tradevol
                    self.cashinflow += tradedata.tradevalue
                    self.commission += tradedata.commission
                    # return True
                else:
                    return False
        else:
            # No existing position in this option: only opening trades are valid.
            if tradedata.openclose == 'close':
                return False
            elif tradedata.tradeside == 'buy' and tradedata.openclose == 'open':
                # holding = CSingleOptHolding(side=1, vol=tradedata.tradevol, opt=COption(tradedata.code))
                holding = CSingleOptHolding(side=1, vol=tradedata.tradevol, opt=tradedata.opt)
                self.cashoutflow += tradedata.tradevalue
                self.commission += tradedata.commission
                self.holdings[tradedata.code] = holding
                # return True
            elif tradedata.tradeside == 'sell' and tradedata.openclose == 'open':
                # holding = CSingleOptHolding(side=-1, vol=tradedata.tradevol, opt=COption(tradedata.code))
                holding = CSingleOptHolding(side=-1, vol=tradedata.tradevol, opt=tradedata.opt)
                self.cashinflow += tradedata.tradevalue
                self.commission += tradedata.commission
                self.holdings[tradedata.code] = holding
                # return True
            else:
                return
        # If the trade brought this option's volume to 0, drop the holding entry.
        if self.holdings[tradedata.code].holdingvol == 0:
            del_holding = self.holdings.pop(tradedata.code)
            if del_holding is not None:
                strmsg = "holding vol of option: %s is equal to 0, the holding data was deleted.\n" % tradedata.code
            else:
                strmsg = "failed to delete holding data of option: %s\n" % tradedata.code
            with open(self.logfilepath, 'at') as f:
                f.write(strmsg)
        return True

    def update_holdings(self, tradedatas):
        """
        Apply a list of option trades to the holdings, logging each record.
        :param tradedatas: list of option trade records (COptTradeData)
        :return: None
        """
        # Apply each trade in order, logging it; invalid trades are only logged.
        with open(self.logfilepath, 'at') as f:
            for tradedata in tradedatas:
                log_msg = "trade info: time=%s,code=%s,tradeside=%s,openclose=%s,price=%f,vol=%d,value=%f,commission=%f\n" % \
                          (tradedata.time.strftime('%H:%M:%S'), tradedata.code, tradedata.tradeside, tradedata.openclose,
                           tradedata.tradeprice, tradedata.tradevol, tradedata.tradevalue, tradedata.commission)
                f.write(log_msg)
                if not self.verify_update_tradedata(tradedata):
                    f.write('the last trading data was wrong.\n')
        # Refresh margins of the held options after applying the trades.
        if tradedatas:
            self.calc_margin(tradedatas[0].time.date())

    def holding_mv(self, trading_datetime):
        """
        Compute the total market value of the option holdings.
        :param trading_datetime: valuation time (type=datetime.datetime)
        :return: signed market value (short positions contribute negatively)
        """
        opt_mv = 0.0
        for optcode, optholding in self.holdings.items():
            opt_price = optholding.COption.quote_1min.ix[trading_datetime, 'close']
            opt_mv += optholding.holdingside * optholding.holdingvol * opt_price * optholding.COption.multiplier
        return opt_mv

    def p_and_l(self, trading_datetime):
        """
        Compute P&L at the given time: market value + net cash flow - commission.
        :param trading_datetime: valuation time (type=datetime.datetime)
        :return: the updated P&L (also stored on self.pandl)
        """
        self.pandl = self.holding_mv(trading_datetime) + self.cashinflow - self.cashoutflow - self.commission
        return self.pandl
{
"api_name": "util.util.CGreeks",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "util.COption.COption",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "util.util.CSingleOptHolding",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "da... |
38566374238 | import torch
import cv2
import numpy as np
import math
from sklearn.metrics import f1_score
from torch.autograd import Variable
from matplotlib.image import imread
# function for colorizing a label image:
def label_img_to_color(img: torch.Tensor):
    """Colorize a binary label image.

    Args:
        img: integer label tensor of shape (1, H, W) with values in {0, 1}.

    Returns:
        Tensor of shape (H, W, 3): label 1 maps to white, label 0 to black.
    """
    # Fix: the original defined ``label_to_color`` twice; the first assignment
    # (white/black inverted) was dead code immediately shadowed by the second.
    # The original 20-class Cityscapes palette above it was already disabled.
    label_to_color = {
        0: [0, 0, 0],
        1: [255, 255, 255],
    }
    img = img.squeeze(0)      # (1, H, W) -> (H, W)
    img = img.unsqueeze(-1)   # (H, W) -> (H, W, 1)
    img = img.repeat(1, 1, 3)  # repeat 3 channel on grey scale image
    img_color = torch.where(
        img == torch.tensor([1, 1, 1]),
        torch.tensor(label_to_color[1]),
        torch.tensor(label_to_color[0]),
    )  # temp version
    return img_color
def grey_to_heatmap(img):
    """Map a single-channel uint8 image to a BGR JET heatmap via OpenCV."""
    heatmap = cv2.applyColorMap(img, cv2.COLORMAP_JET)
    return heatmap
def otsu_threshold(src):
    """
    Binarize a probability map with Otsu's automatic threshold.

    src: Tensor (1, 1, w, h) or (1, w, h) or (w, h); values assumed in [0, 1]
    return: ndarray (1, w, h) with values in {0, 1}
    """
    # Scale to 0-255 so Otsu operates on uint8 intensities.
    output_np = src.cpu().detach().numpy().squeeze(0).squeeze(0) * 255
    # The -1 threshold argument is ignored: THRESH_OTSU picks the split itself.
    _, output_th = cv2.threshold(output_np.astype(np.uint8), -1, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    output_th = output_th.reshape((1, output_th.shape[0], output_np.shape[1]))
    return output_th
def threshold(src, th):
    """
    Binarize a probability map at a fixed threshold.

    src: Tensor (1, 1, w, h) or (1, w, h) or (w, h); values assumed in [0, 1]
    th: threshold in [0, 1] (scaled to 0-255 internally)
    return: ndarray (1, w, h) with values in {0, 1}
    """
    output_np = src.cpu().detach().numpy().squeeze(0) * 255
    th = th * 255
    _, output_th = cv2.threshold(output_np.astype(np.uint8), th, 1, cv2.THRESH_BINARY)
    output_th = output_th.reshape((1, output_th.shape[0], output_np.shape[1]))
    return output_th
def resize_img_by_resolution(img, maximum_resolution=None):
    """Shrink *img* (aspect ratio preserved) so its pixel count fits a budget.

    :param img: PIL-style image exposing ``width``, ``height`` and ``resize``
    :param maximum_resolution: (width, height) budget; defaults to 1280x720
    :return: the resized image, or *img* unchanged if already within budget
    """
    if maximum_resolution is None:
        pixel_budget = 1280 * 720
    else:
        pixel_budget = int(maximum_resolution[0] * maximum_resolution[1])
    width = img.width
    height = img.height
    if width * height > pixel_budget:
        # Scale both dimensions by the square root of the area excess.
        scale = math.sqrt((width * height) / pixel_budget)
        img = img.resize((int(width / scale), int(height / scale)))
    return img
# https://github.com/VainF/DeepLabV3Plus-Pytorch/blob/master/metrics/stream_metrics.py
class _StreamMetrics(object):
    """Abstract base for metrics accumulated over a stream of predictions."""

    def __init__(self):
        """ Overridden by subclasses """
        raise NotImplementedError()

    def update(self, gt, pred):
        """ Overridden by subclasses """
        raise NotImplementedError()

    def get_results(self):
        """ Overridden by subclasses """
        raise NotImplementedError()

    def to_str(self, metrics):
        """ Overridden by subclasses """
        raise NotImplementedError()

    def reset(self):
        """ Overridden by subclasses """
        raise NotImplementedError()


class StreamSegMetrics(_StreamMetrics):
    """
    Stream Metrics for Semantic Segmentation Task
    """

    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    def update(self, label_trues, label_preds):
        """Accumulate a batch of ground-truth / prediction label maps."""
        for truth, prediction in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(
                truth.flatten(), prediction.flatten()
            )

    @staticmethod
    def to_str(results):
        """Render all scalar results (Class IoU excluded) as one string."""
        lines = ["\n"]
        for key, value in results.items():
            if key != "Class IoU":
                lines.append("%s: %f\n" % (key, value))
        return "".join(lines)

    def _fast_hist(self, label_true, label_pred):
        # Only ground-truth labels inside [0, n_classes) contribute.
        valid = (label_true >= 0) & (label_true < self.n_classes)
        combined = self.n_classes * label_true[valid].astype(int) + label_pred[valid]
        hist = np.bincount(combined, minlength=self.n_classes ** 2)
        return hist.reshape(self.n_classes, self.n_classes)

    def get_results(self):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        hist = self.confusion_matrix
        correct = np.diag(hist)
        overall_acc = correct.sum() / hist.sum()
        mean_acc = np.nanmean(correct / hist.sum(axis=1))
        iu = correct / (hist.sum(axis=1) + hist.sum(axis=0) - correct)
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return {
            "Overall Acc": overall_acc,
            "Mean Acc": mean_acc,
            "FreqW Acc": fwavacc,
            "Mean IoU": mean_iu,
            "Class IoU": cls_iu,
        }

    def reset(self):
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
class AverageMeter(object):
    """Keeps a running sum and count per identifier and reports the mean."""

    def __init__(self):
        # id -> [running_sum, sample_count]
        self.book = dict()

    def reset_all(self):
        """Drop every tracked series."""
        self.book.clear()

    def reset(self, id):
        """Zero the accumulator for ``id`` (no-op when ``id`` is unknown)."""
        entry = self.book.get(id, None)
        if entry is not None:
            entry[0] = 0
            entry[1] = 0

    def update(self, id, val):
        """Fold ``val`` into the running average for ``id``."""
        entry = self.book.setdefault(id, [0, 0])
        entry[0] += val
        entry[1] += 1

    def get_results(self, id):
        """Return the mean of all values recorded for ``id``."""
        entry = self.book.get(id, None)
        assert entry is not None
        return entry[0] / entry[1]
class ImageProcessing(object):
    '''
    Static PyTorch image utilities: colour-space conversion (RGB/LAB/HSV)
    and piecewise-linear curve adjustment, plus numpy helpers (axis
    swapping, normalisation, MSE/PSNR).

    NOTE(review): the tensor methods call ``.cuda()`` unconditionally, so a
    CUDA device is required; images appear to be CHW tensors in [0, 1] --
    confirm against callers.

    @issue
    'hsv_to_rgb' and 'rgb_to_hsv' convert the image with H 180 value to 0, resulting blue color to red color
    '''
    @staticmethod
    def rgb_to_lab(img, is_training=True):
        """ PyTorch implementation of RGB to LAB conversion: https://docs.opencv.org/3.3.0/de/d25/imgproc_color_conversions.html
        Based roughly on a similar implementation here: https://github.com/affinelayer/pix2pix-tensorflow/blob/master/pix2pix.py
        :param img: image to be adjusted (CHW tensor; channels rescaled to [0, 1] on return)
        :param is_training: unused; kept for interface compatibility
        :returns: adjusted image
        :rtype: Tensor
        """
        img = img.permute(2, 1, 0)
        shape = img.shape
        img = img.contiguous()
        img = img.view(-1, 3)
        # sRGB -> linear RGB (inverse gamma); the clamp guards the fractional power
        img = (img / 12.92) * img.le(0.04045).float() + (((torch.clamp(img,
                                                                       min=0.0001) + 0.055) / 1.055) ** 2.4) * img.gt(
            0.04045).float()
        rgb_to_xyz = Variable(torch.FloatTensor([  # X Y Z
            [0.412453, 0.212671, 0.019334],  # R
            [0.357580, 0.715160, 0.119193],  # G
            [0.180423, 0.072169,
             0.950227],  # B
        ]), requires_grad=False).cuda()
        img = torch.matmul(img, rgb_to_xyz)
        # normalise by the D65 white point
        img = torch.mul(img, Variable(torch.FloatTensor(
            [1 / 0.950456, 1.0, 1 / 1.088754]), requires_grad=False).cuda())
        epsilon = 6 / 29
        # XYZ -> f(XYZ): linear branch below epsilon**3, cube root above
        img = ((img / (3.0 * epsilon ** 2) + 4.0 / 29.0) * img.le(epsilon ** 3).float()) + \
            (torch.clamp(img, min=0.0001) **
             (1.0 / 3.0) * img.gt(epsilon ** 3).float())
        fxfyfz_to_lab = Variable(torch.FloatTensor([[0.0, 500.0, 0.0],  # fx
                                                    # fy
                                                    [116.0, -500.0, 200.0],
                                                    # fz
                                                    [0.0, 0.0, -200.0],
                                                    ]), requires_grad=False).cuda()
        img = torch.matmul(img, fxfyfz_to_lab) + Variable(
            torch.FloatTensor([-16.0, 0.0, 0.0]), requires_grad=False).cuda()
        img = img.view(shape)
        img = img.permute(2, 1, 0)
        '''
        L_chan: black and white with input range [0, 100]
        a_chan/b_chan: color channels with input range ~[-110, 110], not exact
        [0, 100] => [0, 1], ~[-110, 110] => [0, 1]
        '''
        img[0, :, :] = img[0, :, :] / 100
        img[1, :, :] = (img[1, :, :] / 110 + 1) / 2
        img[2, :, :] = (img[2, :, :] / 110 + 1) / 2
        # NaN scrub: (x != x) is true only for NaN entries
        img[(img != img).detach()] = 0
        img = img.contiguous()
        return img.cuda()

    @staticmethod
    def lab_to_rgb(img, is_training=True):
        """ PyTorch implementation of LAB to RGB conversion: https://docs.opencv.org/3.3.0/de/d25/imgproc_color_conversions.html
        Based roughly on a similar implementation here: https://github.com/affinelayer/pix2pix-tensorflow/blob/master/pix2pix.py
        :param img: image to be adjusted (LAB channels normalised to [0, 1], as produced by rgb_to_lab)
        :param is_training: unused; kept for interface compatibility
        :returns: adjusted image
        :rtype: Tensor
        """
        img = img.permute(2, 1, 0)
        shape = img.shape
        img = img.contiguous()
        img = img.view(-1, 3)
        # undo the [0, 1] normalisation applied at the end of rgb_to_lab
        img_copy = img.clone()
        img_copy[:, 0] = img[:, 0] * 100
        img_copy[:, 1] = ((img[:, 1] * 2) - 1) * 110
        img_copy[:, 2] = ((img[:, 2] * 2) - 1) * 110
        img = img_copy.clone().cuda()
        del img_copy
        lab_to_fxfyfz = Variable(torch.FloatTensor([  # X Y Z
            [1 / 116.0, 1 / 116.0, 1 / 116.0],  # R
            [1 / 500.0, 0, 0],  # G
            [0, 0, -1 / 200.0],  # B
        ]), requires_grad=False).cuda()
        img = torch.matmul(
            img + Variable(torch.cuda.FloatTensor([16.0, 0.0, 0.0])), lab_to_fxfyfz)
        epsilon = 6.0 / 29.0
        # inverse of the f(XYZ) transfer: linear branch below epsilon, cube above
        img = (((3.0 * epsilon ** 2 * (img - 4.0 / 29.0)) * img.le(epsilon).float()) +
               ((torch.clamp(img, min=0.0001) ** 3.0) * img.gt(epsilon).float()))
        # denormalize for D65 white point
        img = torch.mul(img, Variable(
            torch.cuda.FloatTensor([0.950456, 1.0, 1.088754])))
        xyz_to_rgb = Variable(torch.FloatTensor([  # X Y Z
            [3.2404542, -0.9692660, 0.0556434],  # R
            [-1.5371385, 1.8760108, -0.2040259],  # G
            [-0.4985314, 0.0415560, 1.0572252],  # B
        ]), requires_grad=False).cuda()
        img = torch.matmul(img, xyz_to_rgb)
        # linear RGB -> sRGB (forward gamma)
        img = (img * 12.92 * img.le(0.0031308).float()) + ((torch.clamp(img,
                                                                        min=0.0001) ** (
                                                                1 / 2.4) * 1.055) - 0.055) * img.gt(
            0.0031308).float()
        img = img.view(shape)
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        # NaN scrub
        img[(img != img).detach()] = 0
        return img

    @staticmethod
    def swapimdims_3HW_HW3(img):
        """Move the image channels from the first to the last dimension of
        the numpy multi-dimensional array, i.e. (C, H, W) -> (H, W, C)
        (batched: (B, C, H, W) -> (B, H, W, C)).
        :param img: numpy nd array representing the image
        :returns: numpy nd array with permuted axes
        :rtype: numpy nd array
        """
        if img.ndim == 3:
            return np.swapaxes(np.swapaxes(img, 1, 2), 0, 2)
        elif img.ndim == 4:
            return np.swapaxes(np.swapaxes(img, 2, 3), 1, 3)

    @staticmethod
    def swapimdims_HW3_3HW(img):
        """Move the image channels from the last to the first dimension of
        the numpy multi-dimensional array, i.e. (H, W, C) -> (C, H, W)
        (batched: (B, H, W, C) -> (B, C, H, W)).
        :param img: numpy nd array representing the image
        :returns: numpy nd array with permuted axes
        :rtype: numpy nd array
        """
        if img.ndim == 3:
            return np.swapaxes(np.swapaxes(img, 0, 2), 1, 2)
        elif img.ndim == 4:
            return np.swapaxes(np.swapaxes(img, 1, 3), 2, 3)

    @staticmethod
    def load_image(img_filepath, normaliser):
        """Loads an image from file as a numpy multi-dimensional array
        :param img_filepath: filepath to the image
        :param normaliser: divisor applied to the raw pixel values (e.g. 255)
        :returns: image as a multi-dimensional numpy array
        :rtype: multi-dimensional numpy array
        """
        img = ImageProcessing.normalise_image(
            imread(img_filepath), normaliser)  # NB: imread normalises to 0-1
        return img

    @staticmethod
    def normalise_image(img, normaliser):
        """Normalises image data to be a float between 0 and 1
        :param img: Image as a numpy multi-dimensional image array
        :param normaliser: divisor applied to the raw pixel values (e.g. 255)
        :returns: Normalised image as a numpy multi-dimensional image array
        :rtype: Numpy array
        """
        img = img.astype('float32') / normaliser
        return img

    @staticmethod
    def compute_mse(original, result):
        """Computes the mean squared error between to RGB images represented as multi-dimensional numpy arrays.
        :param original: input RGB image as a numpy array
        :param result: target RGB image as a numpy array
        :returns: the mean squared error between the input and target images
        :rtype: float
        """
        return ((original - result) ** 2).mean()

    @staticmethod
    def compute_psnr(image_batchA, image_batchB, max_intensity):
        """Computes the PSNR for a batch of input and output images
        :param image_batchA: numpy nd-array representing the image batch A of shape Bx3xWxH
        :param image_batchB: numpy nd-array representing the image batch A of shape Bx3xWxH
        :param max_intensity: maximum intensity possible in the image (e.g. 255)
        :returns: average PSNR for the batch of images
        :rtype: float
        """
        num_images = image_batchA.shape[0]
        psnr_val = 0.0
        for i in range(0, num_images):
            imageA = image_batchA[i, 0:3, :, :]
            imageB = image_batchB[i, 0:3, :, :]
            # clip predictions into the valid intensity range before scoring
            imageB = np.maximum(0, np.minimum(imageB, max_intensity))
            psnr_val += 10 * \
                np.log10(max_intensity ** 2 /
                         ImageProcessing.compute_mse(imageA, imageB))
        return psnr_val / num_images

    @staticmethod
    def hsv_to_rgb(img):
        """Converts a HSV image to RGB
        PyTorch implementation of RGB to HSV conversion: https://docs.opencv.org/3.3.0/de/d25/imgproc_color_conversions.html
        Based roughly on a similar implementation here: http://code.activestate.com/recipes/576919-python-rgb-and-hsv-conversion/
        See the class-level @issue note: hue 180 wraps to 0 (blue -> red).
        :param img: HSV image
        :returns: RGB image
        :rtype: Tensor
        """
        img = torch.clamp(img, 0, 1)
        img = img.permute(2, 1, 0)
        # m1..m5 are the slopes of the piecewise-linear hue ramps; each
        # torch.clamp(...) term below selects one 60-degree hue sector
        m1 = 0
        m2 = (img[:, :, 2] * (1 - img[:, :, 1]) - img[:, :, 2]) / 60
        m3 = 0
        m4 = -1 * m2
        m5 = 0
        r = img[:, :, 2] + torch.clamp(img[:, :, 0] * 360 - 0, 0, 60) * m1 + torch.clamp(img[:, :, 0] * 360 - 60, 0,
                                                                                         60) * m2 + torch.clamp(
            img[:, :, 0] * 360 - 120, 0, 120) * m3 + torch.clamp(img[:, :, 0] * 360 - 240, 0, 60) * m4 + torch.clamp(
            img[:, :, 0] * 360 - 300, 0, 60) * m5
        m1 = (img[:, :, 2] - img[:, :, 2] * (1 - img[:, :, 1])) / 60
        m2 = 0
        m3 = -1 * m1
        m4 = 0
        g = img[:, :, 2] * (1 - img[:, :, 1]) + torch.clamp(img[:, :, 0] * 360 - 0, 0, 60) * m1 + torch.clamp(
            img[:, :, 0] * 360 - 60,
            0, 120) * m2 + torch.clamp(img[:, :, 0] * 360 - 180, 0, 60) * m3 + torch.clamp(img[:, :, 0] * 360 - 240, 0,
                                                                                           120) * m4
        m1 = 0
        m2 = (img[:, :, 2] - img[:, :, 2] * (1 - img[:, :, 1])) / 60
        m3 = 0
        m4 = -1 * m2
        b = img[:, :, 2] * (1 - img[:, :, 1]) + torch.clamp(img[:, :, 0] * 360 - 0, 0, 120) * m1 + torch.clamp(
            img[:, :, 0] * 360 -
            120, 0, 60) * m2 + torch.clamp(img[:, :, 0] * 360 - 180, 0, 120) * m3 + torch.clamp(
            img[:, :, 0] * 360 - 300, 0, 60) * m4
        img = torch.stack((r, g, b), 2)
        # NaN scrub
        img[(img != img).detach()] = 0
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        img = torch.clamp(img, 0, 1)
        return img

    @staticmethod
    def rgb_to_hsv(img):
        """Converts an RGB image to HSV
        PyTorch implementation of RGB to HSV conversion: https://docs.opencv.org/3.3.0/de/d25/imgproc_color_conversions.html
        Based roughly on a similar implementation here: http://code.activestate.com/recipes/576919-python-rgb-and-hsv-conversion/
        See the class-level @issue note: hue 180 wraps to 0 (blue -> red).
        :param img: RGB image
        :returns: HSV image
        :rtype: Tensor
        """
        img = torch.clamp(img, 0.000000001, 1)
        img = img.permute(2, 1, 0)
        # 3, H, W
        shape = img.shape
        img = img.contiguous()
        img = img.view(-1, 3)
        mx = torch.max(img, 1)[0]
        mn = torch.min(img, 1)[0]
        ones = Variable(torch.FloatTensor(
            torch.ones((img.shape[0])))).cuda()
        zero = Variable(torch.FloatTensor(torch.zeros(shape[0:2]))).cuda()
        img = img.view(shape)
        # max - min is computed in two halves; presumably to bound peak
        # GPU memory usage -- TODO confirm
        ones1 = ones[0:math.floor((ones.shape[0] / 2))]
        ones2 = ones[math.floor(ones.shape[0] / 2):(ones.shape[0])]
        mx1 = mx[0:math.floor((ones.shape[0] / 2))]
        mx2 = mx[math.floor(ones.shape[0] / 2):(ones.shape[0])]
        mn1 = mn[0:math.floor((ones.shape[0] / 2))]
        mn2 = mn[math.floor(ones.shape[0] / 2):(ones.shape[0])]
        df1 = torch.add(mx1, torch.mul(ones1 * -1, mn1))
        df2 = torch.add(mx2, torch.mul(ones2 * -1, mn2))
        df = torch.cat((df1, df2), 0)
        del df1, df2
        df = df.view(shape[0:2]) + 1e-10
        mx = mx.view(shape[0:2])
        img = img.cuda()
        df = df.cuda()
        mx = mx.cuda()
        g = img[:, :, 1].clone().cuda()
        b = img[:, :, 2].clone().cuda()
        r = img[:, :, 0].clone().cuda()
        img_copy = img.clone()
        # hue sector depends on which channel holds the maximum
        img_copy[:, :, 0] = (((g - b) / df) * r.eq(mx).float() + (2.0 + (b - r) / df)
                             * g.eq(mx).float() + (4.0 + (r - g) / df) * b.eq(mx).float())
        img_copy[:, :, 0] = img_copy[:, :, 0] * 60.0
        zero = zero.cuda()
        img_copy2 = img_copy.clone()
        # wrap negative hue into [0, 360), then normalise to [0, 1]
        img_copy2[:, :, 0] = img_copy[:, :, 0].lt(zero).float(
        ) * (img_copy[:, :, 0] + 360) + img_copy[:, :, 0].ge(zero).float() * (img_copy[:, :, 0])
        img_copy2[:, :, 0] = img_copy2[:, :, 0] / 360
        del img, r, g, b
        img_copy2[:, :, 1] = mx.ne(zero).float() * (df / mx) + \
            mx.eq(zero).float() * (zero)
        img_copy2[:, :, 2] = mx
        # NaN scrub
        img_copy2[(img_copy2 != img_copy2).detach()] = 0
        img = img_copy2.clone()
        img = img.permute(2, 1, 0)
        img = torch.clamp(img, 0.000000001, 1)
        return img

    @staticmethod
    def apply_curve(img, C, slope_sqr_diff, channel_in, channel_out,
                    clamp=True, same_channel=True):
        """Applies a piecewise linear curve defined by a set of knot points to
        an image channel
        :param img: image to be adjusted (HWC-ordered tensor)
        :param C: predicted knot points of curve
        :param slope_sqr_diff: running smoothness regulariser, accumulated in place
        :param channel_in: channel that drives the curve lookup
        :param channel_out: channel that gets scaled
        :param clamp: clamp each segment's contribution to [0, 1]
        :param same_channel: scale channel_in itself rather than channel_out
        :returns: adjusted image
        :rtype: Tensor
        """
        slope = Variable(torch.zeros((C.shape[0] - 1))).cuda()
        curve_steps = C.shape[0] - 1
        '''
        Compute the slope of the line segments
        '''
        for i in range(0, C.shape[0] - 1):
            slope[i] = C[i + 1] - C[i]
        '''
        Compute the squared difference between slopes
        '''
        for i in range(0, slope.shape[0] - 1):
            slope_sqr_diff += (slope[i + 1] - slope[i]) * (slope[i + 1] - slope[i])
        '''
        Use predicted line segments to compute scaling factors for the channel
        '''
        scale = float(C[0])
        for i in range(0, slope.shape[0] - 1):
            if clamp:
                scale += float(slope[i]) * (torch.clamp(img[:, :, channel_in] * curve_steps - i, 0, 1))
                # scale += float(slope[i]) * (torch.clamp(img[:, :, channel_in], 0, 1))
            else:
                scale += float(slope[i]) * (img[:, :, channel_in] * curve_steps - i)
        img_copy = img.clone()
        if same_channel:
            # channel in and channel out are the same channel
            img_copy[:, :, channel_out] = img[:, :, channel_in] * scale
        else:
            # otherwise
            img_copy[:, :, channel_out] = img[:, :, channel_out] * scale
        img_copy = torch.clamp(img_copy, 0, 1)
        return img_copy, slope_sqr_diff

    @staticmethod
    def adjust_hsv(img, S):
        """Adjust the HSV channels of a HSV image using learnt curves
        :param img: image to be adjusted
        :param S: predicted parameters of piecewise linear curves
                  (four curves, concatenated in quarters: H->H, H->S, S->S, V->V)
        :returns: adjust image, regularisation term
        :rtype: Tensor, float
        """
        img = img.squeeze(0).permute(2, 1, 0)
        shape = img.shape
        img = img.contiguous()
        # exp keeps every knot value positive
        S1 = torch.exp(S[0:int(S.shape[0] / 4)])
        S2 = torch.exp(S[(int(S.shape[0] / 4)):(int(S.shape[0] / 4) * 2)])
        S3 = torch.exp(S[(int(S.shape[0] / 4) * 2):(int(S.shape[0] / 4) * 3)])
        S4 = torch.exp(S[(int(S.shape[0] / 4) * 3):(int(S.shape[0] / 4) * 4)])
        slope_sqr_diff = Variable(torch.zeros(1) * 0.0).cuda()
        '''
        Adjust Hue channel based on Hue using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img, S1, slope_sqr_diff, channel_in=0, channel_out=0)
        '''
        Adjust Saturation channel based on Hue using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, S2, slope_sqr_diff, channel_in=0, channel_out=1, same_channel=False)
        '''
        Adjust Saturation channel based on Saturation using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, S3, slope_sqr_diff, channel_in=1, channel_out=1)
        '''
        Adjust Value channel based on Value using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, S4, slope_sqr_diff, channel_in=2, channel_out=2)
        img = img_copy.clone()
        del img_copy
        # NaN scrub
        img[(img != img).detach()] = 0
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        return img, slope_sqr_diff

    @staticmethod
    def adjust_sv(img, S):
        """Adjust the S and V channels of a HSV image using learnt curves
        :param img: image to be adjusted
        :param S: predicted parameters of piecewise linear curves
                  (two curves, concatenated in halves: S->S, V->V)
        :returns: adjust image, regularisation term
        :rtype: Tensor, float
        """
        img = img.squeeze(0).permute(2, 1, 0)
        img = img.contiguous()
        S3 = torch.exp(S[(int(S.shape[0] / 2) * 0):(int(S.shape[0] / 2) * 1)])
        S4 = torch.exp(S[(int(S.shape[0] / 2) * 1):(int(S.shape[0] / 2) * 2)])
        slope_sqr_diff = Variable(torch.zeros(1) * 0.0).cuda()
        '''
        Adjust Saturation channel based on Saturation using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img, S3, slope_sqr_diff, channel_in=1, channel_out=1)
        '''
        Adjust Value channel based on Value using the predicted curve
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, S4, slope_sqr_diff, channel_in=2, channel_out=2)
        img = img_copy.clone()
        del img_copy
        # NaN scrub
        img[(img != img).detach()] = 0
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        return img, slope_sqr_diff

    @staticmethod
    def adjust_rgb(img, R):
        """Adjust the RGB channels of a RGB image using learnt curves
        :param img: image to be adjusted
        :param R: predicted parameters of piecewise linear curves
                  (three curves, concatenated in thirds: R, G, B)
        :returns: adjust image, regularisation term
        :rtype: Tensor, float
        """
        img = img.squeeze(0).permute(2, 1, 0)
        shape = img.shape
        img = img.contiguous()
        '''
        Extract the parameters of the three curves
        '''
        R1 = torch.exp(R[0:int(R.shape[0] / 3)])
        R2 = torch.exp(R[(int(R.shape[0] / 3)):(int(R.shape[0] / 3) * 2)])
        R3 = torch.exp(R[(int(R.shape[0] / 3) * 2):(int(R.shape[0] / 3) * 3)])
        '''
        Apply the curve to the R channel
        '''
        slope_sqr_diff = Variable(torch.zeros(1) * 0.0).cuda()
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img, R1, slope_sqr_diff, channel_in=0, channel_out=0)
        '''
        Apply the curve to the G channel
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, R2, slope_sqr_diff, channel_in=1, channel_out=1)
        '''
        Apply the curve to the B channel
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, R3, slope_sqr_diff, channel_in=2, channel_out=2)
        img = img_copy.clone()
        del img_copy
        # NaN scrub
        img[(img != img).detach()] = 0
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        return img, slope_sqr_diff

    @staticmethod
    def adjust_lab(img, L):
        """Adjusts the image in LAB space using the predicted curves
        :param img: Image tensor
        :param L: Predicited curve parameters for LAB channels
                  (three curves, concatenated in thirds: L, a, b)
        :returns: adjust image, and regularisation parameter
        :rtype: Tensor, float
        """
        img = img.permute(2, 1, 0)
        shape = img.shape
        img = img.contiguous()
        '''
        Extract predicted parameters for each L,a,b curve
        '''
        L1 = torch.exp(L[0:int(L.shape[0] / 3)])
        L2 = torch.exp(L[(int(L.shape[0] / 3)):(int(L.shape[0] / 3) * 2)])
        L3 = torch.exp(L[(int(L.shape[0] / 3) * 2):(int(L.shape[0] / 3) * 3)])
        slope_sqr_diff = Variable(torch.zeros(1) * 0.0).cuda()
        '''
        Apply the curve to the L channel
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img, L1, slope_sqr_diff, channel_in=0, channel_out=0)
        '''
        Now do the same for the a channel
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, L2, slope_sqr_diff, channel_in=1, channel_out=1)
        '''
        Now do the same for the b channel
        '''
        img_copy, slope_sqr_diff = ImageProcessing.apply_curve(
            img_copy, L3, slope_sqr_diff, channel_in=2, channel_out=2)
        img = img_copy.clone()
        del img_copy
        # NaN scrub
        img[(img != img).detach()] = 0
        img = img.permute(2, 1, 0)
        img = img.contiguous()
        return img, slope_sqr_diff
| ZombaSY/Pore-Net-release | models/utils.py | utils.py | py | 27,246 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.where",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.applyColorMap",
"l... |
5065913081 | """Transforms for preprocessing images during data loading"""
import PIL
import torch
import copy
import numpy as np
def img_pad(img, mode='warp', size=224):
    """Crop/pad/resize ``img`` to a ``size`` x ``size`` output.

    Supported modes:
      warp       -- resize directly to (size, size), ignoring aspect ratio
      same       -- return an unmodified copy
      pad_same   -- paste the image centred on a black square canvas
      pad_resize -- scale so the longer edge equals ``size`` (keeping aspect
                    ratio), then centre on a black canvas
      pad_fit    -- like pad_same, but scale down first when the image is
                    larger than ``size`` in either dimension
    """
    assert (mode in ['same', 'warp', 'pad_same', 'pad_resize', 'pad_fit']), 'Pad mode %s is invalid' % mode
    image = img.copy()
    if mode == 'same':
        return image
    if mode == 'warp':
        return image.resize((size, size), PIL.Image.BICUBIC)
    # remaining modes all paste onto a square black canvas of side ``size``
    img_size = image.size  # (width, height)
    ratio = float(size) / max(img_size)
    needs_shrink = mode == 'pad_resize' or (
        mode == 'pad_fit' and (img_size[0] > size or img_size[1] > size))
    if needs_shrink:
        img_size = tuple([int(img_size[0] * ratio), int(img_size[1] * ratio)])
        image = image.resize(img_size, PIL.Image.BICUBIC)
    canvas = PIL.Image.new("RGB", (size, size))
    canvas.paste(image, ((size - img_size[0]) // 2,
                         (size - img_size[1]) // 2))
    return canvas
def squarify_bbox(bbox, squarify_ratio, img_width):
    """Widen ``bbox`` in place so that width == height * squarify_ratio.

    The box grows symmetrically around its centre, then is clamped (and
    shifted back if needed) so its right edge stays within ``img_width``.
    Returns the same, mutated list.
    """
    box_w = abs(bbox[0] - bbox[2])
    box_h = abs(bbox[1] - bbox[3])
    delta = box_h * squarify_ratio - box_w
    bbox[0] -= delta / 2
    bbox[2] += delta / 2
    # Squarify is applied to bounding boxes in Matlab coordinate starting from 1
    if bbox[0] < 0:
        bbox[0] = 0
    # If the widened box spills past the right image border, shift it back.
    if bbox[2] > img_width:
        bbox[0] = bbox[0] - bbox[2] + img_width
        bbox[2] = img_width
    return bbox
def bbox_sanity_check(img, bbox):
    """Clip ``bbox`` (in place) to the boundaries of ``img``.

    Compensates for annotation-tool inconsistencies: negative coordinates
    become 0 and coordinates at or past the far edge become edge - 1.
    Returns the same, mutated list.
    """
    width, height = img.size
    bbox[0] = max(bbox[0], 0.0)
    bbox[1] = max(bbox[1], 0.0)
    bbox[2] = min(bbox[2], width - 1)
    bbox[3] = min(bbox[3], height - 1)
    return bbox
def jitter_bbox(img, bbox, mode, ratio):
    """Jitter the position or size of ``bbox``.

    mode: 'same'           -- return the box unchanged
          'enlarge'        -- grow the box by ``ratio``
          'random_enlarge' -- grow by a random factor in [0, ratio)
          'move'           -- shift the box by ``ratio``
          'random_move'    -- shift by a random factor in [-ratio, ratio)
    The pixel change is derived from the smaller box dimension and applied
    equally in x and y; the result is clipped to the image via
    bbox_sanity_check.
    """
    assert (mode in ['same', 'enlarge', 'move', 'random_enlarge', 'random_move']), \
        'mode %s is invalid.' % mode
    if mode == 'same':
        return bbox
    jitter_ratio = abs(ratio) if mode in ('enlarge', 'random_enlarge') else ratio
    if mode == 'random_enlarge':
        jitter_ratio = np.random.random_sample() * jitter_ratio
    elif mode == 'random_move':
        # random_sample() is in [0, 1); map it onto [-jitter_ratio, jitter_ratio)
        jitter_ratio = np.random.random_sample() * jitter_ratio * 2 - jitter_ratio
    if len(bbox) == 4:
        b = copy.deepcopy(bbox)
        bw = b[2] - b[0]
        bh = b[3] - b[1]
        dx = bw * jitter_ratio
        dy = bh * jitter_ratio
        # use the same pixel change in both directions, taken from the
        # smaller bbox dimension
        if dx < dy:
            dy = dx
        else:
            dx = dy
        if mode in ('enlarge', 'random_enlarge'):
            b[0] = b[0] - dx // 2
            b[1] = b[1] - dy // 2
        else:
            b[0] = b[0] + dx // 2
            b[1] = b[1] + dy // 2
        b[2] = b[2] + dx // 2
        b[3] = b[3] + dy // 2
        # Checks to make sure the bbox is not exiting the image boundaries
        jit_box = bbox_sanity_check(img, b)
    return jit_box
def crop_and_rescale(image, bbox, cropping_ratio, width, height):
    """Crop away the top ``cropping_ratio`` fraction of the image, resize the
    remainder to (width, height), and remap ``bbox`` into the new frame.

    Returns (resized_image, remapped_bbox).
    """
    orig_w, orig_h = image.size
    y_offset = orig_h * cropping_ratio
    cropped = image.crop((0, y_offset, orig_w, orig_h))
    image_new = cropped.resize((width, height), PIL.Image.BICUBIC)
    # scale factors from the cropped frame to the output frame
    scale_x = width / orig_w
    scale_y = height / orig_h / (1 - cropping_ratio)
    bbox_new = [int(bbox[0] * scale_x),
                int((bbox[1] - y_offset) * scale_y),
                int(bbox[2] * scale_x),
                int((bbox[3] - y_offset) * scale_y)]
    return image_new, bbox_new
def random_flip(image, bbox, probability):
    """Horizontally flip ``image`` with the given probability.

    When the flip happens, the bbox x-coordinates are mirrored in place to
    match. Returns (image, bbox) either way.
    """
    do_flip = float(torch.rand(1).item()) < probability
    if do_flip:
        image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        w, _ = image.size
        new_x_min = w - bbox[2]
        new_x_max = w - bbox[0]
        bbox[0] = new_x_min
        bbox[2] = new_x_max
    return image, bbox
| DongxuGuo1997/TransNet | src/transform/transforms.py | transforms.py | py | 6,739 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.new",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_num... |
42894401315 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 17:57:13 2019
@author: jcunanan
"""
#######################################
# Problem Description
#Bob the Adventurer is one step away from solving the mystery of an ancient Mayan tomb.
#He just approched the secret chamber where the secret Mayan scriptures are locked in a chest.
#There are N ancient statues in the room.
#After long thought, Bob figured out that in order to open the treasure chest
#he needs to stand in the middle of the room and hit every statue with a laser ray at the same time.
#Bob is a highly experienced adventurer, so setting multiple laser rays at the same time is not a problem for him.
#Moreover, every ray that he creates is perfectly straight and never changes direction at all.
#The middle of the room, where Bob is standing, has coordinates (0, 0).
#Every statue is located at some point with coordinates (x, y).
#Each statue is made of pure glass, so that if any ray hits it, it does not stop,
#but goes through the statue and continues beyond in the same, unchanged direction.
#Bob wonders how he can hit every ancient statue in the room using the fewest rays possible.
#Assume that the following declarations are given:
#class Point2D { public int x; public int y; }
#Write a function
#class Solution { public int solution(Point2D[] A); }
#that, given an array of points A, representing the locations of the statues,
#returns the minimal number of rays that Bob must set in order to hit every statue in the room.
#For example, given an array A
#A[0].x = -1 A[0].y = -2 (statue 0) A[1].x = 1 A[1].y = 2 (statue 1) A[2].x = 2 A[2].y = 4 (statue 2) A[3].x = -3 A[3].y = 2 (statue 3) A[4].x = 2 A[4].y = -2 (statue 4)
#your function should return 4.
#https://i.stack.imgur.com/ad5gc.png
#As is shown in the image, it is possible to create four rays in such a way that:
#the first will hit statue 0;
#the second will hit statues 1 and 2;
#the third will hit statue 3;
#the fourth will hit statue 4.
#Assume that:
#N is an integer within the range [1..100,000];
#the coordinates of each point in array A are integers within the range [−1,000,000,000..1,000,000,000];
#the elements of A are all distinct;
#Array A does not contain point (0,0).
#Complexity:
#expected worst-case time complexity is O(N);
#expected worst-case space complexity is O(N*log(N)), beyond input storage (not counting the storage required for input arguments).
#from extratypes import Point2D # library with types used in the task
from sympy import Point2D
def solution(A):
    """Return the minimal number of straight rays from (0, 0) needed to hit
    every statue.

    Two statues share a ray iff they lie in exactly the same direction from
    the origin, so the answer is the number of distinct directions.

    Fixes over the previous version:
    * O(N) instead of O(N^2): directions live in a set instead of being
      searched linearly in lists.
    * Exact arithmetic: each direction is the gcd-reduced integer vector
      (x // g, y // g), so distinct rational slopes can never collide for
      coordinates up to 1e9 (float slope division could). Vertical rays no
      longer need special casing, and the sign split handles east/west.
    * No sympy dependency: accepts Point2D-like objects (with .x / .y) or
      plain (x, y) pairs.

    :param A: non-empty list of statue positions, none at the origin.
    :returns: minimal number of rays (int).
    """
    import math  # local import: this module does not import math at top level
    directions = set()
    for p in A:
        x, y = (p.x, p.y) if hasattr(p, 'x') else (p[0], p[1])
        g = math.gcd(abs(x), abs(y))  # g > 0 because (0, 0) never occurs
        directions.add((x // g, y // g))
    return len(directions)
| j-cunanan/Fun-Algorithm-Problems | Destroy_all_statues.py | Destroy_all_statues.py | py | 3,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sympy.Point2D",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "sympy.Point2D",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sympy.Point2D",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sympy.Point2D",
"line_n... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.