seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
36127467524 | import json
from typing import Dict
from kafka import KafkaConsumer
from main import StationStatus, Station
# Most recent station-status snapshot decoded from the topic.
station_status = dict()

if __name__ == '__main__':
    # Consume JSON-encoded city-bike messages, starting from the earliest
    # available offset so a fresh consumer replays the full history.
    consumer = KafkaConsumer(
        'city_bike_topic',
        bootstrap_servers=['localhost:9092'],
        auto_offset_reset='earliest',
        value_deserializer=lambda raw: json.loads(raw.decode('utf-8'))
    )
    for message in consumer:
        if message is None:
            continue
        payload = message.value
        # Build a typed StationStatus from the raw payload and print it.
        station_status = StationStatus(last_updated=payload['last_updated'],
                                       stations=payload['stations'])
        print(station_status)
#print(consumer.topics()) | Kelvingandhi/kafka_sample | city_bike_consumer.py | city_bike_consumer.py | py | 1,167 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "kafka.KafkaConsumer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "main.StationStatus",
"line_number": 32,
"usage_type": "call"
}
] |
41272460993 | from imutils import paths
import face_recognition
import os
from shutil import copy
from PIL import Image, ImageDraw
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Hide the root Tk window and ask the user for a reference photo.
Tk().withdraw()
filename = askopenfilename()

# Encode the first face found in the reference image.
obama = face_recognition.load_image_file(filename)
folder = 'obama'
obamaface_encoding = face_recognition.face_encodings(obama)[0]

# Collect every .jpg directly under the images directory.
path = 'images/'
images = []
for file in os.listdir(path):
    if file.endswith(".jpg"):
        images.append(os.path.join(path, file))

# Make sure the output folder exists.
isExist = os.path.exists(folder)
if not isExist:
    os.makedirs(folder)

# Copy every image containing a face matching the reference (tolerance 0.5)
# into the output folder.
for file_name in images:
    newPic = face_recognition.load_image_file(file_name)
    for face_encoding in face_recognition.face_encodings(newPic):
        results = face_recognition.compare_faces([obamaface_encoding], face_encoding, 0.5)
        if results[0]:
            # Fixed: destination was hard-coded to "./obama/" (ignoring the
            # `folder` setting) and used split("/")[1], which breaks on
            # nested paths and non-POSIX separators; basename is robust.
            copy(file_name, os.path.join(folder, os.path.basename(file_name)))
# unknown_picture = face_recognition.load_image_file("2.jpg")
# unknown_face_encoding = face_recognition.face_encodings(unknown_picture)[0]
# results = face_recognition.compare_faces([obamaface_encoding], unknown_face_encoding)
# if results[0] == True:
# print("It's a picture of obama!")
# else:
# print("It's not a picture of obama!") | SankojuRamesh/face_recognation | fr.py | fr.py | py | 1,295 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "face_recognition.load_image_file",
"line_number": 12,
"usage_type": "call"
},
{
"ap... |
31792102222 | # coding:utf-8
import sys
import window
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
# import pymysql
import threading
import pymysql
path = "./"
class Controller:
    """Coordinates navigation between the login dialog and the main window."""

    def __init__(self):
        pass

    def show_login(self):
        # Create the login dialog and route its switch_window signal so a
        # successful login opens the main window.
        self.login = LoginDialog()
        self.login.switch_window.connect(self.show_main)
        self.login.show()

    def show_main(self):
        # Close the login dialog, open the main window, and start the
        # background shadow worker.
        self.login.close()
        self.window = MainDialog()
        self.window.switch_window.connect(self.shutdown)
        self.window.show()
        from shadow import shadow;
        self.p = threading.Thread(target=shadow)
        # Daemon thread: it is terminated automatically when the parent
        # process exits.
        self.p.daemon = True
        self.p.start()

    def shutdown(self):
        print("-------- 结束接收数据 -----------")
        sys.exit()
class MainDialog(QDialog):
    """Main chat window: forwards user questions to the shadow backend."""

    # Emitted when the window closes so the controller can shut down.
    switch_window = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(QDialog, self).__init__(parent)
        self.ui = window.Ui_Dialog_Main()
        self.setWindowIcon(QIcon(path + "logo.ico"))
        self.ui.setupUi(self)

    # Emit the signal instead of closing directly; the controller decides.
    def close(self):
        self.switch_window.emit()

    def ask(self):
        # Read the question, delegate to shadow.chat, and show the answer.
        query = self.ui.textEdit.toPlainText().strip()
        print("收到询问: " + query)
        from shadow import chat
        back = chat(query)
        print("处理结果: " + back)
        self.ui.textEdit.setText(back)
class LoginDialog(QDialog):
    """Login dialog: validates the user against the MySQL backend."""

    # Emitted on successful login so the controller can open the main window.
    switch_window = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(QDialog, self).__init__(parent)
        self.ui = window.Ui_Dialog_Login()
        self.setWindowIcon(QIcon(path + "logo.ico"))
        self.ui.setupUi(self)

    def verily(self, name, email):
        """Return True when exactly one user row matches `name` and its
        email column equals `email`.

        NOTE(review): database credentials are hard-coded here; they
        should be moved to configuration.
        """
        conn = pymysql.connect(host='43.163.218.127',  # server address
                               user='root',            # user name
                               passwd='011026',        # password
                               port=3306,              # port, default 3306
                               db='aides',             # database name
                               charset='utf8')         # character encoding
        try:
            cur = conn.cursor()
            # Parameterized query: the previous string concatenation was
            # vulnerable to SQL injection through the name field.
            cur.execute("select * from `user` where `name`= %s", (name,))
            data = cur.fetchall()
            cur.close()
        finally:
            # Always release the connection, even if the query raises.
            conn.close()
        if len(data) != 1:
            return False
        return data[0][1] == email

    def write_conf(self, name, email, pwd, mode):
        """Persist the login settings to shadow.conf."""
        with open(path + "shadow.conf", 'w') as f:
            f.write("name: " + name + "\n")
            f.write("email: " + email + "\n")
            f.write("password: " + pwd + "\n")
            f.write("mode: " + mode + "\n")

    def start(self):
        """Validate the form; on success write the config and switch views."""
        name = self.ui.name.text()
        email = self.ui.email.text()
        pwd = self.ui.pwd.text()
        mode = self.ui.mode.text()
        if self.verily(name, email):
            self.write_conf(name, email, pwd, mode)
            # Jump to the main window.
            self.switch_window.emit()

    def clear(self):
        """Reset the login form fields."""
        self.ui.name.clear()
        self.ui.email.clear()
        self.ui.pwd.clear()
if __name__ == '__main__':
    # Start the Qt event loop and show the login dialog first.
    myapp = QApplication(sys.argv)
    myDlg = Controller()
    myDlg.show_login()
    sys.exit(myapp.exec_())
| northboat/Aides | app/app.py | app.py | py | 3,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "threading.Thread",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "shadow.shadow",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QDialog",
... |
37407108595 | from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.linear_model import LinearRegression
# dummy data: 1000-sample binary classification task, 10 features
# (5 informative, none redundant), deterministic via random_state
X, y = make_classification(n_samples=1000, n_features=10,
                           n_informative=5, n_redundant=0,
                           random_state=0, shuffle=False)
# splitting dataset into training and testing data (80/20 split)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# models
# NOTE(review): the target is binary, yet two of the three models are
# regressors; their continuous outputs are averaged with the forest's 0/1
# labels below — presumably intentional for this ensemble demo; confirm.
linear = LinearRegression()
xgb_ = xgb.XGBRegressor()
forest = RandomForestClassifier()
# training models on training data
linear.fit(X_train, y_train)
xgb_.fit(X_train, y_train)
forest.fit(X_train, y_train)
# predictions for each model
pred_1 = linear.predict(X_test)
pred_2 = xgb_.predict(X_test)
pred_3 = forest.predict(X_test)
# see what we are working with
print("this is pred_1: ", pred_1)
print("this is the length of pred_1: ", len(pred_1))
# MSE for individual models
print("MSE pred_1:", mean_squared_error(y_test, pred_1))
print("MSE pred_2:", mean_squared_error(y_test, pred_2))
print("MSE pred_3:", mean_squared_error(y_test, pred_3))
# averaging model predicitions (simple unweighted mean ensemble)
final = (pred_1 + pred_2 + pred_3)/3
# MSE for ensemble model
print("Final MSE:", mean_squared_error(y_test, final))
| HyperionDevBootcamps/C4_DS_lecture_examples | Lecture code/Machine Learning/Decision Trees/Ensemble.py | Ensemble.py | py | 1,443 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 17,
"... |
28834678402 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 14:34:04 2015
@author: 89965
fonctions de structurelles diverses
"""
import os
import re
import logging
import subprocess
from collections import defaultdict
import psutil
import pyetl.formats.formats as F
import pyetl.formats.mdbaccess as DB
from .outils import charge_mapping, remap, prepare_elmap, renseigne_attributs_batch
LOGGER = logging.getLogger('pyetl')
def map_struct(regle):
    """Map the structure: foreign keys and functions.

    Loads the mapping file using the mapping already attached to the
    rule's schema.
    """
    charge_mapping(regle, mapping=regle.schema.mapping)
def _map_schemas(regle, obj):
    '''Try to find a mapping for a class: build/select the target schema.'''
    if obj is None:
        # Called without an object: only useful when an input schema name
        # was configured; otherwise nothing can be mapped yet.
        if regle.getvar("schema_entree"):
            schema_origine = regle.stock_param.schemas[regle.getvar("schema_entree")]
            print('-------------------------mapping', schema_origine)
        return
    else:
        schema_origine = obj.schema.schema
    if regle.params.val_entree.val:
        # An explicit output schema name was given: create it from the model.
        schema2 = regle.stock_param.init_schema(regle.params.val_entree.val,
                                                modele=schema_origine, origine='B')
    else:
        schema2 = obj.schema.schema
    regle.schema = schema2
    if schema2.elements_specifiques:
        # Remap each of the schema's specific elements through the element map.
        for i in schema2.elements_specifiques:
            spec = schema2.elements_specifiques[i]
            mapped = remap(spec, regle.elmap)
            schema2.elements_specifiques[i] = mapped
    else:
        LOGGER.info("pas d'elements specifiques")
    # Copy every class of the origin schema into the target, then apply the
    # class renames listed in the mapping.
    for i in schema_origine.classes:
        schema2.get_classe(i, modele=schema_origine.classes[i], cree=True)
    for i in list(schema_origine.classes.keys()):
        if i in regle.mapping:
            schema2.renomme_classe(i, regle.mapping[i])
    # Attribute-level mapping (foreign keys): rename attributes per class.
    for clef in schema2.classes:
        if clef in regle.mapping_attributs:
            for orig, dest in regle.mapping_attributs[clef].items():
                schema2.classes[clef].rename_attribut(orig, dest)
def applique_mapping(regle):
    """Apply the mapping: handle foreign keys and special elements."""
    mapping = regle.schema.mapping
    regle.elmap = prepare_elmap(mapping)
    _map_schemas(regle, None)
    regle.nbstock = 0
    for i in mapping:
        # Propagate each class rename to the target references of all classes.
        for scl in regle.schema.classes.values():
            scl.renomme_cible_classe(i, mapping[i])
def h_map2(regle):
    """Set up the rule for dynamic structure mapping.

    Marks the rule as storing, processes one object per block and delegates
    the stock phase to ``applique_mapping``.
    """
    regle.nbstock = 0
    regle.blocksize = 1
    regle.store = True
    regle.traite_stock = applique_mapping
def f_map2(regle, obj):
    '''#aide||mapping en fonction d'une creation dynamique de schema
    #aide_spec||parametres: mappe les structures particulieres
    #pattern2||;;;map;=#struct;;
    '''
    # Capture the object's schema; the actual mapping happens later in the
    # stock phase (applique_mapping).
    regle.schema = obj.schema.schema
    regle.nbstock = 1
def h_map(regle):
    '''Preload the mapping file and prepare the lookup dictionaries.'''
    # dynlevel: do mapping file names depend on the input data?
    # 0 = static, 1 = per directory ([C]), 2 = per file ([F]).
    regle.dynlevel = 0
    regle.mapping = None
    regle.schema = None
    regle.changeschema = True
    fich = regle.params.cmp1.val
    if "[F]" in fich:
        regle.dynlevel = 2
    elif "[C]" in fich:
        regle.dynlevel = 1
    if regle.dynlevel:
        # NOTE(review): this initializes `clefdyn` while f_map compares
        # `regle.clef_dyn` (with underscore) — confirm which is intended.
        regle.clefdyn = ""
    else:
        # Static mapping file: load it once at setup time.
        charge_mapping(regle)
        _map_schemas(regle, None)
def f_map(regle, obj):
    '''#aide||mapping en fonction d'un fichier
    #aide_spec||parametres: map; nom du fichier de mapping
    #aide_spec2||si #schema est indique les objets changent de schema
    #pattern||?=#schema;?C;;map;C;;
    #test||obj||^#schema;test;;map;%testrep%/refdata/map.csv;;||atv;toto;A
    #test2||obj||^#schema;test;;map+-;%testrep%/refdata/map.csv;;||cnt;2
    '''
    if regle.dynlevel:  # dynamic rule: reload the mapping when the key changes
        clef_dyn = regle.stock_param.chemin_courant if regle.dynlevel == 1\
            else regle.stock_param.fichier_courant
        # NOTE(review): h_map initializes `regle.clefdyn` (no underscore)
        # while this reads `regle.clef_dyn` — confirm which is intended.
        if clef_dyn != regle.clef_dyn:
            charge_mapping(regle)
    if not regle.schema:
        _map_schemas(regle, obj)
    clef = obj.ident
    schema2 = regle.schema
    if clef in regle.mapping:
        # Mapped class: rename the object and move it to the target schema.
        nouv = regle.mapping.get(clef)
        obj.setident(nouv, schema2=schema2)
        if clef in regle.mapping_attributs:
            # Apply the attribute-level renames for this class.
            for orig, dest in regle.mapping_attributs[clef].items():
                try:
                    obj.attributs[dest] = obj.attributs[orig]
                    del obj.attributs[orig]
                except KeyError:
                    # Source attribute missing: create the target empty.
                    obj.attributs[dest] = ''
        return True
    return False
def store_traite_stock(regle):
    '''Release the stored objects (optionally sorted) back into the engine.'''
    store = regle.tmpstore
    reverse = regle.params.cmp2.val == 'rsort'
    if isinstance(store, list):
        if regle.params.cmp2.val:
            # Sort on the concatenation of the requested attribute values.
            keyval = lambda obj: "|".join(obj.attributs.get(i, '')
                                          for i in regle.params.att_entree.liste)
            store.sort(key=keyval, reverse=reverse)
        for obj in store:
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    else:
        # Dict-based store: iterate keys, sorted when a sort mode was asked.
        for clef in sorted(store.keys(), reverse=reverse) if regle.params.cmp2.val else store:
            obj = store[clef]
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    h_stocke(regle)  # reinitialize the store
def h_stocke(regle):
    '''Mark the rule as a storing rule and (re)initialize its store.'''
    mode = regle.params.cmp1.val
    regle.store = True
    regle.nbstock = 0
    regle.traite_stock = store_traite_stock
    # Default: store full objects — keyed by value when a mode is given,
    # otherwise in insertion order.
    regle.stocke_obj = True
    regle.tmpstore = dict() if mode else list()
    # Comparison mode: the store is reused elsewhere, not released directly.
    regle.direct_reuse = 'cmp' not in mode
    regle.fold = mode == 'cmpf'
    if regle.params.cmp2.val == 'clef':
        # Key-only storage: keep a set instead of full objects.
        regle.stocke_obj = False
        regle.tmpstore = set()
def f_stocke(regle, obj):
    '''#aide||stockage temporaire d'objets pour assurer l'ordre dans les fichiers de sortie
    #aide_spec||liste de clefs,tmpstore;uniq;sort|rsort : stockage avec option de tri
    #aide_spec2||liste de clefs,tmpstore;cmp;nom : prechargement pour comparaisons
    #pattern1||;;?L;tmpstore;?=uniq;?=sort;||
    #pattern2||;;?L;tmpstore;?=uniq;?=rsort;||
    #pattern3||;;?L;tmpstore;=cmp;A;?=clef||
    #pattern4||;;?L;tmpstore;=cmpf;A;?=clef||
    #test||obj;point;4||^;;V0;tmpstore;uniq;rsort||^;;C1;unique||atv;V0;3;
    #test2||obj;point;4||^V2;;;cnt;-1;4;||^;;V2;tmpstore;uniq;sort||^;;C1;unique;||atv;V2;1;
    '''
    if obj.virtuel:
        return True  # virtual objects pass through without being stored
    if regle.direct_reuse:
        regle.nbstock += 1
    if regle.params.cmp1.val:
        # Keyed storage mode: index the object by its attribute values.
        if len(regle.params.att_entree.liste) > 1:
            clef = "|".join(obj.attributs.get(i, '') for i in regle.params.att_entree.liste)
        else:
            clef = obj.attributs.get(regle.params.att_entree.val, '')
        if regle.stocke_obj:
            regle.tmpstore[clef] = obj
        else:
            # NOTE(review): key-only mode ('clef') uses a set, yet the
            # object (not the key) is added here — confirm this is intended.
            regle.tmpstore.add(obj)
        return True
    # No mode: plain ordered list storage.
    regle.tmpstore.append(obj)
    return True
def h_uniq(regle):
    '''Initialize the key set used for uniqueness filtering.'''
    regle.tmpstore = set()
def f_uniq(regle, obj):
    '''#aide||unicite de la sortie laisse passer le premier objet et filtre le reste
    #aide_spec||liste des attibuts devant etre uniques si #geom : test geometrique
    #pattern||;?=#geom;?L;unique;;;
    #test||obj;point;2||^;;C1;unique||+fail:;;;;;;;pass>;;||cnt;1
    #test2||obj;point;2||^;;C1;unique-||cnt;1
    #test3||obj;point;2||^;#geom;;unique-||cnt;1
    #test4||obj;point;2||^;#geom;C1;unique-||cnt;1
    '''
    # Dedup key: geometry coordinates when #geom was requested, plus the
    # values of the listed attributes.
    clef = str(tuple(tuple(i) for i in obj.geom_v.coords))\
        if regle.params.val_entree.val == '#geom' else ''
    clef = clef + "|".join(obj.attributs.get(i, '') for i in regle.params.att_entree.liste)
    if clef in regle.tmpstore:
        return False  # already seen: filter out
    regle.tmpstore.add(clef)
    return True
def h_uniqcnt(regle):
    '''Initialize the per-key counters used for bounded uniqueness.'''
    # Default to keeping a single object per key when no count is given.
    regle.maxobj = regle.params.cmp1.num or 1
    regle.cnt = regle.maxobj > 1
    regle.tmpstore = defaultdict(int)
def f_uniqcnt(regle, obj):
    '''#aide||unicite de la sortie laisse passer les N premiers objet et filtre le reste
    #pattern||A;?=#geom;?L;unique;?N;||sortie
    #schema||ajout_attribut
    #test||obj;point;4||^X;;C1;unique;2;||+fail:;;;;;;;pass>;;||cnt;2
    #test2||obj;point;4||^X;;C1;unique-;2;||cnt;2
    #test3||obj;point;4||^X;#geom;;unique-;2;||cnt;2
    #test4||obj;point;4||^X;#geom;C1;unique-;2;||cnt;2
    #test4||obj;point;4||V0;1;;;V0;2;;set;;;||^X;#geom;V0;unique>;1;;||cnt;1
    '''
    # Dedup key: geometry (when #geom) plus the listed attribute values.
    clef = str(tuple(tuple(i) for i in obj.geom_v.coords))\
        if regle.params.val_entree.val == '#geom' else ''
    clef = clef + "|".join(obj.attributs.get(i, '') for i in regle.params.att_entree.liste)
    regle.tmpstore[clef] += 1
    # Expose the occurrence number on the output attribute.
    obj.attributs[regle.params.att_sortie.val] = str(regle.tmpstore[clef])
    if regle.tmpstore[clef] > regle.maxobj:
        return False  # over the allowed count for this key: filter out
    return True
def sortir_traite_stock(regle):
    '''Final write of the stored objects to the output writer.'''
    print('traite stock sortir', regle.final)
    if regle.final:
        # Terminal rule: the writer flushes everything itself.
        regle.f_sortie.ecrire_objets(regle, True)
        regle.nbstock = 0
        return
    # Non-terminal rule: stream each stored object out, then send it on
    # to the "end:" branch for further processing.
    for groupe in list(regle.stockage.keys()):
        for obj in regle.recup_objets(groupe):
            regle.f_sortie.ecrire_objets_stream(obj, regle, False)
            regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["end:"])
    regle.nbstock = 0
def h_sortir(regle):
    '''Prepare the output: writer, schema names, fanout and streaming mode.'''
    if regle.params.att_sortie.val == "#schema":
        # Force the schema names used for writing.
        regle.nom_fich_schema = regle.params.val_entree.val
    else:
        regle.nom_fich_schema = regle.params.cmp2.val
    regle.nom_base = os.path.basename(regle.params.cmp2.val
                                      if regle.params.cmp2.val else regle.nom_fich_schema)
    if regle.debug:
        print("nom de schema ", regle.nom_fich_schema)
    if '[' in regle.params.cmp1.val:  # a fanout was given as "format[fanout]"
        tmplist = regle.params.cmp1.val.find('[')
        regle.setvar("fanout", regle.params.cmp1.val[tmplist+1:-1])
        regle.params.cmp1.val = regle.params.cmp1.val[:tmplist]
    regle.f_sortie = F.Writer(regle.params.cmp1.val)  # everything else
    if regle.f_sortie.nom_format == 'sql':  # sql dialects and connected mode
        destination = regle.f_sortie.writerparms.get('destination')
        dialecte = regle.f_sortie.writerparms.get('dialecte')
        regle.f_sortie.writerparms['reinit'] = regle.getvar('reinit')
        regle.f_sortie.writerparms['nodata'] = regle.getvar('nodata')
        if destination:  # try to connect to the destination database
            connection = DB.dbaccess(regle.stock_param, destination)
            if connection.valide:
                regle.f_sortie.gensql = connection.gensql  # connected instance
            elif dialecte:
                # Connection failed: fall back to the plain dialect generator.
                regle.f_sortie.gensql = dialecte.gensql()
    elif regle.f_sortie.nom_format == 'file':  # generic text file output
        dialecte = regle.f_sortie.writerparms.get('dialecte')
        regle.ext = dialecte
    if regle.params.cmp2.val and regle.params.cmp2.val != "#print":
        # Redirect the output root under the requested subdirectory.
        rep_base = regle.getvar('_sortie', loc=0)
        regle.setvar('_sortie', os.path.join(rep_base, regle.params.cmp2.val), loc=1)
    regle.fanout = regle.getvar("fanout", 'groupe')\
        if regle.f_sortie.multiclasse else 'classe'
    regle.calcule_schema = regle.f_sortie.calcule_schema
    regle.memlimit = int(regle.getvar('memlimit', 0))
    regle.store = None
    regle.nbstock = 0
    regle.traite_stock = sortir_traite_stock
    if regle.stock_param.debug:
        print('sortir :', regle.params.att_entree.liste)
    regle.final = True
    regle.menage = True
    if regle.copy and regle.getvar("mode_sortie", "A") == "D":
        # In copy + direct streaming mode this rule must not consume objects.
        regle.final = False
        regle.copy = False
    regle.valide = True
def setschemasortie(regle, obj):
    '''Attach the output schema and attribute list to the object.'''
    if regle.nom_fich_schema:
        # Copy the schema under the forced name so it is no longer modified
        # after being written.
        regle.change_schema_nom(obj, regle.nom_fich_schema)
    if obj.schema and obj.schema.amodifier(regle):
        obj.schema.setsortie(regle.f_sortie, os.path.join(regle.getvar('_sortie'),
                                                          os.path.dirname(regle.params.cmp1.val)))
        obj.schema.setminmaj(regle.f_sortie.minmaj)
    if regle.params.att_entree.liste:
        # Fixed typo: the original assigned `obj.liste_atttributs` (three
        # t's), an attribute nothing reads — f_sortir restores
        # `obj.liste_attributs`, so the selection never took effect.
        obj.liste_attributs = regle.params.att_entree.liste
def f_sortir(regle, obj):
    '''#aide||sortir dans differents formats
    #aide_spec||parametres:?(#schema;nom_schema);?liste_attributs;sortir;format[fanout]?;?nom
    #pattern||?=#schema;?C;?L;sortir;?C;?C||sortie
    #test||redirect||obj||^Z;ok;;set||^;;;sortir;csv;#print||end
    '''
    if obj.virtuel:  # virtual objects are never written
        return True
    # Remember the object's current attribute list and schema so they can
    # be restored if the object continues through the pipeline.
    listeref = obj.liste_attributs
    schemaclasse_ref = obj.schema
    setschemasortie(regle, obj)
    if regle.store is None:  # decide once whether this rule must store
        regle.store = regle.f_sortie.calcule_schema and\
            (not obj.schema or not obj.schema.stable)
        if regle.store:  # adjust the rule's branchings accordingly
            regle.setstore()
    if regle.store:
        regle.nbstock += 1
        groupe = obj.attributs["#groupe"]
        if groupe != "#poubelle":  # trash group: object is simply dropped
            nom_base = regle.nom_base
            if regle.stock_param.stream:  # streaming: output class by class
                if groupe not in regle.stockage:
                    # New group: flush the previous one first.
                    regle.f_sortie.ecrire_objets(regle, False)
                    regle.compt_stock = 0
            regle.endstore(nom_base, groupe, obj, regle.final,
                           geomwriter=regle.f_sortie.tmp_geom, nomgeom=regle.f_sortie.nom_fgeo)
        return True
    regle.f_sortie.ecrire_objets_stream(obj, regle, False)
    obj.schema = None
    if regle.final:
        return True
    # Regenerate the object and send it back into the circuit: reattach the
    # pre-write schema as if it had been copied.
    obj.setschema(schemaclasse_ref)
    obj.liste_attributs = listeref
    return True
def valreplace(chaine, obj):
    '''Substitute [name] placeholders in `chaine` with attribute values.

    Placeholder names may start with '#'; unknown attributes are replaced
    by the empty string.
    '''
    pattern = r'\[(#?[a-zA-Z_][a-zA-Z0-9_]*)\]'

    def lookup(match):
        # Resolve the placeholder name against the object's attributes.
        return obj.attributs.get(match.group(1), '')

    return re.sub(pattern, lookup, chaine)
def preload(regle, obj):
    '''Preload data, either through a pyetl macro or by reading a file.'''
    # Substitute [attr] placeholders in the macro command from the object.
    vrep = lambda x: regle.resub.sub(regle.repl, x)
    chaine_comm = vrep(regle.params.cmp1.val)
    regle.setvar('nocomp', False)
    process = psutil.Process(os.getpid())
    mem1 = process.memory_info()[0]  # memory baseline, for reporting below
    if obj and regle.params.att_entree.val:
        entree = obj.attributs.get(regle.params.att_entree.val, regle.fich)
    else:
        entree = regle.entree if regle.entree else valreplace(regle.fich, obj)
    print('------- preload commandes:(', chaine_comm, ') f:', entree,
          'clef', regle.params.att_sortie.val)
    if chaine_comm:  # preload through a macro (sub-pyetl run)
        nomdest = regle.params.cmp2.val if regle.params.cmp2.val.startswith('#') \
            else '#'+ regle.params.cmp2.val
        processor = regle.stock_param.getpyetl(chaine_comm, entree=entree, rep_sortie=nomdest)
        processor.process()
        renseigne_attributs_batch(regle, obj, processor.retour)
        print('------- preload ', processor.store)
        # Bring back the sub-process storage dictionaries and the store key.
        regle.stock_param.store.update(processor.store)
        regle.setvar('storekey', processor.retour)
    else:
        # Direct file preload through the internal tmpstore rule.
        chemin = os.path.dirname(entree)
        fichier = os.path.basename(entree)
        ext = os.path.splitext(fichier)[1]
        lecteur = regle.stock_param.reader(ext)
        regle.reglestore.tmpstore = dict()
        nb_total = 0
        try:
            nb_total = lecteur.lire_objets('', chemin, fichier, regle.stock_param,
                                           regle.reglestore)
            regle.stock_param.store[regle.params.cmp2.val] = regle.reglestore.tmpstore
        except FileNotFoundError:
            regle.stock_param.store[regle.params.cmp2.val] = None
            print('fichier inconnu', os.path.join(chemin, fichier))
    mem2 = process.memory_info()[0]
    mem = mem2-mem1
    # NOTE(review): nb_total is only bound in the file-reading branch —
    # presumably a NameError when a macro command is used; confirm.
    print('------- preload ', nb_total, mem, '--------', int(mem/(nb_total+1)))
def h_preload(regle):
    '''Prepare the preload rule: storage sub-rule, substitution regex, dyn level.'''
    obj = None
    mapper = regle.stock_param
    # Internal tmpstore rule used to hold the preloaded objects.
    reglestore = mapper.interpreteur(";;;;;;"+regle.params.att_sortie.val+
                                     ";tmpstore;cmp;"+regle.params.cmp2.val, "", 99999)
    regle.reglestore = reglestore
    # NOTE(review): this lambda closes over the local `obj`, which stays
    # None — using it on a matching placeholder would raise; confirm.
    regle.repl = lambda x: obj.attributs.get(x.group(1), '')
    regle.resub = re.compile(r'\[(#?[a-zA-Z_][a-zA-Z0-9_]*)\]')
    fich = regle.params.val_entree.val
    regle.fich = fich
    # dynlevel: 0 = static file, 1 = per group/root ([G]/[R]),
    # 2 = per file ([F]), 3 = other attribute placeholders.
    regle.dynlevel = 0
    if '[R]' in fich:
        regle.dynlevel = 1
    if "[F]" in fich:
        regle.dynlevel = 2
    elif "[G]" in fich:
        regle.dynlevel = 1
    elif "[" in fich:
        regle.dynlevel = 3
    regle.entree = None
    if regle.dynlevel == 0:  # no selector: preload once, before reading data
        regle.entree = regle.params.val_entree.val
        regle.fich = regle.entree
        preload(regle, None)
        regle.valide = "done"
    print('==================h_preload===', regle.dynlevel, regle.valide)
def f_preload(regle, obj):
    '''#aide||precharge un fichier en appliquant une macro
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec1||les elements entre [] sont pris dans l objet courant
    #aide_spec2||sont reconnus[G] pour #groupe et [F] pour #classe pour le nom de fichier
    #pattern||A;?C;?A;preload;?C;C
    #!test||
    '''
    fich = regle.fich
    if regle.dynlevel > 0:
        # Resolve the file-name selectors from the current object/context.
        fich = fich.replace('[G]', obj.attributs['#groupe'])
        fich = fich.replace('[R]', regle.stock_param.racine)
        fich = fich.replace('[F]', obj.attributs['#classe'])
    if fich != regle.entree:
        # Only reload when the resolved file actually changed.
        regle.entree = fich
        print('==================f_preload===', regle.stock_param.racine, regle.entree)
        preload(regle, obj)
    return True
def compare_traite_stock(regle):
    """Flush the leftover reference objects as deletions ('supp')."""
    sortie_supp = regle.branchements.brch["supp:"]
    moteur = regle.stock_param.moteur
    for restant in regle.comp.values():
        # Anything still in the reference store was never matched: deleted.
        restant.attributs[regle.params.att_sortie.val] = 'supp'
        restant.setident(regle.precedent)
        moteur.traite_objet(restant, sortie_supp)
    regle.comp = None
    regle.nbstock = 0
#def compare_traite_stock(regle):
# """ sort les objets detruits"""
# for clef, obj in regle.comp.items():
# if obj.redirect is None:
# obj.attributs[regle.params.att_sortie.val]='supp'
# regle.stock_param.moteur.traite_objet(obj, regle.branchements.brch["supp:"])
# regle.comp[clef] = None
# regle.comp = None
# regle.nbstock = 0
def h_compare(regle):
    """Prepare a comparison rule: extra outputs plus an empty reference store."""
    # Additional routing outputs for the four comparison outcomes.
    for sortie in ('new:', 'supp:', 'diff:', 'orig:'):
        regle.branchements.addsortie(sortie)
    regle.store = True
    regle.nbstock = 0
    regle.comp = None
    regle.precedent = None
    regle.traite_stock = compare_traite_stock
def f_compare2(regle, obj):
    '''#aide||compare a un element precharge
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec2||sort en si si egal en sinon si different
    #aide_spec3||si les elements entre [] sont pris dans l objet courant
    #pattern||A;;?L;compare2;A;C
    #helper||compare
    #schema||ajout_attribut
    #!test||
    '''
    # NOTE(review): unlike f_compare, regle.precedent is never updated
    # here, so this setup block re-runs for every object — confirm.
    if regle.precedent != obj.ident:
        comp = regle.stock_param.store[regle.params.cmp2.val]
        if regle.comp and comp is not regle.comp:
            # Reference store changed: flush deletions of the previous one.
            compare_traite_stock(regle)
        regle.nbstock = 1
        regle.comp = comp
        if regle.comp:
            # Precompute the comparison vectors of the reference store.
            if regle.params.att_entree.liste:
                regle.comp2 = {i: ([i.attributs[j] for j in
                                    regle.params.att_entree.liste]) for i in regle.comp}
            else:
                regle.comp2 = {i: ([i.attributs[j] for j in
                                    sorted([k for k in i.attributs if k[0] != "#"])])
                               for i in regle.comp}
    try:
        # NOTE(review): the multi-key test checks cmp1.liste but joins
        # att_entree.liste — confirm which parameter carries the key list.
        if len(regle.params.cmp1.liste) > 1:
            clef = "|".join(obj.attributs.get(i, '') for i in regle.params.att_entree.liste)
        else:
            clef = obj.attributs[regle.params.cmp1.val]
        ref = regle.comp2[clef]
        # NOTE(review): regle.ref is not initialized by h_compare, and `ref`
        # is later used as an object although comp2 stores value lists —
        # this path looks inconsistent; verify before relying on it.
        regle.ref.add(clef)
    except KeyError:
        obj.redirect = "new:"
        obj.attributs[regle.params.att_sortie.val] = 'new'
        return False
    if regle.params.att_entree.liste:
        compare = all([obj.attributs[i] == ref.attributs[i]
                       for i in regle.params.att_entree.liste])
    else:
        # Compare the full non-system attribute sets plus geometry.
        atts = {i for i in obj.attributs if i[0] != "#"}
        kref = {i for i in ref.attributs if i[0] != "#"}
        compare = atts == kref and all([obj.attributs[i] == ref.attributs[i]
                                        for i in atts]) and obj.geom == ref.geom
    if compare:
        return True
    obj.redirect = "diff:"
    obj.attributs[regle.params.att_sortie.val] = 'diff'
    ref.attributs[regle.params.att_sortie.val] = 'orig'
    regle.stock_param.moteur.traite_objet(ref, regle.branchements.brch["orig:"])
    # the original is put back into the processing circuit
    return False
def f_compare(regle, obj):
    '''#aide||compare a un element precharge
    #aide_spec||parametres clef;fichier;attribut;preload;macro;nom
    #aide_spec2||sort en si si egal en sinon si different
    #aide_spec3||si les elements entre [] sont pris dans l objet courant
    #pattern||A;;?L;compare;A;C
    #schema||ajout_attribut
    #!test||
    '''
    if regle.precedent != obj.ident:  # class just changed
        if regle.comp:
            # Flush the deletions of the previous class before switching.
            compare_traite_stock(regle)
        regle.nbstock = 1
        regle.comp = regle.stock_param.store[regle.params.cmp2.val]
        regle.precedent = obj.ident
    if regle.comp is None:
        return False
    try:
        # NOTE(review): the multi-key test checks cmp1.liste but joins
        # att_entree.liste — confirm which parameter carries the key list.
        if len(regle.params.cmp1.liste) > 1:
            clef = "|".join(obj.attributs.get(i, '')
                            for i in regle.params.att_entree.liste)
        else:
            clef = obj.attributs[regle.params.cmp1.val]
        # Matched references are consumed; leftovers become 'supp' at flush.
        ref = regle.comp.pop(clef)
    except KeyError:
        obj.redirect = "new:"
        obj.attributs[regle.params.att_sortie.val] = 'new'
        return False
    if regle.params.att_entree.liste:
        compare = all([obj.attributs[i] == ref.attributs[i]
                       for i in regle.params.att_entree.liste])
    else:
        # Compare the full non-system attribute sets plus geometry.
        atts = {i for i in obj.attributs if i[0] != "#"}
        kref = {i for i in ref.attributs if i[0] != "#"}
        compare = atts == kref and all([obj.attributs[i] == ref.attributs[i]
                                        for i in atts]) and obj.geom == ref.geom
    if compare:
        return True
    obj.redirect = "diff:"
    obj.attributs[regle.params.att_sortie.val] = 'diff'
    ref.attributs[regle.params.att_sortie.val] = 'orig'
    ref.setident(obj.ident)  # force the original onto the object's identity
    regle.stock_param.moteur.traite_objet(ref, regle.branchements.brch["orig:"])
    # the original is put back into the processing circuit
    return False
def f_run(regle, obj):
    '''#aide||execute un programme exterieur
    #aide_spec||attribut qui recupere le resultat, parametres , run , nom, parametres
    #pattern||?A;?C;?A;run;C;?C
    #schema||ajout_attribut
    '''
    # Per-object parameter: attribute value, or the fixed value as default.
    extra = obj.attributs.get(regle.params.att_entree.val, regle.params.val_entree.val)
    # Pass an argument vector instead of a single joined string: with
    # shell=False a plain string is treated as the program name on POSIX
    # and fails as soon as parameters are present.
    args = [part for part in (regle.params.cmp1.val, regle.params.cmp2.val, extra) if part]
    fini = subprocess.run(args, stderr=subprocess.STDOUT)
    if regle.params.att_sortie.val:
        # Store the CompletedProcess repr on the output attribute.
        obj.attributs[regle.params.att_sortie.val] = str(fini)
| klix2/mapper0_8 | pyetl/moteur/fonctions/traitement_divers.py | traitement_divers.py | py | 26,649 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "outils.charge_mapping",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "outils.remap",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "outils.prepare_e... |
13536005652 | from lib import setter, getter, io_tools
import argparse

# CLI: path to the campaigns config, the pileup sample to update, and the
# list of sites that forms its new whitelist.
parser = argparse.ArgumentParser()
parser.add_argument("--config", type = str, help = "path to campaigns config json file")
parser.add_argument("--PU", type = str, help = "name of the pileup sample to set sitewhitelist for")
parser.add_argument("--sites", type = str, nargs = "*", help = "site whitelist for the pileup")
args = parser.parse_args()

# Load the config, find every campaign using this pileup, and replace the
# pileup's SiteWhitelist in each of them.
config_dict = io_tools.import_jsonfile_as_OrderedDict(args.config)
campaigns = getter.get_campaigns_given_PU(config_dict, args.PU)
for campaign in campaigns:
    config_dict[campaign]['secondaries'][args.PU]['SiteWhitelist'] = args.sites

# NOTE(review): the output name 'campaigns.json' is hard-coded rather than
# reusing args.config — confirm this is intended.
io_tools.export_dict_to_jsonfile(config_dict, 'campaigns.json')
| tyjyang/CampaignManager | scripts/set-sitewhitelist-for-PU.py | set-sitewhitelist-for-PU.py | py | 712 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "lib.io_tools.import_jsonfile_as_OrderedDict",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "lib.io_tools",
"line_number": 10,
"usage_type": "name"
},
{
"a... |
30722724901 | #programmers_단어 변환
#=== import module ===#
from collections import deque
#=== variable declare ===#
#=== Function define ===#
def solution(begin, target, words):
if target not in words: return 0; #불가능한 경우
queue = deque();
queue.append([begin,0]); #current, visited
level = 0;
succeed = False;
while queue and not succeed:
level += 1;
for i in range(len(queue)):
current,visited = queue.popleft();
for idx in range(len(words)):
if visited & (1 << idx) != 0: continue; #이미 방문한 단어
nextWord = words[idx];
diff = 0;
for i in range(len(current)):
if current[i] != nextWord[i]: diff += 1;
if diff != 1: continue; #다른 것이 2개 이상이라서 한번에 변환 불가능
if nextWord == target: #성공 조건
succeed = True; break;
queue.append([nextWord,visited | (1 << idx)]);
if succeed: return level;
else: return 0;
#=== main function ===#
print(solution("hit","cog",["hot", "dot", "dog", "lot", "log"]));
| Hoony0321/Algorithm | 2022_02/26/programmers_단어 변환.py | programmers_단어 변환.py | py | 1,080 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
}
] |
3918203704 | import re
import os
import string
import shutil
import tempfile
import fontforge
import argparse
from string import Template
from pathlib import Path
from bs4 import BeautifulSoup
from bs4.formatter import XMLFormatter
class Colors:
OK = '\033[92m'
INFO = '\033[94m'
WARN = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
class SVGProcessor:
_path = None
_spool = None
_font_name = 'IconFont'
_qml_namespace = 'IconFont'
_qml_element_name = 'Icon'
_copyright = '(C) 2020 GONICUS GmbH'
_out_path = '.'
_strip_rect = False
_qt = False
def __init__(self, source_path, spool):
self._path = source_path
self._spool = spool
def run(self):
objects = {}
objects_lt = {}
index = 61000
for path in Path(self._path).rglob('*.svg'):
try:
svg = BeautifulSoup(open(path).read().encode('utf-8'), 'xml')
except FileNotFoundError:
print(f"{Colors.FAIL}✗{Colors.ENDC} file not found {Colors.BOLD}{path}{Colors.ENDC}")
return
if self._process(svg, path):
spool_name = os.path.join(self._spool, f'{index}.svg')
with open(spool_name, 'w') as f:
f.write(svg.prettify(formatter=XMLFormatter()))
objects[index] = spool_name
objects_lt[index] = os.path.splitext(str(path)[len(self._path) + 1:])[0]
index += 1
# Do font processing
if self._make_font(objects):
self._write_header()
self._write_cpp(objects_lt)
if self._qt:
self._write_qml()
def _write_header(self):
font_name = self._font_name.upper()
file_name = self._font_name + '.h'
if self._qt:
header = Template("""#ifndef ${FONT_NAME}_H
#define ${FONT_NAME}_H
#include <QObject>
#include <QtQml>
class ${NAME}Resolver : public QObject {
Q_OBJECT
QML_ELEMENT
public:
explicit ${NAME}Resolver(QObject* parent = nullptr);
virtual ~${NAME}Resolver() {}
Q_INVOKABLE quint16 indexOfPath(const QString& iconPath);
};
#endif
""")
else:
header = Template("""#ifndef ${FONT_NAME}_H
#define ${FONT_NAME}_H
#include <cstdint>
#include <string>
namespace $NAME {
uint16_t index(const std::string& path);
}
#endif
""")
with open(os.path.join(self._out_path, file_name), 'w') as f:
f.write(header.substitute(FONT_NAME=font_name, NAME=self._font_name))
print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _write_cpp(self, objects):
font_name = self._font_name.upper()
file_name = self._font_name + '.cpp'
data = '\n'.join(f' {{ "{name}", {index} }},' for index, name in objects.items())
if self._qt:
code = Template("""#include <QFontDatabase>
#include <QHash>
#include "${NAME}.h"
${NAME}Resolver::${NAME}Resolver(QObject* parent) : QObject(parent) {
static bool initialized = false;
if (!initialized) {
initialized = true;
QFontDatabase::addApplicationFont(":/${NAME}.ttf");
}
}
quint16 ${NAME}Resolver::indexOfPath(const QString& iconPath) {
static QHash<const QString, quint16> lookup_table {
$DATA
};
return lookup_table.value(iconPath, 0);
}
""")
else:
code = Template("""#include <iostream>
#include <map>
#include "${NAME}.h"
namespace $FONT_NAME {
uint16_t index(const std::string& path) {
static std::map<std::string, uint16_t> lookup_table {
$DATA
};
auto idx = lookup_table.find(path);
return idx == lookup_table.end() ? 0 : idx->second;
}
}
""")
with open(os.path.join(self._out_path, file_name), 'w') as f:
f.write(code.substitute(NAME=self._font_name, FONT_NAME=font_name, DATA=data))
print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _write_qml(self):
font_name = self._font_name.upper()
file_name = self._font_name + '.qml'
code = Template("""import QtQuick 2.15
import ${COMPONENT} 1.0 as IconFont
/// Loads and displays an icon of the icon font by giving the path to the icon svg file
Item {
id: control
width: icon.implicitWidth
height: control.size
/// Path to the icon svg file that should be loaded; empty string (default) unloads the icon
property string iconPath
/// Size of the icon in pixels (default: 32)
property int size: 32
/// Color of the icon (default: black)
property alias color: icon.color
IconFont.${NAME}Resolver {
id: resolver
}
Text {
id: icon
text: String.fromCharCode(resolver.indexOfPath(control.iconPath))
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
anchors.centerIn: parent
font.family: "${NAME}"
font.pixelSize: control.size
}
}
""")
with open(os.path.join(self._out_path, self._qml_element_name + ".qml"), 'w') as f:
f.write(code.substitute(FONT_NAME=font_name, NAME=self._font_name, COMPONENT=self._qml_namespace))
print(f'{Colors.OK}✓{Colors.ENDC} {f.name} has been generated')
def _process(self, svg, path):
# Skip icons that have no square dimensions
main = svg.find('svg')
if 'width' in main and 'height' in main:
if main['width'] != main['height']:
print(f"{Colors.WARN}âš {Colors.ENDC} {Colors.BOLD}{path}{Colors.ENDC} aspect ratio is not 1:1 - skipping")
return False
# Remove unit from size
width = int(re.findall(r'\d+', main['width'])[0])
height = int(re.findall(r'\d+', main['height'])[0])
# Remove bounding rectangles if any
if self._strip_rect:
for rect in svg.find_all('rect'):
if int(re.findall(r'\d+', rect['height'])[0]) == height and int(re.findall(r'\d+', rect['width'])[0]) == width:
rect.extract()
# Find element
element = self._findElement(svg)
# Check if there's no element
if len(svg.find_all(element)) == 0:
print(f"{Colors.WARN}âš {Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} has no relevant elements - skipping")
return False
# Check if there's more than one element
if len(svg.find_all(element)) != 1:
print(f"{Colors.INFO}🛈{Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} has no too many elements")
# Skip icons that use a 'rotate'
if svg.find(element, transform=re.compile('^rotate\(')):
print(f"{Colors.WARN}âš {Colors.ENDC} file {Colors.BOLD}{path}{Colors.ENDC} contains rotation - skipping")
return False
return True
def _findElement(self, svg):
for el in ['path', 'polygon', 'rect', 'circle']:
if len(svg.find_all(el)) != 0:
return el
return None
def _make_font(self, objects):
first = True
font = fontforge.font()
font.encoding = 'UnicodeFull'
font.fontname = self._font_name
font.familyname = self._font_name
font.fullname = self._font_name
font.copyright = self._copyright
for index, path in objects.items():
if first:
char = font.createChar(87)
char.importOutlines(str(path))
first = False
char = font.createChar(index)
try:
char.importOutlines(str(path))
except FileNotFoundError:
print(f"{Colors.FAIL}✗{Colors.ENDC} file not found {Colors.BOLD}{path}{Colors.ENDC}")
return False
font.selection.all()
path = os.path.join(self._out_path, self._font_name + ".ttf")
font.generate(path)
print(f'{Colors.OK}✓{Colors.ENDC} {path} has been generated')
return True
def __set_font_name(self, name):
allowed = set(string.ascii_lowercase + string.ascii_uppercase + string.digits + '_')
if set(name) <= allowed:
self._font_name = name
else:
print(f"{Colors.FAIL}✗{Colors.ENDC} only uppercase/lowercase characters, digits and _ are allowed for the font name")
exit()
def __get_font_name(self):
return self._font_name
def __set_out_path(self, path):
self._out_path = path
def __get_out_path(self):
return self._out_path
def __set_copyright(self, data):
self._copyright = data
def __get_copyright(self):
return self._copyright
def __set_strip_rect(self, data):
self._strip_rect = data
def __get_strip_rect(self):
return self._strip_rect
def __set_qt(self, data):
self._qt = data
def __get_qt(self):
return self._strip_rect
def __set_qml_element(self, data):
self._qml_element_name = data
def __get_qml_element(self):
return self._qml_element_name
def __set_qml_namespace(self, data):
self._qml_namespace = data
def __get_qml_namespace(self):
return self._qml_namespace
font_name = property(__get_font_name, __set_font_name)
out = property(__get_out_path, __set_out_path)
copyright = property(__get_copyright, __set_copyright)
strip_rect = property(__get_strip_rect, __set_strip_rect)
qt = property(__get_qt, __set_qt)
qml_namespace = property(__get_qml_namespace, __set_qml_namespace)
qml_element = property(__get_qml_element, __set_qml_element)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('--font-name', help='name of the generated font', default='IconFont')
parser.add_argument('--copyright', help='copyright notice placed inside the generated TTF file', default='(C) 2020 GONICUS GmbH')
parser.add_argument('--output', help='path where generated files are placed', default='.')
parser.add_argument('--strip-bounding-rect', action="store_true", help='path where generated files are placed')
parser.add_argument('--qt', action="store_true", help='whether to build Qt/QML style output files')
parser.add_argument('--qml-namespace', help='name of the QML namespace used in your .pro file', default='IconApp')
parser.add_argument('--qml-element', help='name of the QML icon element for this font', default='Icon')
args = parser.parse_args()
with tempfile.TemporaryDirectory() as spool:
processor = SVGProcessor(args.source, spool)
processor.font_name = args.font_name
processor.out = args.output
processor.copyright = args.copyright
processor.strip_rect = args.strip_bounding_rect
processor.qt = args.qt
processor.qml_element = args.qml_element
processor.qml_namespace = args.qml_namespace
processor.run()
del processo | 10f7c7/hershey2TTF | test.py | test.py | py | 11,054 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
71960799785 | import gluonbook as gb
from mxnet.gluon import data as gdata
import sys
import time
import matplotlib.pyplot as plt
mnist_train = gdata.vision.FashionMNIST(train=True)
mnist_test = gdata.vision.FashionMNIST(train=False)
# 训练集和测试集中每个类别的图像分别为6000, 1000, 因此len(mnist_train)=60000, len(mnist_test) = 10000
print(len(mnist_train), len(mnist_test))
# feature 对应高和宽均为28像素的图像, 每个像素的数值为0-255之间的8位无符号整数(unit8). 使用三维NDArray存储
feature, label = mnist_train[0]
print(feature.shape, feature.dtype)
print(label, type(label), label.dtype)
# 将数值标签转成相应的文本标签
def get_fashion_mnist_labels(labels):
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
# 定义可以在一行里画出多个图像和对应标签的函数
def show_fashion_mnist(images, labels):
#gb.use_svg_display()
# 这里的 _ 表示我们忽略(不使用)的变量。
_, figs = plt.subplots(1, len(images), figsize=(12, 12))
# zip() 函数用于将可迭代对象作为参数,将对象中对应的元素打包成一个个元组,然后返回由这些元组组成的对象。
# 如果各个可迭代对象的元素个数不一致,则返回的对象长度与最短的可迭代对象相同。
for f, img, lbl in zip(figs, images, labels):
f.imshow(img.reshape((28, 28)).asnumpy())
f.set_title(lbl)
f.axes.get_xaxis().set_visible(False)
f.axes.get_yaxis().set_visible(False)
plt.show()
# 显示训练集中0-11号图像
X, y = mnist_train[0:12]
show_fashion_mnist(X, get_fashion_mnist_labels(y))
batch_size = 256
# Vision Transforms: Transforms can be used to augment input data during training. You can compose multiple transforms sequentially
# ToTensor: Converts an image NDArray to a tensor NDArray.
# 通过ToTensor类将图像数据从 uint8 格式变换成 32 位浮点数格式,并除以 255 使得所有像素的数值均在 0 到 1 之间。
# ToTensor类还将图像通道从最后一维移到最前一维来方便之后介绍的卷积神经网络计算。
transformer = gdata.vision.transforms.ToTensor()
# Gluon的DataLoader允许使用多进程来加速数据读取(暂不支持 Windows 操作系统)
# 通过参数num_workers来设置4个进程读取数据。
if sys.platform.startswith('win'):
num_workers = 0
else:
num_workers = 4
# transform_first(fn, lazy=True): Returns a new dataset with the first element of each sample transformed by the transformer function fn.
# 通过数据集的transform_first函数,我们将ToTensor的变换应用在每个数据样本(图像和标签)的第一个元素,即图像之上。
# class mxnet.gluon.data.DataLoader(dataset, batch_size=None, shuffle=False, sampler=None, last_batch=None, batch_sampler=None,
# batchify_fn=None, num_workers=0, pin_memory=False, prefetch=None)
train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
batch_size, shuffle=True,
num_workers=num_workers)
# print(train_iter)
test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
batch_size, shuffle=False,
num_workers=num_workers)
# print(test_iter)
start = time.time()
for X, y in train_iter:
continue
print('%.2f sec' % (time.time() - start)) | fulinli/DeepLearning_MXNet | Fashion-MNIST.py | Fashion-MNIST.py | py | 3,590 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "mxnet.gluon.data.vision.FashionMNIST",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mxnet.gluon.data.vision",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon.data",
"line_number": 8,
"usage_type": "name"
},
{
"a... |
5940101912 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pickle
import configparser
import copy
import subprocess
from distutils.util import strtobool
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.cuda.amp import autocast, GradScaler
# from AutoEncoder import AE,DataIO,FlowDataset,SlidingSampler,FSI
from AutoEncoder import AE
from AutoEncoder import DataIO as dio
from AutoEncoder import FlowDataset as fds
from AutoEncoder import SlidingSampler as ss
from ForceAutoEncoder import FAE
from ForceAutoEncoder import DataIO as dio_force
from ForceAutoEncoder import ForceDataset as forcds
from ForceAutoEncoder import SlidingSampler as ss_force
from ConvxOpt import ConvxOpt, FSI
"""Set our seed and other configurations for reproducibility."""
seed = 10
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
""" Define GradScaler """
scaler = GradScaler() # point1: Scaling the gradient information
""" read config file """
setup = configparser.ConfigParser()
setup.read('input.ini')
epochs = int(setup['DeepLearning']['epochs'])
learning_rate = float(setup['DeepLearning']['learning_rate'])
optthresh = float(setup['DeepLearning']['optthresh'])
target_loss = float(setup['DeepLearning']['target_loss'])
batch_size = int(setup['DeepLearning']['batchsize'])
window_size = int(setup['DeepLearning']['batchsize'])
sliding = int(setup['DeepLearning']['sliding'])
fc_features = int(setup['DeepLearning']['full_connected'])
control = strtobool(setup['Control']['control'])
inptype = int(setup['Control']['inptype'])
ured = float(setup['MPC']['ured'])
R = float(setup['MPC']['R'])
"""We set the preference about the CFD"""
dt = float(setup['CFD']['dt'])
mach= float(setup['CFD']['mach'])
re = float(setup['CFD']['re'])
iz = int(setup['CFD']['iz'])
"""We set the start step, the last step, the intervals"""
nst = int(setup['MPC']['nst'])
nls = int(setup['MPC']['nls'])
nin = int(setup['CFD']['nin'])
""" Dataset """
gpaths = setup['CFD']['gpaths']
fpaths = setup['CFD']['fpaths']
fmpaths= setup['Control']['fmpaths']
""" Set Dynamics """
print('Set Dynamics...\n')
dataio = dio(nst,nls,nin,gpaths,fpaths,iz,fmpaths)
grids,ibottom = dataio.readgrid()
js,je,ks,ke,ls,le,ite1,ite2,jd,imove = ibottom
# cropped indices
jcuts = [0,je+1 ,1]
kcuts = [0,ke+1-2,1]
lcuts = [0,le+1-100,1]
# output cropped grid
dataio.tweak_writegrid(['grid_z0003'],grids,jcuts,kcuts,lcuts)
flows = dataio.readflow()
control_inp = None
if control: control_inp = dataio.readformom(inptype)
# Set Tensor form
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
])
test_dataset = fds(2,jcuts,kcuts,lcuts,flows,control_inp,control,transform)
sampler = ss(test_dataset,batch_size,sliding)
test_loader = torch.utils.data.DataLoader(
test_dataset,
sampler = sampler
)
orgdatas = []
for batch,label,u in test_loader:
test = batch[0][0]
tmp = label
orgdatas.append(test)
maxstep = int( torch.max(tmp).item() )
print('Set Forces...')
dioforce = dio_force(nst,nls,nin,gpaths,fpaths,iz,fmpaths)
forces = dioforce.readformom(0) # 0 : Only CL
transform_force = torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
])
test_dataset_force = forcds(2,jcuts,kcuts,lcuts,forces,window_size,sliding,control_inp,control,transform_force)
sampler_force = ss_force(test_dataset_force,window_size,sliding)
test_loader_force = torch.utils.data.DataLoader(
test_dataset_force,
sampler = sampler_force
)
print('Start MPC')
# use gpu if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
""" Load models """
model = torch.load("learned_model")
model_force = torch.load("learned_model_force")
reconstruction = []
step = nst
""" set instances """
convxopt = ConvxOpt(batch_size,inptype)
horizon = convxopt.T # horizontal window
fsi = FSI(jcuts,kcuts,lcuts,iz,dataio,mach,re,dt,inptype,ured,horizon)
with torch.no_grad():
# Initial state variables D_0 (X_0, Y_0)
features = next( iter(test_loader) ) # D_0
for icount in range(maxstep):
print('step = ', step)
step = step + nin*sliding
# # Set Fluid Force
# batch = features[0]
# batch = torch.squeeze(batch)
# batch = batch.to(torch.float32).to('cuda')
if control: u = torch.squeeze(features[2]).to(torch.float32).to('cuda')
# ## standalized input batches
# shift = torch.mean(batch,(0,2,3)).to(torch.float32)
# scale = torch.std(batch,(0,2,3)).to(torch.float32)
# for i in range(5):
# batch[:,i,:,:] = (batch[:,i,:,:] - shift[i])/(scale[i]+1.0e-11)
# ## compute reconstructions using autocast
# with autocast(False): # point 2 :automatic selection for precision of the model
# if control:
# inp = [batch,u]
# else:
# print('MPC needs control')
# exit()
# ### Extract gx in latent space and A, B matrices
# gx,A,B = model.encoder_forMPC(inp)
# cvec = gx[:,:horizon]
# ## prepare the objective function
# exit()
# ## unstandalized
# for i in range(5):
# X_tilde[:,i,:,:] = X_tilde[:,i,:,:] * (scale[i]+1.0e-11) + shift[i]
# Deep FSI
# forces = fsi.calc_force(X_tilde[:ind_half],u[:ind_half])
''' test '''
fluid_forces = next(iter(test_loader_force))[0].to(torch.float32).to('cuda')
struct_forces = fsi.structure_force(u,inptype,ured,mach)
struct_forces = torch.from_numpy(struct_forces)[None].to(torch.float32).to('cuda')
''''''''''''
## map forces into the latent space
### map fluid forces
batch = fluid_forces
with autocast(False): # point 2 :automatic selection for precision of the model
if control:
inp = [batch,u[0]]
else:
print('MPC needs control')
exit()
### Extract gx in latent space and A, B matrices
gx,Af,Bf = model_force.encoder_forMPC(inp)
cvec_fluid = gx[:,:horizon]
### map structure forces
batch = struct_forces
with autocast(False): # point 2 :automatic selection for precision of the model
if control:
inp = [batch,u[0]]
else:
print('MPC needs control')
exit()
### Extract gx in latent space and A, B matrices
gx,_,_ = model_force.encoder_forMPC(inp)
cvec_struct = gx[:,:horizon]
# MPC
cforces = [fluid_forces,struct_forces]
u_optim = convxopt.solve_cvx(cforces,R,Af,Bf)
exit()
reconstruction.append(X_tilde[0].cpu())
# """ Calc recreated error """
recerrors = []
for i,X_tilde in enumerate(reconstruction):
recdata = X_tilde.cpu().numpy()
orgdata = orgdatas[i].cpu().numpy()
# data shape = (batch * channels * height * width)
# error_norm = np.linalg.norm(recdata-orgdata,axis=1,ord=1)
# org_norm = np.linalg.norm(orgdata,axis=1,ord=1)
error_norm = np.linalg.norm(recdata-orgdata,axis=0,ord=1)
org_norm = np.linalg.norm(orgdata,axis=0,ord=1)
recerror = error_norm/(org_norm)
recerrors.append(recerror)
f = open('recerrors.pickle', 'wb')
pickle.dump(recerrors, f)
"""## Visualize Results
Let's try to reconstruct some test images using our trained autoencoder.
"""
print('Post')
with torch.no_grad():
nstepall = np.arange(nst,nls+nin,nin*sliding)
# write grid
out_gfiles = [
'./grid_z0003'
]
dataio.writegrid(out_gfiles,grids,jcuts,kcuts,lcuts)
# write flow
statedic = []
for i,rec in enumerate(reconstruction):
batch = rec.cpu().numpy()
nstep = nstepall[i]
fname = 'recflows/u3.0/recflow_z{:0=2}_{:0=8}'.format(iz,nstep)
q = copy.deepcopy( batch )
dataio.writeflow(fname,q,jcuts,kcuts,lcuts)
| MDIFS/DeepKoopmanDynamicalFSI | mpc.py | mpc.py | py | 8,203 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.manual_seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.manu... |
70110044584 | from django.contrib.auth import get_user_model
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from library_test_project.users.models import ScoreAbs
User = get_user_model()
class Author(models.Model):
name = models.CharField(_("Name of author"), max_length=255)
class Genre(models.Model):
name = models.CharField(_("Name of genre"), max_length=255)
class Book(ScoreAbs, models.Model):
author = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="books", verbose_name=_("author"))
genre = models.ForeignKey(Genre, on_delete=models.CASCADE, related_name="genre", verbose_name=_("genre"))
name = models.CharField(_("Name of book"), max_length=255)
description = models.TextField(_("Description"))
published_date = models.DateTimeField(_("Published date"), auto_now_add=True)
scored_users = models.ManyToManyField(User, through="BookScoredUsers")
class Comment(models.Model):
owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name="comments", verbose_name=_("Owner"))
book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name="comments", verbose_name=_("Book"))
text = models.TextField(_("Text"))
created_at = models.DateTimeField(_("Date of creation"), auto_now_add=True)
class UserFavoriteBooks(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name="favorited_users")
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="favorites")
class Meta:
unique_together = ["book", "user"]
class BookScoredUsers(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
score = models.FloatField(validators=[MinValueValidator(1), MaxValueValidator(10)])
class Meta:
unique_together = ["book", "user"]
| Bakdolot/library_test_project | library_test_project/library/models.py | models.py | py | 1,968 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
33167135913 | from collections import Counter
from contextlib import contextmanager, asynccontextmanager
import logging
import time
logger = logging.getLogger(__name__)
class TimingStats(Counter):
def __init__(self, verbose: bool = False):
super().__init__()
self.verbose = verbose
@contextmanager
def scope(self, key, *, verbose=False):
t1 = time.monotonic()
yield
sec = time.monotonic() - t1
self[key] += sec
if self.verbose:
logger.debug(f"{key} took {sec:.3f} seconds")
@asynccontextmanager
async def async_scope(self, key, *, verbose=False):
t1 = time.monotonic()
yield
sec = time.monotonic() - t1
self[key] += sec
if self.verbose:
logger.debug(f"{key} took {sec:.3f} seconds")
def report_strings(self):
return [f"{key}: {sec:.1f} sec" for key, sec in self.items()]
| andrew-landers-by/luman-1584-blob-timeout | luman_1584/timing.py | timing.py | py | 915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.monotonic",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.monotonic",
... |
14191916255 | # TODO: prevent utf-8 encoding errors in CSVs
# TODO: add a progress bar for all timed processes
# TODO: Maintain History of organizations analyzed
# TODO: Show time taken to scrape and analyze (tock - tick)
#Importing Libraries
import contextlib
import csv
import json
import os
import re
import time
import warnings
from platform import platform, system
import matplotlib.pyplot as plt
import requests
import spacy
import torch
import trafilatura
from bs4 import BeautifulSoup
from newsapi import NewsApiClient
from rich import box, print
from rich.align import Align
from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.progress import track
from rich.syntax import Syntax
from rich.text import Text
from spacy import displacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacytextblob.spacytextblob import SpacyTextBlob
from transformers import AutoModelForSequenceClassification, AutoTokenizer
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
# =========================#
# UTIL FUNCTIONS #
# =========================#
def parse_text_from_web(webURL: str) -> str:
"""Extracts the text from the main content of the web page. Removes the ads, comments, navigation bar, footer, html tags, etc
Args:
webURL (str): URL of the web page
Returns:
str: clean text from the web page
Raises:
trafilatura.errors.FetchingError: If the URL is invalid or the server is down
"""
with contextlib.suppress(Exception):
downloaded = trafilatura.fetch_url(webURL)
return trafilatura.extract(
downloaded,
include_comments=False,
include_tables=False,
with_metadata=False,
include_formatting=True,
target_language='en',
include_images=False,
)
# =========================#
# cleanup FUNCTIONS #
# =========================#
def cleanup_text(text: str) -> str:
"""Clean up the text by removing special characters, numbers, whitespaces, etc for further processing and to improve the accuracy of the model.
Args:
text (str): text to be cleaned up
Returns:
str: cleaned up text
"""
# text = re.sub(r'\d+', '', text) # remove numbers
# text = re.sub(r'\s+', ' ', text) # remove whitespaces
with contextlib.suppress(Exception):
# remove special characters except full stop and apostrophe
text = re.sub(r'[^a-zA-Z0-9\s.]', '', text)
# text = text.lower() # convert text to lowercase
text = text.strip() # remove leading and trailing whitespaces
text = text.encode('ascii', 'ignore').decode('ascii') # remove non-ascii characters
# split text into words without messing up the punctuation
text = re.findall(r"[\w']+|[.,!?;]", text)
text= ' '.join(text)
return text.replace(' .', '.')
# ========================#
# SCRAPING #
# ========================#
def scrape_news(organization: str) -> list:
# sourcery skip: inline-immediately-returned-variable, use-contextlib-suppress
try:
# newsAPI
api_key=os.getenv('NEWSAPI')
newsapi = NewsApiClient(api_key=api_key)
# get TOP articles, 1st page, grab 3 articles
all_articles = newsapi.get_everything(q=organization, from_param='2022-12-20', to='2023-01-12', language='en', sort_by='relevancy', page=1, page_size=10)
return all_articles
except Exception as e:
pass
# ========================#
# WRITE TO CSV #
# ========================#
def write_to_csv(organization: str, all_articles: dict) -> None:
with open('CSVs/COMMON.csv', 'w', encoding='utf-8', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Article", "Title", "Description", "URL", "Content", "Published"])
for idx, article in enumerate(all_articles['articles'], start=1):
title= article['title'].strip()
description= article['description'].strip()
publishedAt= article['publishedAt']
newsURL= article['url']
content= parse_text_from_web(newsURL)
content= cleanup_text(content)
# download the content from the url
writer.writerow([idx, article['title'], article['description'], article['url'], content, publishedAt])
print(f"✅ [bold green]SUCCESS! Wrote {idx} - [bold blue]{title}[/bold blue] to [gold1]{organization}[/gold1].csv")
# Adding the parsed content to the CSV
print(f"[bold green]DONE! WROTE {len(all_articles['articles'])} ARTICLES TO [r]COMMON.csv[/r][/bold green]")
# ========================#
# SENTIMENT scoring #
# ========================#
#egt the headlines
def get_headline(content, organization):
r = requests.get(content)
#parse the text
soup = BeautifulSoup(r.content, "html.parser")
if soup.find('h1'):
headline=soup.find('h1').get_text()
if len(headline.split())<=2:
headline="No Headline"
else:
headline="No Headline"
# TODO: HANDLE IMPROVISATION OF HEADERS LATER
return headline
def sentiment_score_to_summary(sentiment_score: int) -> str:
"""
Converts the sentiment score to a summary
Args:
sentiment_score (int): sentiment score
Returns:
str: summary of the sentiment score
"""
if sentiment_score == 1:
return "Extremely Negative"
elif sentiment_score == 2:
return "Somewhat Negative"
elif sentiment_score == 3:
return "Generally Neutral"
elif sentiment_score == 4:
return "Somewhat Positive"
elif sentiment_score == 5:
return "Extremely Positive"
#calculate the sentiment score
def sentiment_analysis(content: str) -> None:
"""
Performs sentiment analysis on the text and prints the sentiment score and the summary of the score
Args:
content (str): text/url to be analyzed
"""
tokenizer = AutoTokenizer.from_pretrained(
"nlptown/bert-base-multilingual-uncased-sentiment")
model = AutoModelForSequenceClassification.from_pretrained(
"nlptown/bert-base-multilingual-uncased-sentiment")
tokens = tokenizer.encode(
content, return_tensors='pt', truncation=True, padding=True)
result = model(tokens)
result.logits
sentiment_score = int(torch.argmax(result.logits))+1
return sentiment_score_to_summary(sentiment_score)
# sourcery skip: identity-comprehension
def process_csv(organization):
    """Score every article in CSVs/COMMON.csv and write the results to
    CSVs/COMMON-PROCESSED.csv.

    For each article row the function derives a headline, a headline
    sentiment, an offense rating accumulated from word lists
    (negative/offensive words, lawsuit/harassment terms) and a list of
    tags.  `organization` is only forwarded to get_headline.
    """
    # Load the scoring word lists (one term per line).
    with open ('word-store/negative_words.txt', 'r', encoding='utf-8') as file:
        negative_words_list = file.read().splitlines()
    with open ('word-store/bad_words.txt', 'r', encoding='utf-8') as file:
        bad_words = file.read().splitlines()
    with open ('word-store/countries.txt', 'r', encoding='utf-8') as file:
        countries = file.read().splitlines()
    with open('word-store/lawsuits.txt', 'r', encoding='utf-8') as file:
        lawsuits = file.read().splitlines()
    with open('word-store/harassment.txt', 'r', encoding='utf-8') as file:
        harassment = file.read().splitlines()
    # ========================#
    #   Creating Final csv    #
    # ========================#
    with open('CSVs/COMMON-PROCESSED.csv', 'w', encoding='utf-8', newline='') as summary:
        with open('CSVs/COMMON.csv', 'r', encoding='utf-8') as file:
            try:
                reader = csv.reader(file)
                next(reader)  # skip the header row
                # write to csv
                writer = csv.writer(summary)
                writer.writerows([["Article", "Headline", "Headline Sentiment", "Offense Rating", "Negative Words", "Offensive Words", "Tags"]])
                print("[bold gold1]===============================[/bold gold1]\n\n")
                # Score every article; column 3 holds the URL, column 4 the body text.
                for idx, row in enumerate(reader, start=1):
                    url= row[3]
                    raw_text = row[4]
                    headline=get_headline(url, organization)
                    headline_sentiment=sentiment_analysis(headline)
                    negative_words=[]
                    offensive_words=[]
                    tags=[]
                    # init offense rating; negative headlines weigh heavily
                    offense_rating=0
                    if headline_sentiment == "Extremely Negative":
                        offense_rating+=200
                    elif headline_sentiment == "Somewhat Negative":
                        offense_rating+=100
                    nlp_text= nlp(raw_text)
                    # Scan every token once against all the word lists.
                    for word in nlp_text:
                        # if it is a negative word
                        if word.text.lower() in negative_words_list:
                            offense_rating+=10
                            negative_words.append(word.text)
                        # if it is a highly offensive word
                        elif word.text.lower() in bad_words:
                            offense_rating+=50
                            offensive_words.append(word.text)
                        # if the article talks about lawsuits
                        if word.text.lower() in lawsuits:
                            offense_rating+=30
                            tags.append("lawsuit")
                        # if the article is about harassment
                        if word.text.lower() in harassment:
                            offense_rating+=50
                            tags.append("harassment")
                        # does the article mention a country?
                        if word.text.lower() in countries:
                            tags.append("country")
                        # does the article mention a person?
                        if word.ent_type_ == "PERSON":
                            tags.append(word)
                    # small flat discount for any article that scored at all
                    if offense_rating>20:
                        offense_rating-=10
                    # Write each row
                    writer.writerow(
                        [
                            idx,
                            headline,
                            headline_sentiment,
                            offense_rating,
                            list(negative_words),
                            list(offensive_words),
                            list(tags),
                        ]
                    )
                    print(f"Article {idx} written to csv")
                print(f"✔ [bold u r]\nSUCCESS! Finished processing COMMON-PROCESSED.csv[/bold u r]")
            except Exception as e:
                # NOTE(review): broad catch — any failure is logged and swallowed,
                # leaving a partially written output file.
                print(e)
                print(e.__class__)
                print(e.__doc__)
                print(e.__traceback__)
# ========================#
# Display temp output #
# ========================#
#visualize the text in html
def visualize(organization):
    """Concatenate every article body from CSVs/COMMON.csv, run spaCy NER
    over the combined text and serve displaCy's entity visualizer.

    Note: displacy.serve blocks until the process is interrupted.
    `organization` is currently unused here.
    """
    raw_text = ''
    with open('CSVs/COMMON.csv', 'r', encoding='utf-8') as file:
        reader = csv.reader(file)
        next(reader)  # skip the header row
        # do for every news article; column 4 holds the article body
        for idx, row in enumerate(reader, start=1):
            raw_text += row[4]
    nlp_text = nlp(raw_text)
    print("\n🚀 [bold magenta r]NER COMPLETE, all words tagged...[/bold magenta r]")
    # serve the displacy visualizer
    displacy.serve(nlp_text, style="ent")
# ========================#
# Merging Raw data #
# ========================#
def merge_csv(csv1, csv2, organization):
    """Inner-join two CSV files on their 'Article' column and write the
    result to CSVs/COMMON-ANALYSIS.csv.

    Args:
        csv1: path to the first CSV (raw article data).
        csv2: path to the second CSV (processed scores).
        organization: unused; kept for interface compatibility.
    """
    df1 = pd.read_csv(csv1, encoding='unicode_escape')
    df2 = pd.read_csv(csv2, encoding='unicode_escape')
    # pd.merge defaults to an inner join on the shared 'Article' key.
    df = pd.merge(df1, df2, on='Article')
    # (Removed dead code: an unused `import random`/`randint` pair and a
    # commented-out rename of any pre-existing COMMON-ANALYSIS.csv.)
    df.to_csv('CSVs/COMMON-ANALYSIS.csv', index=False)
    print("CSVs merged to COMMON-ANALYSIS.csv")
# ========================#
# cleaing up -2 #
# ========================#
# RUN SAME FUNCTION TWICE
def final_cleanup(organization):
    """Normalize CSVs/COMMON-ANALYSIS.csv in place.

    Empty cells in the word/tag columns become '-', and the Python-list
    syntax left over from writing lists to CSV ('[', ']', quotes) is
    stripped.  `organization` is unused; kept for interface compatibility.
    """
    df = pd.read_csv('CSVs/COMMON-ANALYSIS.csv', encoding='unicode_escape')
    for col in ('Offensive Words', 'Negative Words', 'Tags'):
        # write '-' to empty cells
        df[col] = df[col].fillna('-')
        # Strip list markup.  regex=False is essential: '[' and ']' are
        # regex metacharacters, and the old pandas default (regex=True)
        # made these calls either error out or match character classes.
        for token in ('[', ']', "'"):
            df[col] = df[col].str.replace(token, '', regex=False)
    df.to_csv('CSVs/COMMON-ANALYSIS.csv', index=False)
#get orgainizations url
def get_sub_url(organization):
    """Print the publisher (domain prefix) of every URL found in column 4
    of CSVs/COMMON-ANALYSIS.csv.

    Bug fix: the original simultaneously opened the same file for 'r' and
    'w' — the 'w' open truncated the file immediately, destroying the data
    and leaving the reader with nothing.  The write handle was never used,
    so it is simply removed; the file is now opened read-only.
    """
    publisher = []
    with open('CSVs/COMMON-ANALYSIS.csv', 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        url = [row[4] for row in reader]
    # remove www. and https:// prefixes from each url
    url = [re.sub(r'www.', '', i) for i in url]
    url = [re.sub(r'https://', '', i) for i in url]
    for x in url:
        # everything before '.com/' identifies the publisher
        name = x.split('.com/')[0]
        publisher.append(name)
    # replace items from publisher where character length is more than 40 with '-'
    publisher = [re.sub(r'.{40,}', '-', i) for i in publisher]
    print(publisher)
    print("CSVs cleaned up to COMMON-ANALYSIS.csv")
# sourcery skip: identity-comprehension
nlp = spacy.load("en_core_web_trf")
# ========================#
# Console Output #
# ========================#
# no tests for this function as it is not called anywhere in the command directly
def get_terminal_width() -> int:
    """
    Gets the width of the terminal.

    Returns:
        int: width of the terminal (falls back to 80 columns when no
        terminal is attached).
    """
    try:
        width = os.get_terminal_size().columns
    except OSError:
        width = 80
    # Windows consoles wrap when the last column is written, so keep one spare.
    return width - 1 if system().lower() == "windows" else width
def print_banner(console) -> None:
"""
Prints the banner of the application.
Args:
console (Console): Rich console object.
"""
banner = """
:::: :::: :::::::::: ::::::::: ::::::::::: ::: ::: :::: ::: ::: ::: ::: ::: :::::::: ::::::::::: ::::::::
+:+:+: :+:+:+ :+: :+: :+: :+: :+: :+: :+: :+: :+:+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+:
+:+ +:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ :+:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+
+#+ +:+ +#+ +#++:++# +#+ +:+ +#+ +#++:++#++: +#++:++#++: +#+ +:+ +#+ +#++:++#++: +#+ +#++: +#++:++#++ +#+ +#++:++#++
+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+#+# +#+ +#+ +#+ +#+ +#+ +#+ +#+
#+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+# #+#+# #+# #+# #+# #+# #+# #+# #+# #+# #+#
### ### ########## ######### ########### ### ### ### ### ### #### ### ### ########## ### ######## ########### ########
"""
width = get_terminal_width()
height = 10
# defining the panel
panel = Panel(
Align(
Text(banner, style="green"),
vertical="middle",
align="center",
),
width=width,
height=height,
subtitle="[bold blue]Built for CRIF Hackathon 2023![/bold blue]",
)
console.print(panel)
# ========================#
# Call of funtions #
# ========================#
#start cli
console = Console(record=False, color_system="truecolor")
print_banner(console)
# sourcery skip: inline-immediately-returned-variable
# ========================#
print(Panel.fit("[bold green reverse]ENTER AN ORGANIZATION NAME TO PERFORM MEDIA ANALYSIS ON[/bold green reverse]"))
organization=input()
articles=scrape_news(organization)
write_to_csv(organization, articles)
process_csv(organization)
file1='CSVs/COMMON.csv'
file2='CSVs/COMMON-processed.csv'
merge_csv(file1, file2, organization)
final_cleanup(organization)
final_cleanup(organization)
# get_sub_url(organization)
print(Panel.fit("[bold green reverse]ANALYSIS COMPLETE.[/bold green reverse]\nNow performing Named Entity Recognition on the articles and preparing a visualization."))
visualize(organization)
| HighnessAtharva/CRIF-Hackathon-2023 | SCRAPER.py | SCRAPER.py | py | 17,393 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "warnings.simplefilter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "contextlib.suppress",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "trafilatura.fetch_url",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "trafi... |
1750954085 | from ex2_utils import *
import matplotlib.pyplot as plt
from random import randrange
import numpy as np
import cv2
def presentation(plots, titles):
    """Display the given grayscale images with matching titles.

    Layout rules:
      * 1 plot  -> a single axes,
      * 2 plots -> a 1x2 row,
      * even counts -> a 2x2 grid (NOTE(review): add_subplot(2, 2, i+1)
        breaks for more than 4 plots — confirm callers never pass 6+),
      * odd counts > 2 -> one row of n axes.
    """
    n = len(plots)
    if n == 1:
        plt.imshow(plots[0], cmap='gray')
        plt.title(titles[0])
        plt.show()
        return
    if n == 2:
        fig, ax = plt.subplots(1, 2, figsize=(12, 8))
    elif n % 2 == 0:
        # Even counts get a dedicated 2x2 grid and return early.
        fig = plt.figure(figsize=(12, 8))
        plt.gray()
        for i in range(n):
            ax = fig.add_subplot(2, 2, i + 1)
            ax.imshow(plots[i])
            ax.title.set_text(titles[i])
        plt.show()
        return
    else:
        fig, ax = plt.subplots(1, n, figsize=(4 * n, 4))
    # Shared rendering path for the 1x2 and 1xn layouts.
    for i in range(n):
        ax[i].set_title(titles[i])
        ax[i].imshow(plots[i], cmap='gray')
    plt.tight_layout()
    plt.show()
def conv1Demo():
    """Compare conv1D against numpy.convolve on random signal/kernel pairs
    and report whether every pairing matched."""
    n = randrange(10)
    Signals, Kernels = list(), list()
    for i in range(n):
        Signals.append(np.random.randint(5, size=10))
        Kernels.append(np.random.randint(5, size=10))
    good_ans = 0
    for i in range(n):
        for j in range(n):
            np_convolution = np.convolve(Signals[i], Kernels[j])
            my_convolution = conv1D(Signals[i], Kernels[j])
            # Bug fix: the original used `a.all() == b.all()`, which only
            # compares two booleans ("are all elements truthy?"), not the
            # arrays themselves.  np.array_equal does an element-wise check.
            if np.array_equal(np_convolution, my_convolution):
                good_ans += 1
    if good_ans == len(Signals) * len(Kernels):
        print("conv1Demo: All test are passed!\nGood Job!\n")
    else:
        print("conv1Demo: Some of test aren't passed!\nTry Again!\n")
def conv2Demo():
    """Compare conv2D against cv2.filter2D for several kernels and report
    whether every kernel produced a matching result."""
    img = cv2.imread('pool_balls.jpeg', 0)  # 0 -> grayscale
    Kernels = [np.array([[-1, 1], [1, 1]], dtype=np.float64),
               np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64),
               np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float64),
               np.array([[0., 0.25, 0.5, 0.75, 1], [0.2, 0.4, 0.6, 0.8, 1],
                         [1., 1.25, 1.5, 1.75, 2], [1.2, 1.4, 1.6, 1.8, 2]], dtype=np.float64)]
    # Normalize every kernel with a non-zero sum so intensities are preserved.
    for i in range(4):
        if Kernels[i].sum() != 0:
            Kernels[i] /= (Kernels[i].sum())
    good_ans = 0
    for kernel in Kernels:
        cv2_convolution = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_REPLICATE)
        my_convolution = conv2D(img, kernel)
        # Bug fix: `a.all() == b.all()` compared two booleans, not arrays.
        if np.array_equal(cv2_convolution, my_convolution):
            good_ans += 1
    if good_ans == len(Kernels):
        print("conv2Demo: All test are passed!\nGood Job!\n")
    else:
        # Bug fix: the failure message previously said "conv1Demo".
        print("conv2Demo: Some of test aren't passed!\nTry Again!\n")
def derivDemo():
    """Show the gradient direction/magnitude and x/y derivatives of the
    test image computed by convDerivative."""
    img = cv2.imread('pool_balls.jpeg', 0)  # 0 -> grayscale
    direction, magnitude, x_der, y_der = convDerivative(img)
    plots = [direction, magnitude, x_der, y_der]
    titles = ["Direction", "Magnitude", "X Derivative", "Y Derivative"]
    presentation(plots=plots, titles=titles)
    print("derivDemo: Good Job!\n")
def blurDemo():
    """Blur the coins image with a 5x5 kernel and show before/after."""
    img = cv2.imread("coins.jpg", 0)  # 0 -> grayscale
    kernel_size = 5
    plots = [img, blurImage2(img, kernel_size)]
    titles = ['Image - non blurring', 'CV2 Blur']
    presentation(plots=plots, titles=titles)
    print("blurDemo: Good Job!\n")
def edgeDetectionSobelDemo():
    """Compare OpenCV's Sobel edge detection with the local implementation."""
    img = cv2.imread("boxman.jpg", 0)  # 0 -> grayscale
    opencv_solution, my_solution = edgeDetectionSobel(img, thresh=0.1)
    plots = [img, opencv_solution, my_solution]
    titles = ['Original Image', 'CV2 Sobel', 'My Sobel']
    presentation(plots=plots, titles=titles)
    print("edgeDetectionSobelDemo: Good Job!\n")
def edgeDetectionZeroCrossingLOGDemo():
    """Show Laplacian-of-Gaussian zero-crossing edge detection."""
    img = cv2.imread("boxman.jpg", 0)  # 0 -> grayscale
    edge_matrix = edgeDetectionZeroCrossingLOG(img)
    presentation(plots=[edge_matrix], titles=["Laplacian of Gaussian\nZero Crossing Edge Detection"])
    print("edgeDetectionZeroCrossingLOGDemo: Good Job!\n")
def edgeDetectionCannyDemo():
    """Compare OpenCV Canny with the local Canny implementation."""
    img = cv2.imread("pool_balls.jpeg", 0)  # 0 -> grayscale
    # 50/100 are the low/high hysteresis thresholds.
    cv2_canny, my_canny = edgeDetectionCanny(img, 50, 100)
    plots = [img, cv2_canny, my_canny]
    titles = ['Original Image', 'CV2 Canny Edge Detection', 'My Canny Edge Detection']
    presentation(plots=plots, titles=titles)
    print("edgeDetectionCannyDemo: Good Job!\n")
def edgeDemo():
    """Run every edge-detection demo in sequence."""
    for demo in (edgeDetectionSobelDemo,
                 edgeDetectionZeroCrossingLOGDemo,
                 edgeDetectionCannyDemo):
        demo()
def houghDemo():
    """Detect circles with houghCircle and draw them over the coins image."""
    img = cv2.imread('coins.jpg', 0)  # 0 -> grayscale
    min_radius, max_radius = 10, 20
    circles = houghCircle(img, min_radius, max_radius)
    fig, ax = plt.subplots()
    ax.imshow(img, cmap='gray')
    # Overlay one unfilled red circle per detection.
    for x, y, radius in circles:
        circles_plots = plt.Circle((x, y), radius, color='r', fill=False)
        ax.add_artist(circles_plots)
    plt.title("Circle\nMy houghCircle Implementation")
    plt.show()
    print("houghDemo: Good Job!\n")
def main():
    """Run all exercise demos in sequence."""
    print("ID: 316451749\nHave Fun! :)\n")
    conv1Demo()
    conv2Demo()
    derivDemo()
    blurDemo()
    edgeDemo()
    houghDemo()


if __name__ == '__main__':
    main()
| MoriyaBitton/Ex2_Convolution_and_Edge_Detection | ex2_main.py | ex2_main.py | py | 5,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ma... |
39472170581 | from flask import request
from werkzeug.exceptions import NotFound, BadRequest, Conflict
from db import db
from managers.brand import BrandManager
from managers.category import CategoryManager
from models import BrandModel, CategoryModel
from models.enums import GenderType
from models.products import ProductsModel, ProductImages, ProductPair
from sqlalchemy.sql.expression import text
from utils.operations import db_add_items, db_delete_items
def check_pair_or_image_product(item, product, item_id, product_id, item_name="item"):
    """Validate that *item* and *product* exist and that the item is
    attached to the product.

    Raises:
        NotFound: when either the item or the product is missing.
        BadRequest: when the item is not linked to the product.
    """
    # Existence guards, driven by a small table of (object, label, id).
    for obj, label, obj_id in ((item, item_name, item_id),
                               (product, "product", product_id)):
        if not obj:
            raise NotFound(f"There is not {label} with id: {obj_id}")
    attached = item in product.pairs or item in product.images
    if not attached:
        raise BadRequest(
            f"{item_name} with id: {item_id} is not attached to product with id: {product_id}"
        )
class ProductManager:
    """Service layer for products and their images and size/color pairs.

    All methods talk to the SQLAlchemy session directly and raise
    werkzeug HTTP exceptions (NotFound/BadRequest/Conflict) on invalid
    input, so the REST resources can use them without extra checks.
    """

    @staticmethod
    def create_product(product_data):
        # Build image and pair rows from the submitted payload.
        images = []
        for image in product_data["images"]:
            img = ProductImages(img_url=image)
            images.append(img)
        product_pair = []
        for obj in product_data["pairs"]:
            pair = ProductPair(**obj)
            product_pair.append(pair)
        print(product_data["pairs"])
        brand_q = BrandManager.get_by_name_query(product_data["brand_name"])
        category_q = CategoryManager.get_by_title_query(product_data["category_title"])
        brand = brand_q.first()
        category = category_q.first()
        if not brand:
            raise NotFound("There is no brand with that name")
        if not category:
            raise NotFound("There is no category with that name")
        # no_autoflush: the product is wired to brand/category/images/pairs
        # before anything is flushed, then persisted in one go.
        with db.session.no_autoflush:
            product = ProductsModel(
                title=product_data["title"],
                description=product_data["description"],
                price=product_data["price"],
                discount=product_data["discount"],
                gender=GenderType[product_data["gender"]],
            )
            brand.products.append(product)
            category.products.append(product)
            for img in images:
                product.images.append(img)
            for pair in product_pair:
                product.pairs.append(pair)
            db_add_items(product, category, brand)
        return product

    @staticmethod
    def add_image(id, image_data):
        # Attach a single new image URL to the product with the given id.
        image = ProductImages(img_url=image_data["img_url"], product_id=id)
        db_add_items(image)
        return image

    @staticmethod
    def delete_image(id, image_id):
        image = ProductImages.query.filter_by(id=image_id["id"]).first()
        # Only non-deleted products can be edited.
        product = ProductsModel.query.filter(
            ProductsModel.id == id, text("is_deleted is FALSE")
        ).first()
        check_pair_or_image_product(image, product, image_id["id"], id, "images")
        # NOTE(review): `*image` star-unpacks a single model instance —
        # looks like it should be `db_delete_items(image)`; confirm.
        db_delete_items(*image)
        return f"You delete image with id: {image_id['id']} successfully", 202

    @staticmethod
    def edit_image(product_id, images_data):
        # Replace a set of existing images (by id) with new URLs, 1:1.
        images_ids = [id for id in images_data["ids"]]
        new_urls = [url for url in images_data["urls"]]
        product = ProductsModel.query.filter_by(id=product_id).first()
        new_images = [
            ProductImages(product_id=product_id, img_url=url) for url in new_urls
        ]
        old_images = [ProductImages.query.filter_by(id=id).first() for id in images_ids]
        if len(images_ids) != len(new_urls):
            raise BadRequest(
                "You should add same number of new images such as number of deleted one"
            )
        if not product:
            raise NotFound(f"There is not product with id: {product_id}")
        for image in old_images:
            if image not in product.images:
                # NOTE(review): the f-string interpolates the builtin `id`,
                # not the offending image id — confirm intended message.
                raise NotFound(
                    f"The id:{id} is not attached to product with id:{product_id}"
                )
        try:
            db_add_items(*new_images)
            db_delete_items(old_images)
        except:
            # NOTE(review): bare except hides the real DB error.
            raise BadRequest("You cannot do that operation")
        return {"message": "You successful edit images"}

    @staticmethod
    def add_pair(id, pair_data):
        product = ProductsModel.query.filter(
            ProductsModel.id == id, text("is_deleted is FALSE")
        ).first()
        # A (size, color) combination must be unique per product.
        is_pair = ProductPair.query.filter_by(
            size=pair_data["size"], color=pair_data["color"], product_id=id
        ).first()
        if is_pair:
            raise Conflict(
                f"Pair with color: {pair_data['color']} and {pair_data['size']} already attached to product with id: {id}"
            )
        if not product:
            raise NotFound("There is no product with that id")
        pair = ProductPair(**pair_data, product_id=id)
        db_add_items(pair)
        return pair

    @staticmethod
    def delete_pair(id, pair_id):
        product = ProductsModel.query.filter(
            ProductsModel.id == id, text("is_deleted is FALSE")
        ).first()
        pair = ProductPair.query.filter_by(id=pair_id["id"]).first()
        check_pair_or_image_product(pair, product, pair_id["id"], id, "pair")
        db_delete_items(pair)
        # NOTE(review): message says "image" although a pair was deleted.
        return f"You delete image with id: {pair_id['id']} successfully", 202

    @staticmethod
    def edit_pair(product_id, pair_id, pair_data):
        # Only the quantity of an existing pair may be changed.
        product = ProductsModel.query.filter_by(id=product_id).first()
        pair = ProductPair.query.filter_by(id=pair_id).first()
        check_pair_or_image_product(pair, product, pair_id, product_id, "pair")
        # pair.size = pair_data["size"]
        # pair.color = pair_data["color"]
        pair.quantity = pair_data["quantity"]
        db_add_items(pair)
        return pair

    @staticmethod
    def sell_pair(pairs):
        # Decrement stock by one for every sold pair; caller persists.
        for pair in pairs:
            pair.quantity -= 1
        return pairs

    @staticmethod
    def edit_product_base_info(id_, product_data):
        # product_q = ProductsModel.query.filter(
        #     ProductsModel.id == id_, text("is_deleted is FALSE")
        # )
        product_q = ProductsModel.query.filter_by(id=id_)
        product = product_q.first()
        if not product:
            raise NotFound("This product does not exist.")
        product_q = ProductsModel.query.filter(ProductsModel.id == id_)
        old_brand = product.brand
        old_category = product.category
        new_brand = BrandManager.get_by_name(product_data["brand_name"])
        new_category = CategoryManager.get_by_name(product_data["category_title"])
        if not new_brand:
            raise NotFound("There is no brand with that name")
        if not new_category:
            raise NotFound("There is no category with that name")
        # Strip the relation keys so only column values go to .update().
        product_data.pop("brand_name")
        product_data.pop("category_title")
        with db.session.no_autoflush:
            print(product_data)
            product_q.update(product_data)
            # Re-home the product only when brand/category actually changed.
            if not old_brand.name == new_brand.name:
                old_brand.products.remove(product)
                new_brand.products.append(product)
            if not old_category.title == new_category.title:
                old_category.products.remove(product)
                new_category.products.append(product)
            db_add_items(product, new_category, old_category, new_brand, old_brand)
        return product

    @staticmethod
    def get_one(id_, for_admin=False):
        # Admins can fetch soft-deleted products; regular users cannot.
        if for_admin:
            product = ProductsModel.query.filter_by(id=id_).first()
        else:
            product = ProductsModel.query.filter(
                ProductsModel.id == id_, text("is_deleted is FALSE")
            ).first()
        if not product:
            raise NotFound("This product does not exist.")
        return product

    @staticmethod
    def get_all(for_admin=False):
        # Optional filters come from the request query string.
        category_title = request.args.get("category")
        brand_name = request.args.get("brand")
        gender = request.args.get("gender")
        category_f = CategoryModel.title == category_title
        brand_f = BrandModel.name == brand_name
        if gender not in GenderType.list() and gender:
            raise NotFound("There is not gender with that name")
        gender_f = ProductsModel.gender == gender
        # Absent filters collapse to True so they are no-ops in .filter().
        if not category_title:
            category_f = True
        if not brand_name:
            brand_f = True
        if not gender:
            gender_f = True
        if for_admin:
            products = (
                ProductsModel.query.join(ProductsModel.category)
                .join(ProductsModel.brand)
                .filter(brand_f, category_f, gender_f)
            )
        else:
            products = (
                ProductsModel.query.join(ProductsModel.category)
                .join(ProductsModel.brand)
                .filter(brand_f, text("is_deleted is FALSE"), category_f, gender_f)
            )
        return products.all()

    @staticmethod
    def delete_product(id_):
        # Soft delete: the row is flagged, not removed.
        product = ProductsModel.query.filter(
            ProductsModel.id == id_, text("is_deleted is FALSE")
        ).first()
        if not product:
            raise NotFound("This product does not exist.")
        product.is_deleted = True
        db_add_items()
        return "Product is deleted", 202
| a-angeliev/Shoecommerce | server/managers/products.py | products.py | py | 9,324 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "werkzeug.exceptions.NotFound",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.NotFound",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.BadRequest",
"line_number": 26,
"usage_type": "call"
},
... |
73683828585 | from typing import Optional, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.datamodules.components.diarization_dataset import (
DiarizationDataset,
DiarizationDatasetforInfer,
)
def collate_fn(batch):
    """Collate variable-length diarization samples into padded tensors.

    Args:
        batch: iterable of (y, t, ilen) tuples, where y is a (T_i, F)
            feature array, t a (T_i, S) label array and ilen the true
            length T_i.

    Returns:
        Tuple of (ys, ts, ilens): features padded with -1 (float32),
        labels padded with +1 (float32) and lengths (int32), all padded
        to the maximum ilen in the batch.
    """
    ys, ts, ilens = zip(*batch)
    ilens = np.array(ilens)
    # Hoisted: np.max(ilens) was previously recomputed for every element.
    max_len = np.max(ilens)
    ys = np.array(
        [
            np.pad(y, [(0, max_len - len(y)), (0, 0)], "constant", constant_values=(-1,))
            for y in ys
        ]
    )
    ts = np.array(
        [
            np.pad(t, [(0, max_len - len(t)), (0, 0)], "constant", constant_values=(+1,))
            for t in ts
        ]
    )
    # ys/ts/ilens are already ndarrays; the redundant np.array() wrappers
    # around torch.from_numpy's argument are gone.
    ys = torch.from_numpy(ys).to(torch.float32)
    ts = torch.from_numpy(ts).to(torch.float32)
    ilens = torch.from_numpy(ilens).to(torch.int32)
    return ys, ts, ilens
class DiarizationDataModule(LightningDataModule):
    """Lightning data module bundling the train/val/test diarization
    datasets and their dataloaders.

    The three directories in `data_dirs` map to (train, val, test); all
    feature-extraction parameters are shared across the three splits.
    """

    def __init__(
        self,
        data_dirs: Tuple[str, str, str],
        chunk_size: int = 2000,
        context_size: int = 7,
        frame_size: int = 1024,
        frame_shift: int = 256,
        subsampling: int = 10,
        sample_rate: int = 8000,
        input_transform: str = "logmel23_mn",
        n_speakers: int = None,
        batch_sizes: Tuple[int, int, int] = (64, 64, 1),
        num_workers: int = 0,
    ):
        super().__init__()
        # this line allows to access init params with 'self.hparams' attribute
        self.save_hyperparameters(logger=False)
        self.data_train: Optional[Dataset] = None
        self.data_val: Optional[Dataset] = None
        self.data_test: Optional[Dataset] = None

    def prepare_data(self):
        # Nothing to download or preprocess; data is expected on disk.
        pass

    def setup(self, stage: Optional[str] = None):
        """Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.

        This method is called by lightning when doing `trainer.fit()` and `trainer.test()`,
        so be careful not to execute the random split twice! The `stage` can be used to
        differentiate whether it's called before trainer.fit()` or `trainer.test()`.
        """
        if not self.data_train and not self.data_val and not self.data_test:
            train_dir, val_dir, test_dir = self.hparams.data_dirs
            self.data_train = DiarizationDataset(
                data_dir=train_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )
            self.data_val = DiarizationDataset(
                data_dir=val_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )
            # Inference variant: yields whole recordings for evaluation.
            self.data_test = DiarizationDatasetforInfer(
                data_dir=test_dir,
                chunk_size=self.hparams.chunk_size,
                context_size=self.hparams.context_size,
                frame_size=self.hparams.frame_size,
                frame_shift=self.hparams.frame_shift,
                subsampling=self.hparams.subsampling,
                sample_rate=self.hparams.sample_rate,
                input_transform=self.hparams.input_transform,
                n_speakers=self.hparams.n_speakers,
            )

    def train_dataloader(self):
        # Shuffled, padded batches via the module-level collate_fn.
        return DataLoader(
            dataset=self.data_train,
            batch_size=self.hparams.batch_sizes[0],
            num_workers=self.hparams.num_workers,
            shuffle=True,
            collate_fn=collate_fn,
        )

    def val_dataloader(self):
        return DataLoader(
            dataset=self.data_val,
            batch_size=self.hparams.batch_sizes[1],
            num_workers=self.hparams.num_workers,
            shuffle=False,
            collate_fn=collate_fn,
        )

    def test_dataloader(self):
        # No collate_fn: test samples are consumed one recording at a time.
        return DataLoader(
            dataset=self.data_test,
            batch_size=self.hparams.batch_sizes[2],
            num_workers=self.hparams.num_workers,
            shuffle=False,
        )
| DaseiNaN/Speech-Diarization | src/datamodules/diarization_datamodule.py | diarization_datamodule.py | py | 4,687 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 19,
... |
34866939002 | import math
from src.getTickers import *
from src.importData import *
from backtrader.indicators import ema
import datetime
GOINGDOWN_DAYS = 60
def hasNotIncreaseTooMuch(datahigh, datalow):
    """Return True when the high/low range over the last 5 bars stays
    below 30% (highest high < 1.3 * lowest low).

    Args:
        datahigh: series of highs, indexable with negative offsets.
        datalow: series of lows, indexable with negative offsets.
    """
    heighest = 0
    lowest = 10000
    for i in range(-5, 0):
        heighest = max(heighest, datahigh[i])
        lowest = min(lowest, datalow[i])
    # Bug fix: the original returned `datahigh < datalow*1.3`, comparing
    # the raw series objects and ignoring the aggregated extremes above.
    return heighest < lowest * 1.3
def todayIsLowest(dataclose):
    """Return True when today's close is <= every close in the look-back
    window (the last GOINGDOWN_DAYS bars, excluding today)."""
    # The 10000 cap mirrors the original accumulator's start value.
    window_low = min([10000] + [dataclose[i] for i in range(-GOINGDOWN_DAYS, -1)])
    return dataclose[0] <= window_low
def todayIsLowestClose(datalastclose, datalow):
    """Return True when the given close is <= every low in the look-back
    window (the last GOINGDOWN_DAYS bars, excluding the latest)."""
    floor = 10000  # same start value as the original accumulator
    for offset in range(-GOINGDOWN_DAYS, -1):
        if datalow[offset] < floor:
            floor = datalow[offset]
    return datalastclose <= floor
def findHighest(dataHighest):
    """Return the maximum value of the series, floored at 0.

    Note: index -len(dataHighest) (the oldest bar) is deliberately
    excluded, mirroring the original loop bounds.
    """
    candidates = [dataHighest[i] for i in range(-len(dataHighest) + 1, 0)]
    return max([0] + candidates)
class zhaoMaoPiao(bt.Strategy):
    """Backtrader screening strategy: on a fixed target date, flag tickers
    whose close is a GOINGDOWN_DAYS low, below 20, and less than 2/3 of
    the historical high.  Matches are appended to SELECTED_TICKERS.
    """

    def log(self, txt, dt=None):
        # Helper: prefix a message with the current bar's date.
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))

    def __init__(self):
        # EMAs are computed but not read in next(); kept for plotting/debug.
        self.ema18 = bt.ind.EMA(self.data, period=18)
        self.ema60 = bt.ind.EMA(self.data, period=60)
        self.dataClose = self.datas[0].close
        self.dataHigh = self.datas[0].high
        self.dataLow = self.datas[0].low

    def next(self):
        # Require a full look-back window before screening.
        isGoingDownLongEnough = len(self) > GOINGDOWN_DAYS
        # NOTE(review): the screen only ever fires on this hard-coded date.
        today = datetime.date(2021, 6, 11)
        curdate = self.datetime.date(ago=0)  # 0 is the default
        if(isGoingDownLongEnough and curdate==today):
            compareData = findHighest(self.dataHigh)
            print(curdate)
            if(self.dataClose[0] < compareData/1.5 and
                    todayIsLowest(self.dataClose) and
                    self.dataClose[0] < 20):
                # Record each qualifying ticker exactly once.
                if CURRENT_TICKER not in SELECTED_TICKERS:
                    print(CURRENT_TICKER)
                    print(curdate)
                    print(self.dataClose[0])
                    print(compareData)
                    SELECTED_TICKERS.append(CURRENT_TICKER)
tickers = getAllTickers()
for ticker in tickers:
data0 = getDataFromYahooFinance(ticker)
cerebro = bt.Cerebro()
cerebro.addstrategy(zhaoMaoPiao)
cerebro.adddata(data0)
# print('----------------------------')
print('Checking ticker: %s' % ticker)
# print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
CURRENT_TICKER = ticker
SELECTED_FLAG = False
cerebro.run()
print(SELECTED_TICKERS)
| lumeng3/luluquant | src/strategy/goingDown.py | goingDown.py | py | 2,672 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 49,
"usage_type": "call"
}
] |
21694318257 | import os
import numpy as np
import matplotlib.pyplot as plt
import re
from io import StringIO
from skimage.external.tifffile import imsave
from scipy.interpolate import griddata
from scipy.signal import medfilt
def GetChunkFromTextFile(FileName, StartStr, StopStr, skip_header=0, skip_footer=0, LastHit=True, DataType='array'):
    """Extract the text between StartStr and StopStr from a file.

    DataType selects the return form:
        'array': numpy array parsed via np.genfromtxt,
        'float': a single float,
        'raw':   the raw text (with skip_header/skip_footer lines removed).
    LastHit selects the last occurrence of the section (else the first).
    Returns None when the file cannot be read or the markers are absent.
    """
    DataType = DataType.lower()
    # Read the file.
    try:
        with open(FileName, 'r') as myfile:
            data = myfile.read()
    except OSError:
        # Narrowed from a bare except: only I/O errors mean "skip this file".
        print('Failed to open ' + FileName + '. Skipping.')
        return
    # This regex looks for the data between the start and stop strings.
    reout = re.compile('%s(.*?)%s' % (StartStr, StopStr), re.S)
    try:
        # Extract just the data we want.
        if LastHit == False:
            SectionStr = reout.search(data).group(1)
        else:
            SectionStr = reout.findall(data)[-1]
    except (AttributeError, IndexError):
        # The user asked for a section that isn't in the file; bail.
        return None
    if DataType == 'raw':
        # Now apply skip_header and skip_footer.
        SectionData = SectionStr
        SectionData = ''.join(SectionData.splitlines(True)[skip_header:])
        if skip_footer > 0:
            SectionData = ''.join(SectionData.splitlines(True)[:-skip_footer])
    if DataType == 'float':
        # Bug fix: np.float was removed in NumPy 1.24; builtin float is
        # equivalent here.
        SectionData = float(SectionStr)
    if DataType == 'array':
        # Convert it into a numpy array.
        SectionData = np.genfromtxt(StringIO(SectionStr), skip_header=skip_header, skip_footer=skip_footer, dtype=None)
    return SectionData
def ReadXSFVolume(FileName, verbose=True, WFOffset=(0,0,0), Cutoff=0.0):
    """Read an XCrySDen .xsf 3-D datagrid and resample it onto a regular
    cartesian cube with square pixels.

    Args:
        FileName: path to the .xsf file.
        verbose: print cube dimensions/origin/metric while parsing.
        WFOffset: (x, y, z) shift in angstroms applied to the cube origin.
        Cutoff: values with magnitude below this are replaced by 1e-9
            after normalization.

    Returns:
        (X, Y, Z, V): coordinate meshes and the float32 resampled volume.
    """
    print(FileName)
    Datagrid = GetChunkFromTextFile(FileName,'BEGIN_DATAGRID_3D_UNKNOWN','END_DATAGRID_3D', DataType='raw')
    lines = Datagrid.splitlines()
    # Line 0 is the 'BEGIN_DATAGRID_3D_UNKNOWN' header.
    # Line 1 is the x, y, z dimensions of the cube in pixels.
    xPixels, yPixels, zPixels = map(int, lines[1].split())
    if verbose==True:
        print(f'Dimension of data cube is ({xPixels}, {yPixels}, {zPixels}) pixels.')
    # Line 2 is the origin.
    xOrigin, yOrigin, zOrigin = map(float, lines[2].split())
    if verbose==True:
        print(f'Origin of data cube is ({xOrigin}, {yOrigin}, {zOrigin}) angstroms.')
    # Lines 3-5 are the metric (or identity matrix if this is a cube with sides of length 1).
    Mstr = ' '.join(lines[3:6])
    M = np.array(list(map(float, Mstr.split()))).reshape(3,3).T
    if verbose==True:
        print('Metric is:')
        print(M)
    # All the rest of the lines are the volume values.
    vstr = ' '.join(lines[6:])
    v = np.array(list(map(float, vstr.split()))).reshape(xPixels, yPixels, zPixels)
    # Next we need a datacube which encompasses the entire volume.
    # Make a cartesian grid of width 1 but same number of pixels as the xsf datacube.
    yp,xp,zp = np.meshgrid(np.linspace(0,1,xPixels), np.linspace(0,1,yPixels), np.linspace(0,1,zPixels))
    # Transform those coordinates to the same coordinate system as the xsf datacube.
    C = np.stack([xp,yp,zp], axis=0)
    x,y,z = np.einsum('ij,jklm->iklm', M,C)
    # Shift the origin to zero (plus the caller-supplied Wannier offset).
    x += xOrigin + WFOffset[0]
    y += yOrigin + WFOffset[1]
    z += zOrigin + WFOffset[2]
    # The cube x,y,z now represents the coordinates of the actual space that the orbital exists in.
    # We want to resample now using a new larger cube that includes the Wannier function.
    # Find the bounds of the cube.
    xmin = np.min(x); xmax = np.max(x);
    ymin = np.min(y); ymax = np.max(y);
    zmin = np.min(z); zmax = np.max(z);
    # Calculate the pixel sizes from the previous coordinate system.
    dx = np.linalg.norm(M.T[:,0])/xPixels
    dy = np.linalg.norm(M.T[:,1])/yPixels
    dz = np.linalg.norm(M.T[:,2])/zPixels
    # We want our new pixels to be square, so choose the smallest dx,dy,dz.
    dx = dy = dz = np.min([dx,dy,dz])
    # Calculate how many pixels that now is in our new cube.
    nx = np.ceil((xmax-xmin)/dx).astype(int)
    ny = np.ceil((ymax-ymin)/dy).astype(int)
    nz = np.ceil((zmax-zmin)/dz).astype(int)
    Y,X,Z = np.meshgrid(np.linspace(xmin,xmax,nx), np.linspace(ymin,ymax,ny), np.linspace(zmin,zmax,nz))
    # We are going to interpolate using griddata.
    # It expects an (n,D) array of points, whereas we have (x,y,z,D)
    # So collapse the first three dimensions (kind of, ravel all but the last dimension).
    xyz = np.stack([x,y,z],axis=3).reshape(-1,3)
    xyz.shape
    XYZ = np.stack([X,Y,Z],axis=3).reshape(-1,3)
    XYZ.shape
    # And interpolate/extrapolate v->V from xyz->XYZ.
    V = griddata(xyz, v.ravel(), XYZ, method='nearest')
    # Now that we are interpolated, reshape back to (x,y,z,D).
    V = V.reshape(X.shape)
    # Since we use nearest interpolation it comes out a bit noisy. Fix it.
    V = medfilt(V)
    # # Now eliminate values close to zero.
    # # Vnew = np.zeros(V.shape)
    # # Vnew[V>Cutoff] = V
    # print(Cutoff)
    # Vind1 = V<Cutoff
    # Vind2 = V>(-Cutoff)
    # Vind = Vind1&Vind2
    # print(Vind)
    # V[Vind] = 1e-25
    # Our pixel sizes are different, and medfilt can also change the amplitudes a little.
    # Renormalize so that the total intensity in our new cube is the same as outside the cube.
    V /= np.sum(V)
    # V *= np.sum(v)
    # Note this will fail if the edge of the cube doesn't have zeros or close because the extrapolation
    # will extend that edge value out...
    # Now eliminate values close to zero (magnitude below Cutoff -> 1e-9).
    print(Cutoff)
    Vind1 = V<Cutoff
    Vind2 = V>(-Cutoff)
    Vind = Vind1&Vind2
    V[Vind] = 1e-9
    return(X, Y, Z, V.astype('float32'))
if __name__ == '__main__':
X,Y,Z,V = ReadXSFVolume('NiO_00001.xsf', verbose=False) #, Cutoff=0.001) #, WFOffset=(0,0,3.5945353))
imsave('NiO_00001.tif', V)
print('Done.')
| ZGainsforth/QEScripts | Wannier/ReadXSFVolume.py | ReadXSFVolume.py | py | 6,099 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number"... |
14722446132 | from pycorenlp import StanfordCoreNLP
import os, json, sys

# NLP annotation pipeline: for every account folder in the dataset, run the
# bio text through a local Stanford CoreNLP server and persist the extracted
# named entities and dependency "references" as nlp.json.
#
# The CoreNLP server must already be running, e.g.:
#   cd <corenlp dir> && java -mx5g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -timeout 10000
nlp = StanfordCoreNLP('http://localhost:9000')
annotators = "ssplit,ner,depparse"
# NER labels we keep; every other label CoreNLP reports is ignored.
ner_keys = ["PERSON", "LOCATION", "ORGANIZATION", "NUMBER", "DATE", "EMAIL",
            "URL", "CITY", "STATE_OR_PROVINCE", "COUNTRY", "NATIONALITY",
            "RELIGION", "TITLE", "IDEOLOGY"]
# Dependency-parse variants to scan for governor/dependent pairs.
reference_keys = ["basicDependencies", "enhancedDependencies", "enhancedPlusPlusDependencies"]
dataset_path = "C:/Users/Mark/Marco/Magistrale/Anno I/Secondo semestre/DS & ML/Progetto/Social-Mapper-Extended/social_mapper2/dataset/"


def _bucket(groups, key):
    """Return the value list stored under *key* in *groups* (a list of
    single-key dicts), creating and registering an empty list if absent.

    Replaces the previous duplicated find-or-create loops, preserving the
    original list-of-single-key-dicts output structure and insertion order.
    """
    for entry in groups:
        if key in entry:
            return entry[key]
    values = []
    groups.append({key: values})
    return values


for account in os.listdir(dataset_path):
    if account == "log.txt":
        continue
    # Uncomment to resume a partial run by skipping already-annotated accounts:
    # if "nlp.json" in os.listdir(dataset_path + account):
    #     continue
    print(account)
    # Fix: the file handle was previously left open; use a context manager.
    with open(dataset_path + account + "/bio.json") as js:
        sentence = json.load(js)
    print(sentence)
    res = nlp.annotate(sentence,
                       properties={
                           'annotators': annotators,
                           'outputFormat': 'json',
                           'timeout': 1000,
                       })
    # On server errors/timeouts annotate() returns an error string, not a dict.
    if isinstance(res, str):
        continue
    nlp_res = {"entities": [], "references": []}
    for sent in res["sentences"]:
        # Title/organization mentions seen in this sentence; their dependency
        # neighbours are collected as "references" below.
        check_references = []
        for m in sent["entitymentions"]:
            mention = m['text']
            ner = m["ner"]
            if ner in ner_keys:
                # Entities keep duplicates (no dedupe), as before.
                _bucket(nlp_res["entities"], ner).append(mention)
                if ner in ["TITLE", "ORGANIZATION"]:
                    check_references.append(mention)
        for k in reference_keys:
            for dependency in sent[k]:
                key = dependency["governorGloss"]
                if key in check_references:
                    referenced = _bucket(nlp_res["references"], key)
                    item = dependency["dependentGloss"]
                    # References are de-duplicated (entities are not).
                    if item not in referenced:
                        referenced.append(item)
    with open(dataset_path + account + "/nlp.json", "w") as js:
        json.dump(nlp_res, js)
{
"api_name": "pycorenlp.StanfordCoreNLP",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_n... |
30820901838 | #Extracts second-column values from .dat files and prints them out, comma-separated, so they can be used as a colormap in VARNA
#It'll do this for all .dat files you have in your directory. If you don't want this feature just comment out everything with read_files in it
#and unindent as needed.
#I also plot out the values for just A/C reads.
#I'm also printing out Yeo-Johnson or arcsinh-transformed reads--this is useful if there's a wide range of values [0 included] and you don't want a high-read nt to affect your colormap visualization dramatically.
#I also plot reads for a given sequence transformed both ways for the sake of comparison.
#If you're curious about Yeo-Johnson--its main benefit is that it can transform exponentially distributed data into normally-distributed data, with the additional perk of being able to deal with negative/zero values [unlike a Boxcox transform]
#https://machinelearningmastery.com/how-to-transform-data-to-fit-the-normal-distribution/ does a nice job explaining what the Yeo-Johnson is/what it does.
import re
import numpy as np
import glob
import matplotlib.pyplot as plt
from sklearn.preprocessing import PowerTransformer
# ---- Load gene sequences: FASTA-style file with ">name" header lines ----
read_files = glob.glob("*.dat")
sequences = open("21_cleavage_windows_final.txt", "r")
all_sequences = {}
for line in sequences:
    if ">" in line:
        seqname = line[1:-1]
    else:
        all_sequences[seqname]=line[:-1]
sequences.close()
# j numbers the matplotlib figures across all files/plots.
j = 1
for datfile in read_files:
    infile = open(datfile, "r")
    #comment out this regex stuff if your .dat file isn't named "gene.dat"--with my naming convention this extracts the gene name for me
    regex = r"^[a-zA-Z]+"
    matches = re.findall(regex, datfile) #say the filename is atpi.dat. This extracts "atpi"
    name = matches[0]
    values = [] #array of all second-column values, i.e. the values of interest for the colormap
    for line in infile:
        reads = line.split("\t")[1] #Each line is tab-separated. We want the value in the second column.
        reads = reads[:-1] #There's a \n at the end of the "reads" value, which counts as a single character.
        values.append(reads)
    values = np.array(values[:]).astype(float)
    ac_values = []
    sequence = all_sequences[name]
    # NOTE(review): assumes len(sequence) <= len(values); an IndexError is
    # raised otherwise -- confirm the .dat file matches the sequence window.
    for i in range(len(sequence)):
        if sequence[i]=="A" or sequence[i]=="C":
            ac_values.append(values[i]) #only add dms reads corresponding to A/C nts to ac_values
    #########plotting reads for all nts###########
    '''
    plt.figure(j)
    plt.hist(values, color="lemonchiffon", bins=np.arange(0, max(values)+2,1.0), edgecolor="darkgoldenrod",align="mid")
    plt.xticks(np.arange(min(values), max(values)+2, 1.0),rotation="vertical")
    plt.autoscale()
    plt.xlabel("Read count")
    plt.ylabel("Frequency")
    plt.title(name+" DMS untransformed reads")
    j += 1
    plt.draw()
    '''
    values_to_transform = values[:] #The dms values were strings earlier--we need to convert to floats to manipulate
    #log transform
    # Replace zeros with a tiny pseudocount so np.log is defined everywhere.
    for i in range(len(values_to_transform)):
        value = values_to_transform[i]
        if value == 0:
            values_to_transform[i] = 1e-7 #add as a pseudocount
    transformed_vals = np.log(values_to_transform)
    #This gets a bit convoluted. Basically I find the second-smallest value in transformedvals [so, the smallest nonzero value], add that value to all values in
    #transformedvals and then set any negative values to 0
    findmin = transformed_vals[:]
    minval = min(findmin)
    findmin = findmin[findmin!=minval] #from https://stackoverflow.com/questions/53541156/how-to-remove-all-occurrences-of-an-element-from-numpy-array
    smallestnonzero = min(findmin)
    offset = 1 #set the second-lowest values to 1
    transformed_vals = [i+np.abs(smallestnonzero)+offset for i in transformed_vals]
    # Anything still below the offset corresponds to the pseudocount zeros; floor to 0.
    for i in range(len(transformed_vals)):
        value = transformed_vals[i]
        if value < offset: #if it's <offset it's smaller than smallestnonzero
            transformed_vals[i] = 0
    #arcsinh transform
    #transformed_vals = np.arcsinh(values_to_transform)
    #implementing Yeo-Johnson as per https://stackoverflow.com/questions/53624804/how-to-normalize-a-non-normal-distribution
    #values_to_transform = values_to_transform.reshape(-1,1) #convert to a 2d array
    #pt = PowerTransformer(method='yeo-johnson')
    #calculate the right parameters to fit the data [this is lambda from the transform]
    #pt.fit(values_to_transform)
    #transformed_vals = pt.transform(values_to_transform)
    # Histogram of the log-transformed reads for every nucleotide.
    plt.figure(j)
    plt.hist(transformed_vals, color="tomato", bins=np.arange(0, max(transformed_vals)+2,1.0), edgecolor="white",align="mid")
    plt.xticks(np.arange(min(transformed_vals), max(transformed_vals)+2, 1.0),rotation="vertical")
    plt.autoscale()
    plt.xlabel("Read count")
    plt.ylabel("Frequency")
    plt.title(name+" DMS log-transformed reads")
    j += 1
    plt.draw()
    #######plotting reads for a/c only########
    '''
    plt.figure(j)
    plt.hist(ac_values, color="goldenrod", bins=np.arange(0, max(ac_values)+2,1.0), edgecolor="white",align="mid")
    plt.xticks(np.arange(min(ac_values), max(ac_values)+2, 1.0),rotation="vertical")
    plt.autoscale()
    plt.xlabel("Read count")
    plt.ylabel("Frequency")
    plt.title(name+" DMS untransformed A/C reads")
    j += 1
    plt.draw()
    '''
    ac_values_to_transform = ac_values[:] #The dms values were strings earlier--we need to convert to floats to manipulate
    #log transform
    # Same pseudocount-then-log treatment for the A/C-only reads.
    for i in range(len(ac_values_to_transform)):
        value = ac_values_to_transform[i]
        if value == 0:
            ac_values_to_transform[i] = 1e-7
    ac_transformed_vals = np.log(ac_values_to_transform)
    #This gets a bit convoluted. Basically I find the second-smallest value in transformedvals [so, the smallest nonzero value], add that value to all values in
    #transformedvals and then set any negative values to 0
    findminac = ac_transformed_vals[:]
    minac = min(findminac)
    findminac = findminac[findminac!=minac] #findminac with all instances of the smallest value removed
    smallestnonzeroac = min(findminac)
    offset = 1 #the difference you want between the smallest [0] value and the second-smallest value
    ac_transformed_vals = [i+np.abs(smallestnonzeroac)+offset for i in ac_transformed_vals]
    for i in range(len(ac_transformed_vals)):
        value = ac_transformed_vals[i]
        if value < offset:
            ac_transformed_vals[i] = 0
    #arcsinh transform
    #ac_transformed_vals = np.arcsinh(ac_values_to_transform)
    '''
    #implementing Yeo-Johnson as per https://stackoverflow.com/questions/53624804/how-to-normalize-a-non-normal-distribution
    ac_values_to_transform = np.array(ac_values_to_transform).astype(float).reshape(-1,1) #convert to a 2d array
    pt = PowerTransformer(method='yeo-johnson')
    #calculate the right parameters to fit the data [this is lambda from the transform]
    pt.fit(ac_values_to_transform)
    ac_transformed_vals = pt.transform(ac_values_to_transform)
    '''
    # Histogram of the log-transformed reads for A/C nucleotides only.
    plt.figure(j)
    plt.hist(ac_transformed_vals, color="skyblue", bins=np.arange(0, max(ac_transformed_vals)+2,1.0), edgecolor="white",align="mid")
    plt.xticks(np.arange(min(ac_transformed_vals), max(ac_transformed_vals)+2, 1.0),rotation="vertical")
    plt.autoscale()
    plt.xlabel("Read count")
    plt.ylabel("Frequency")
    plt.title(name+" DMS log-transformed A/C reads")
    j += 1
    plt.draw()
    #print name+" reads:\n" + ",".join(values.astype(str))+"\n" #i.e. print "atpI reads: \n" followed by the reads
    #print "Arcsinh-transformed "+name+" reads:\n" + ",".join(transformed_vals.astype(str))+"\n" #i.e. print "arcsinh-transformed atpI reads: \n" followed by the transformed reads
    infile.close()
# Show all the accumulated figures once every file has been processed.
plt.show()
| gwlilabmit/Ram_Y_complex | paired_prob/plot_dat.py | plot_dat.py | py | 7,427 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 73,
... |
30380624251 | import os
import os
from datetime import timedelta
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# None when unset; Django refuses to start without it, which is intentional.
SECRET_KEY = os.environ.get("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
# Fix: default to "0" (off) so a missing DEBUG env var fails safe instead of
# raising TypeError from int(None).
DEBUG = int(os.environ.get("DEBUG", "0"))

# Space-separated host list. Fix: default to "" so a missing env var no
# longer raises AttributeError on .split().
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "").split(" ")

# Application definition
INSTALLED_APPS = [
    # django default apps
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # third-party apps
    "djoser",
    "corsheaders",
    "rest_framework",
    "rest_framework.authtoken",
    # custom app
    "authentify.apps.AuthentifyConfig",
    "quiz.apps.QuizConfig",
]

MIDDLEWARE = [
    # CorsMiddleware must come before CommonMiddleware to add CORS headers.
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "backend.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "backend.wsgi.application"

# Database
# https://docs.djangoproject.com/en/4.1/ref/settings/#databases
# Connection parameters come entirely from the environment (docker-style).
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": os.environ.get("POSTGRES_DB"),
        "USER": os.environ.get("POSTGRES_USER"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
        "HOST": os.environ.get("POSTGRES_HOST"),
        "PORT": os.environ.get("POSTGRES_PORT"),
    }
}

# Custom user model (email login, uuid primary key -- see SIMPLE_JWT below).
AUTH_USER_MODEL = "authentify.User"

# Password validation
# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    }
]

# Development email backend: messages are printed to the console.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

# Internationalization
# https://docs.djangoproject.com/en/4.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.1/howto/static-files/
STATIC_URL = "static/"

# Default primary key field type
# https://docs.djangoproject.com/en/4.1/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

# Application constant: maximum number of questions served per quiz.
MAX_QUESTION_PER_QUIZ: int = 10

REST_USE_JWT = True
JWT_AUTH_COOKIE = "quiz-auth"
JWT_AUTH_REFRESH_COOKIE = "quiz-refresh-token"

REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework_simplejwt.authentication.JWTAuthentication",
    ),
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 10,
}

SIMPLE_JWT = {
    "ACCESS_TOKEN_LIFETIME": timedelta(days=1),
    "BLACKLIST_AFTER_ROTATION": False,
    # The custom User model uses `uuid` instead of the default integer id.
    "USER_ID_FIELD": "uuid",
}

DJOSER = {
    "LOGIN_FIELD": "email",
    "PASSWORD_RESET_CONFIRM_URL": "password/reset/confirm/{uid}/{token}",
}

# NOTE: permissive CORS -- acceptable for development, review for production.
CORS_ALLOW_ALL_ORIGINS = True

REDIS_HOST = os.environ.get("REDIS_HOST")
REDIS_PORT = os.environ.get("REDIS_PORT")
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line... |
22772365443 | from tkinter import*
from tkinter import ttk, messagebox
import datetime as dt
import openpyxl
import pandas as pd
import os
import csv
class dataEntry:
    """Tkinter form for logging Business Reviews audit results.

    Collects file name, marketplace, sample size, error count and classifier
    login, computes the classification accuracy, and appends the entry to a
    shared tab-separated .txt log which is then mirrored to an .xlsx workbook
    on a network share.
    """

    def __init__(self,root):
        # Fixed-size, non-resizable main window.
        self.root = root
        self.root.title("Quality tracker")
        self.root.geometry("1000x800+0+0")
        self.root.pack_propagate(False) # tells the root to not let the widgets inside it determine its size.
        self.root.resizable(0, 0)
        # Auditor identity comes from the OS session, not from user input.
        self.user = os.getlogin()
        #self.bg=ImageTk.PhotoImage(file=r'C:\Users\mutta\Desktop\test1\wallpaper_tk1.jpg')
        #bg=Label(self.root,image=self.bg).place(relwidth = 1, relheight = 1)
        # Header band (frame1) and form body (frame2).
        frame1 = Frame(self.root, bg= "DarkCyan")
        frame1.place(x=0.5, y=0.5, width =2000, height = 80)
        frame2 = Frame(self.root, bg= "White")
        frame2.place(x=0.5, y=80.5, width =2000, height = 1000)
        title = Label(frame1, text= "Business Reviews Audit Entry", font=("times new roman", 20, "bold"), bg = "DarkCyan", fg = 'white').place(x=30,y=30)
        # Today's date, shown at the top of the form.
        date= dt.datetime.now()
        date = Label(frame2, text=f"{date:%A, %B %d, %Y}", font="Calibri, 10", bg='white', fg='black')
        date.place(x=600, y=2)
        # Read-only auditor login label plus the five input widgets.
        Auditor_login = Label(frame2, text= "Auditor Login:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=30)
        self.txt_Auditor_login = Label(frame2, text= self.user, font = ("calibri", 15, "bold"), bg= "white", fg="black")
        self.txt_Auditor_login.place(x=250, y= 30, width =100)
        File_name = Label(frame2, text= "File Name:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=70)
        self.txt_File_name = Entry(frame2, font = ("times new roman", 10), bg= "lightgray")
        self.txt_File_name.place(x=250, y= 75, width =250)
        Marketplace = Label(frame2, text= "Marketplace:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=110)
        self.cmb_Marketplace = ttk.Combobox(frame2, font = ("times new roman", 12), state= "readonly", justify=CENTER)
        self.cmb_Marketplace['values']=("Select","EN","DE","FR","IT","JP","ES","UK","CA","IN","None")
        self.cmb_Marketplace.place(x=250, y= 115, width =100)
        self.cmb_Marketplace.current(0)
        Audit_sample = Label(frame2, text= "Audit Sample:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=150)
        self.txt_Audit_sample = Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Audit_sample.place(x=250, y= 155, width =100)
        Error_count = Label(frame2, text= "Error Count:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=190)
        self.txt_Error_count =Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Error_count.place(x=250, y= 195, width =100)
        Classifier_login = Label(frame2, text= "Classifier login:", font=("times new roman", 15, "bold"), bg = "white", fg = 'black').place(x=50,y=230)
        self.txt_Classifier_login = Entry(frame2, font = ("times new roman", 15), bg= "lightgray")
        self.txt_Classifier_login.place(x=250, y= 235, width =100)
        button = Button(text = 'Submit', font = ("times new roman", 15),bg='DarkCyan', fg='white', cursor="hand2", command = self.auditDetails).place(x=500, y= 450, width = 100)

    def clear(self):
        """Reset every input widget to its initial (empty/default) state."""
        self.txt_File_name.delete(0,END)
        self.cmb_Marketplace.current(0)
        self.txt_Audit_sample.delete(0,END)
        self.txt_Error_count.delete(0,END)
        self.txt_Classifier_login.delete(0,END)

    def auditDetails(self):
        """Validate the form, compute accuracy, append the audit entry to the
        shared log (txt mirrored to xlsx) and report the result via dialogs."""
        # NOTE(review): `self.txt_Auditor_login==""` compares the Label
        # *widget* to a string, so it is always False; the intended check was
        # probably `self.user == ""` -- confirm before relying on it.
        if self.txt_Auditor_login=="" or self.txt_File_name.get()=="" or self.cmb_Marketplace.get()=="" or self.txt_Audit_sample.get()=="" or self.txt_Error_count.get()=="" or self.txt_Classifier_login.get()=="":
            messagebox.showerror("Oops, Error!","All fields are mandatory", parent=self.root)
        elif str(self.user)==str(self.txt_Classifier_login.get()):
            messagebox.showerror("Oops, Error!","Auditor ID can't be same as Classifier ID", parent=self.root)
        else:
            try:
                al = self.user
                fn = self.txt_File_name.get()
                mp = self.cmb_Marketplace.get()
                asc =self.txt_Audit_sample.get()
                ec =self.txt_Error_count.get()
                cl = self.txt_Classifier_login.get()
                dtn = dt.datetime.now()
                dtns = dtn.strftime("%d-%m-%Y")
                # Integer accuracy percentage; int() truncates. Non-numeric or
                # zero sample counts raise here and are surfaced as an error
                # dialog by the except clause below.
                accuracy = int((int(asc)-int(ec))*100/int(asc))
                '''
                df1 = pd.DataFrame({"Auditor login": [al],"File Name":[fn], "Marketplace":[mp],"Audit Sample":[asc],"Error Count":[ec],"Classifier login":[cl],"Date":[dtns]})
                df2 = pd.read_excel(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", index_col=[0])
                print(df1)
                print(df2)
                df3 = df2.append(df1, ignore_index=True)
                df3.drop(df3.filter(regex="Unname"),axis=1, inplace=True)
                df3.to_excel((r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx"), index=False)
                #df.to_excel(writer,index=False,header=False,startrow=len(reader)+1)
                '''
                # use incase if .txt output is needed
                audit_fields=["Auditor login","File Name","Marketplace","Audit Sample","Error Count","Classifier login","Date"]
                audit_values=[self.user,self.txt_File_name.get(),self.cmb_Marketplace.get(),self.txt_Audit_sample.get(),self.txt_Error_count.get(),self.txt_Classifier_login.get(),dt.datetime.now()]
                # Append the entry as a tab-separated row to the shared log.
                s= '\n'+al+'\t'+fn+'\t'+mp+'\t'+asc+'\t'+ec+'\t'+cl+'\t'+dtns+'\t'+str(accuracy)
                f= open((r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.txt"),'a')
                f.write(s)
                f.close()
                # converting to excel
                tf_df_new = pd.read_csv(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.txt", sep = '\t')
                tf_df_new.to_excel(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", index=False)
                # deleting unnamed cols
                file = r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx"
                excel_file = openpyxl.load_workbook(file)
                excel_sheet = excel_file['Sheet1']
                # delete column
                excel_sheet.delete_cols(idx=9 , amount=1)
                excel_file.save(file)
                # use incase if .csv output is needed
                '''
                with open(r"\\ant.amazon.com\dept-as\HYD11\GroupData\ABSC-HYD\ABSC-Ops-Team\Business Reviews\audit_details.xlsx", "a") as fs:
                    w = csv.writer(fs,dialect = 'excel-tab')
                    w.writerow([al,fn,mp,asc,ec,cl,dtns])
                    fs.close()
                '''
                # Files below the 98% accuracy bar are flagged for re-work.
                if accuracy < 98:
                    messagebox.showinfo("Alert!",f"Reassign the file as Classification accuracy: {accuracy}%, is below the 98% target. \n\n Entry Success!", parent=self.root)
                else:
                    messagebox.showinfo("Success!",f"Classification accuracy: {accuracy}%\n\n Entry Success!", parent=self.root)
                self.clear()
            except Exception as es:
                messagebox.showerror("Error",f"Error due to:{str(es)}", parent = self.root)
# Build the main window, attach the audit-entry form, and start the Tk loop.
main_window = Tk()
app = dataEntry(main_window)
main_window.mainloop()
{
"api_name": "os.getlogin",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.... |
4728646967 | import time
from io import BytesIO
from typing import List
import pandas as pd
from matplotlib import pyplot as plt
from pandas import DataFrame
from svglib.svglib import svg2rlg
from evaluate.EvaluateCore import PartAngle
import seaborn as sns
# Use the SimHei (black-body) font so Chinese glyphs render in figures.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Keep the ASCII minus sign so negative values are not drawn as boxes when saving.
plt.rcParams['axes.unicode_minus'] = False
# Non-interactive mode: figures are built for saving, not shown on screen.
plt.ioff()
def get_local_format_time(timestamp):
    """Format *timestamp* (seconds since the epoch) as a local-time
    'YYYYMMDDHHMMSS' string.

    Fix: the original called ``time.localtime()`` with no argument, so the
    *timestamp* parameter was ignored and the current time was always
    returned; the argument is now passed through.
    """
    local_time = time.localtime(timestamp)
    return time.strftime("%Y%m%d%H%M%S", local_time)
def generateROMPart(df_angles: pd.DataFrame, parts: list):
    """Build the range-of-motion (ROM) report sections for the requested parts.

    Each section is a dict with a ``title`` and a ``list`` of table rows
    (header row first): parameter name, measured value, unit, reference
    range.  Measured values are the min/max of the corresponding angle
    columns in *df_angles*, rounded to 2 decimals.

    Parameters
    ----------
    df_angles : pd.DataFrame
        Per-frame joint-angle columns (e.g. ``LKnee_angle``).
    parts : list
        Members of the project ``PartAngle`` enum selecting which sections
        to build.

    Returns
    -------
    list of dict
        One section per requested part, in input order.
    """
    romPart = []
    for part in parts:
        if part == PartAngle.Knee:
            # Knee: extension = minimum angle, flexion = maximum angle.
            romPart.append({
                "title": "膝关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左膝关节伸展\nL.KNEE Extension", str(df_angles["LKnee_angle"].min().round(2)), "°", "0-60"],
                    ["左膝关节屈曲\nL.KNEE Flexion", str(df_angles["LKnee_angle"].max().round(2)), "°", "0-140"],
                    ["右膝关节伸展\nR.KNEE Extension", str(df_angles["RKnee_angle"].min().round(2)), "°", "0-60"],
                    ["右膝关节屈曲\nR.KNEE Flexion", str(df_angles["RKnee_angle"].max().round(2)), "°", "0-140"],
                    ["检测项共计", "", "", "4 项"]
                ]
            })
        elif part == PartAngle.Hip:
            # Hip: flexion/extension from torso-femur angles; ab/adduction and
            # rotation are derived by offsetting the raw angles from 90°/180°.
            romPart.append({
                "title": "髋关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左髋关节伸展\nL.Hip Extension", str(df_angles["TorsoLFemur_angle"].min().round(2)), "°", "0-30"],
                    ["左髋关节屈曲\nL.Hip Flexion", str(df_angles["TorsoLFemur_angle"].max().round(2)), "°", "0-40"],
                    ["右髋关节伸展\nR.Hip Extension", str(df_angles["TorsoRFemur_angle"].min().round(2)), "°", "0-30"],
                    ["右髋关节屈曲\nR.Hip Flexion", str(df_angles["TorsoRFemur_angle"].max().round(2)), "°", "0-40"],
                    ["左髋关节外展\nL.Hip Abduction", str((180 - df_angles["LHip_angle"].max() - 90).round(2)), "°",
                     "-"],
                    ["左髋关节内收\nL.Hip Adduction", str((90 - (180 - df_angles["LHip_angle"].min())).round(2)), "°",
                     "-"],
                    ["右髋关节外展\nR.Hip Abduction", str((180 - df_angles["RHip_angle"].max() - 90).round(2)), "°",
                     "-"],
                    ["右髋关节内收\nR.Hip Adduction", str((90 - (180 - df_angles["RHip_angle"].min())).round(2)), "°",
                     "-"],
                    ["左髋关节外旋\nL.Hip Internal Rotation",
                     str((180 - df_angles["LTibiaSelf_vector"].max()).round(2)),
                     "°", "-"],
                    ["左髋关节内旋\nL.Hip External Rotation", str((df_angles["LTibiaSelf_vector"].min()).round(2)), "°",
                     "-"],
                    ["右髋关节外旋\nR.Hip Internal Rotation",
                     str((180 - df_angles["RTibiaSelf_vector"].max()).round(2)),
                     "°", "-"],
                    ["右髋关节内旋\nR.Hip External Rotation", str((df_angles["RTibiaSelf_vector"].min()).round(2)), "°",
                     "-"],
                    ["检测项共计", "", "", "12 项"]
                ]
            })
        elif part == PartAngle.Pelvis:
            # Pelvis: obliquity/rotation from the torso-hip angle extremes.
            romPart.append({
                "title": "骨盆活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["骨盆侧倾\nPelvis Obliquity", str((90 - df_angles["TorsoLHip_angle"].max()).round(2)), "°",
                     "0-10"],
                    ["骨盆旋转\nPelvis Rotation", str((90 - df_angles["TorsoLHip_angle"].min()).round(2)), "°", "0-10"],
                    ["检测项共计", "", "", "2 项"]
                ]
            })
        elif part == PartAngle.Ankle:
            # Ankle: pronation/supination are not measured ("-" placeholders).
            romPart.append({
                "title": "踝关节活动度",
                "list": [
                    ["参数Parameters", "数值Data", "单位Unit", "参考值Reference"],
                    ["左踝关节跖屈\nL.Ankle Plantar flexion", str(df_angles["LAnkle_angle"].max().round(2)), "°", "20"],
                    ["左踝关节背屈\nL.Ankle Dorsiflexion", str(df_angles["LAnkle_angle"].min().round(2)), "°", "30"],
                    ["右踝关节跖屈\nR.Ankle Plantar flexion", str(df_angles["RAnkle_angle"].max().round(2)), "°", "20"],
                    ["右踝关节背屈\nR.Ankle Dorsiflexion", str(df_angles["RAnkle_angle"].min().round(2)), "°", "30"],
                    ["左踝关节外翻\nL.Ankle Pronation", "-", "°", "15"],
                    ["左踝关节内翻\nL.Ankle Supination", "-", "°", "35"],
                    ["右踝关节外翻\nR.Ankle Pronation", "-", "°", "15"],
                    ["右踝关节内翻\nR.Ankle Supination", "-", "°", "35"],
                    ["检测项共计", "", "", "8 项"]
                ]
            })
    return romPart
def polt_angle_plots(df: DataFrame) -> List[BytesIO]:
    """Render one two-panel (left/right side) line chart per joint metric.

    Each figure plots an angle column of *df* against ``Time_in_sec``, is
    saved to an in-memory SVG buffer and converted with ``svg2rlg``.

    NOTE(review): despite the ``List[BytesIO]`` annotation, the returned list
    holds the drawing objects produced by ``svg2rlg(image)``, not the
    ``BytesIO`` buffers themselves -- confirm and correct the type hint.
    """
    # One entry per figure: title, shared y-limits, and the two
    # (x-column, y-column, x-label, y-label) axis specs.
    metadatas = [
        {
            "title": "膝关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LKnee_angle", "时间(秒)", "L 膝关节角度 (°)"],
                ["Time_in_sec", "RKnee_angle", "时间(秒)", "R 膝关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(内收外展)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LHip_angle", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "RHip_angle", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(屈曲伸展)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "TorsoLFemur_angle", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "TorsoRFemur_angle", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "髋关节角度变化周期(外旋内旋)",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LTibiaSelf_vector", "时间(秒)", "L 髋关节角度 (°)"],
                ["Time_in_sec", "RTibiaSelf_vector", "时间(秒)", "R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "躯干髋关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "TorsoLHip_angle", "时间(秒)", "躯干 L 髋关节角度 (°)"],
                ["Time_in_sec", "TorsoRHip_angle", "时间(秒)", "躯干 R 髋关节角度 (°)"]
            ]
        },
        {
            "title": "踝关节角度变化周期",
            "ylim": (0, 180),
            "axis": [
                ["Time_in_sec", "LAnkle_angle", "时间(秒)", "L 踝关节角度 (°)"],
                ["Time_in_sec", "RAnkle_angle", "时间(秒)", "R 踝关节角度 (°)"]
            ]
        }
    ]
    images = []
    # Seaborn style with a font able to render the Chinese labels.
    rc = {'font.sans-serif': 'SimHei',
          'axes.unicode_minus': False}
    sns.set_style(style='darkgrid', rc=rc)
    for metadata in metadatas:
        # Two stacked panels sharing the same y-range: left side on top,
        # right side below.
        fig, axes = plt.subplots(2, 1, figsize=(5.5, 7))
        fig.suptitle(metadata["title"])
        axes[0].set(ylim=metadata["ylim"])
        axes[1].set(ylim=metadata["ylim"])
        sns.lineplot(ax=axes[0], data=df, x=metadata["axis"][0][0], y=metadata["axis"][0][1]).set(
            xlabel=metadata["axis"][0][2],
            ylabel=metadata["axis"][0][3])
        sns.lineplot(ax=axes[1], data=df, x=metadata["axis"][1][0], y=metadata["axis"][1][1]).set(
            xlabel=metadata["axis"][1][2],
            ylabel=metadata["axis"][1][3])
        # Save the figure as SVG into memory, rewind, and convert for the
        # report renderer.
        image = BytesIO()
        fig.tight_layout()
        fig.savefig(image, format='svg')
        image.seek(0)
        images.append(svg2rlg(image))
    return images
| spianmo/GaitStudio | evaluate/ReportModuleBuilder.py | ReportModuleBuilder.py | py | 8,394 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
25049652193 | import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio,structural_similarity
import natsort
import cv2
import os
from tqdm import tqdm
def tensor2im(input_image, imtype=np.uint8):
    """Convert a (B, C, H, W) torch tensor with values in [0, 1] into an
    (H, W, 3) image array of dtype *imtype*.

    Only the first batch element is used; single-channel images are
    replicated to three channels.  Non-tensor inputs pass through unchanged.
    """
    if not isinstance(input_image, torch.Tensor):
        # Already an array (or anything else): return as-is.
        return input_image
    arr = input_image.data[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Grayscale -> pseudo-RGB by repeating the single channel.
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, clamp into [0, 1], then scale to 8-bit range.
    hwc = np.transpose(arr, (1, 2, 0))
    return (np.clip(hwc, 0, 1) * 255.0).astype(imtype)
def pil2tensor(im):
    """Convert an H x W x 3 PIL image (or array-like) into a (1, 3, H, W)
    float tensor with values scaled into [0, 1]."""
    scaled = np.float32(im) / 255            # HWC float32 in [0, 1]
    chw = scaled.transpose(2, 0, 1)          # HWC -> CHW
    return torch.Tensor(chw).unsqueeze(0)    # add batch dim -> (1, C, H, W)
def PSNR_SSIM(GT_path, Pred_Path):
    """Compute and print the mean PSNR/SSIM over two folders of images.

    Files are natural-sorted in each folder and paired positionally; extra
    files in the longer folder are ignored because zip stops at the shorter
    list.
    """
    gt_names = natsort.natsorted(os.listdir(GT_path))
    pred_names = natsort.natsorted(os.listdir(Pred_Path))
    psnr_scores = []
    ssim_scores = []
    for gt_name, pred_name in tqdm(zip(gt_names, pred_names), total=len(gt_names)):
        gt_img = cv2.imread(os.path.join(GT_path, gt_name))
        pred_img = cv2.imread(os.path.join(Pred_Path, pred_name))
        psnr_scores.append(peak_signal_noise_ratio(gt_img, pred_img))
        ssim_scores.append(structural_similarity(gt_img, pred_img, channel_axis=2))
    print("PSNR : {} SSIM: {}".format(np.average(psnr_scores), np.average(ssim_scores)))
{
"api_name": "numpy.uint8",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.tile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_n... |
18694607794 | # -*- coding: utf-8 -*-
"""
Functions to interact with the realsense recordings for HPPD project
"""
#%% imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cv2
import pyrealsense2 as rs
import mediapipe
import sys
import keyboard
import os
import csv
import datetime
import time
import tqdm
import logging
from . import utils
#%% functions
def getInfoTopicTable(fileCompleteName):
    '''
    Returns the frequency and the number of frames in a test by means of the
    functions of bagpy, consequently creates a folder in same directory of the
    bag file analyzed
    Counts the number of frames in the test loading the bagfile, accessing to
    the topics of image data and getting the value of Message Count
    Gets the frequency of execution loading the bagfile, accessing to the topics
    of image data and getting the value of Frequency
    Parameters
    ----------
    fileCompleteName : .bag file
        from realsense recording
    Returns
    -------
    frequency : int
        NB: the returned value is an int, the frequencies of acquisition of the two
        channels may differ and are slightly lower than the nominal value
    numberOfFrames : int
        NB: the returned value is an estimation of the number of paired frames
        Since the two streams are not paired (the pairing is done with rs.playback)
        the number of frames for the color and depth images can be different and not
        equal to the number of paired frames that are obtained executing a playback.
    '''
    # NOTE(review): `bagpy` does not appear in this module's visible imports --
    # confirm it is imported elsewhere in the file before relying on this
    # function.
    # reads the bag file (bagpy also extracts topic data next to the file)
    b = bagpy.bagreader(fileCompleteName)
    # extracts the topic table
    topicTable = b.topic_table
    # from the topic_table creates a new pandas dataframe with the two topics
    # of interest (depth and color image streams)
    interestingTopics = topicTable.loc[ \
        (topicTable['Topics'] == '/device_0/sensor_0/Depth_0/image/data') | \
        (topicTable['Topics'] == '/device_0/sensor_1/Color_0/image/data') ]
    # mean stream frequency rounded up; the larger of the two message counts
    # is used as the frame-count estimate
    frequency = np.ceil(interestingTopics.loc[:,"Frequency"].mean())
    numberOfFrames = interestingTopics.loc[:,"Message Count"].max()
    return frequency, numberOfFrames
def getDataFromIndex(fileCompleteName, index):
    '''
    Return (timestamp, rgb image, depth image) of the paired frame at `index`.

    A playback of the file is executed from the beginning, so the higher the
    index, the slower the function.

    Parameters
    ----------
    fileCompleteName : bag file from realsense recording
        contains the data of rgb and depth images ('.bag' is appended if missing)
    index : int
        0-based index of the paired frame that is required

    Returns
    -------
    timestamp_s : float
        timestamp corresponding to the recording of the frame, in seconds;
        to print the corresponding date:
        >>> print(datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f'))
    color_image_rgb : matrix h*w*3
        Contains the rgb channel values of every pixel
    depth_image : matrix h*w
        Contains the depth value of every pixel

    All three values are None when the recording has fewer than index+1
    frames (previously this case crashed with UnboundLocalError).
    '''
    if not fileCompleteName[-4:] == '.bag':
        fileCompleteName = fileCompleteName + '.bag'
    # =========================================================================
    # START THE STREAM OF THE PIPELINE
    # =========================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # non-real-time playback: frames are served as fast as they are consumed
    playback.set_real_time(False)
    colorizer = rs.colorizer()
    colorizer.set_option(rs.option.color_scheme, 1) # jet
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =========================================================================
    # INITIALIZATION
    # =========================================================================
    # so at the first execution becomes 0
    frameCounter = -1
    # defaults returned when the stream ends before reaching `index`
    timestamp_s = None
    color_image_rgb = None
    depth_image = None
    try:
        # BUGFIX: the condition used to be `<= index`, which processed one
        # extra frame and returned the data of frame index + 1
        while frameCounter < index:
            try:
                frame = pipeline.wait_for_frames()
            except:
                # playback exhausted before reaching the requested index
                break
            frameCounter = frameCounter + 1
            # =================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =================================================================
            # alignment of the frames: the obtained resolution is the one of
            # the rgb image
            frame = aligned_stream.process(frame)
            # get the depth and color frames
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # the image saved in the bag file is in rgb format,
            # the one required from mediapipe as well
            color_image_rgb = np.asanyarray(color_frame.get_data())
            depth_image = np.asanyarray(depth_frame.get_data())
    finally:
        # =====================================================================
        # OTHER OPERATIONS
        # =====================================================================
        # stop the pipeline and close the windows in every case
        pipeline.stop()
        cv2.destroyAllWindows()
    return timestamp_s, color_image_rgb, depth_image
def loadTopic(bagreaderElement, topicName, printLoadingTime):
    """
    Extract one topic of a bag file and load it as a pandas DataFrame.

    The bagpy bagreader writes the topic to a csv file on disk and returns
    its path; that csv is then read back with pandas.

    Parameters
    ----------
    bagreaderElement : return of the bagreader function
        example: b = bagreader(bagFileCompletePath)
    topicName : String
        The name of the topic that wants to be loaded
    printLoadingTime : Boolean
        If True, the elapsed time is logged after the csv extraction and
        again after the csv is loaded

    Returns
    -------
    A pandas dataframe corresponding to the topic
    """
    if printLoadingTime:
        start_time = time.time()
    # bagpy dumps the topic to a csv file and returns its location
    csv_path = bagreaderElement.message_by_topic(topic = topicName)
    if printLoadingTime:
        logging.info('Time elapsed: {:.2f} [s]'.format(time.time() - start_time))
    # load the csv file previously generated
    topic_df = pd.read_csv(csv_path)
    if printLoadingTime:
        logging.info('Time elapsed: {:.2f} [s]'.format(time.time() - start_time))
    return topic_df
def createTimesDataFrame(metaDataframe, freq, rgb_depth):
    """
    The metadata table contains 24 (21) lines for every acquired frame of the
    depth (rgb) channel;
    In both tables, among the other values, different times are expressed:
        - index_time
        - system_time
        - Time of Arrival
        - Backend TimeStamp
    New dataframe is created, contains the four times already present and the
    nominal time (the theorical one, if the acquision would work perfectly,
    taking into account the length of the others)

    Parameters
    ----------
    metaDataframe : pandas dataframe of metadata
        Can come from depth or rgb channel
    freq : int
        Frequency of acquisition of the frames
    rgb_depth : string
        Declares if the metadata dataframe is from depth ('depth', 'stereo',
        '3d') or rgb ('rgb')

    Returns
    -------
    time_df : pandas dataframe containing 5 columns, all in ms and relative
        to the first frame:
        'index time';
        'system time';
        'arrival time';
        'backend time';
        'nominal time'.
    global_system_time : a pandas dataframe containing 1 column with the
        absolute (non-zeroed) system time of every frame
    """
    # renaming for shorter handling
    df = metaDataframe
    # recognition if it's an rgb or a depth dataframe: the two metadata
    # layouts differ in rows-per-frame and in the row offset of each field
    if rgb_depth == 'rgb':
        # how many rows for each frame
        skipRows = 21
        # index of the first element related to that magnitude on the table
        system_time_row = 0
        time_of_arrival_row = 6
        backend_timestamp_row = 7
    elif rgb_depth == 'depth' or rgb_depth == 'stereo' or rgb_depth == '3d':
        # how many rows for each frame
        skipRows = 24
        # index of the first element related to that magnitude on the table
        system_time_row = 0
        time_of_arrival_row = 8
        backend_timestamp_row = 9
    else:
        logging.error('not recognized dataframe')
        # NOTE(review): returns a single None while every caller unpacks two
        # values (TypeError at the call site) -- confirm intended behavior
        return None
    # obtaining the shape of the dataframe
    (rows, columns) = df.shape
    # extracting the lines from the data frames: one row every `skipRows`,
    # starting at the field-specific offset
    index_time = df.iloc[np.arange(0, rows, skipRows), 0]
    global_system_time = df.iloc[np.arange(system_time_row, rows, skipRows), 2].astype(float)
    time_of_arrival = df.iloc[np.arange(time_of_arrival_row, rows, skipRows), 2].astype(float)
    backend_timestamp = df.iloc[np.arange(backend_timestamp_row, rows, skipRows), 2].astype(float)
    # some arrays are giving absolute time: make them relative to frame 0
    system_time = (global_system_time - global_system_time.iloc[0])
    time_of_arrival = (time_of_arrival - time_of_arrival.iloc[0])
    backend_timestamp = (backend_timestamp - backend_timestamp.iloc[0])
    # converting to numpy array
    index_time_array = index_time.to_numpy()
    global_system_time_array = global_system_time.to_numpy()
    system_time_array = system_time.to_numpy()
    time_of_arrival_array = time_of_arrival.to_numpy()
    backend_timestamp_array = backend_timestamp.to_numpy()
    # creating also the nominal time array (perfect acquisition at `freq` Hz)
    nominal_time_array = np.arange(0, len(index_time_array)*1/freq, 1/freq)
    # since different precisions on len()*1/freq and np.arange is different,
    # an element can be added, double check the array
    nominal_time_array = nominal_time_array[0 : len(index_time_array)]
    # explication of different precisions: try the code below
    # print(len(index_time_array) * 1/depth_freq)
    # print(nominal_time_array[-5:])
    # conversion of every array from s to ms
    index_time_array = index_time_array * 1000
    #system_time_array # is alreay in ms
    #time_of_arrival_array # is alreay in ms
    #backend_timestamp_array # is alreay in ms
    nominal_time_array = nominal_time_array * 1000
    # creating a dataframe with the four measured times plus the nominal one
    d = {'index time': index_time_array, \
         'system time': system_time_array, \
         'arrival time': time_of_arrival_array, \
         'backend time': backend_timestamp_array, \
         'nominal time': nominal_time_array}
    time_df = pd.DataFrame(data=d)
    #display(time_df)
    # check the types
    #dataTypeSeries = time_df.dtypes
    #print(dataTypeSeries)
    # the absolute system time is returned separately, in its own dataframe
    d = {'global system time': global_system_time_array}
    global_system_time = pd.DataFrame(data=d)
    return time_df, global_system_time
def plotTiming(timeDataframe, freq, title, essentialPlots):
    """
    Creates 4 subplots displaying timing information

    Upper left: time elapsed at the acquisition of every frame with respect to
    the start of the acquisition
    Upper right: time elapsed between each couple of frames
    Lower left: drift with respect to the nominal time (the final value is the
    delay with respect to the theorically perfect recording); when freq == 0
    the plot shows instead the pairwise differences between the time columns
    Lower Right: Histogram of the time elapsed between each couple of frames

    Parameters
    ----------
    timeDataframe : pandas dataframe containing the timing information
        use the one returned from "createTimesDataFrame"
    freq : int
        Frequency of acquisition of the frames (0 disables the nominal-time
        reference and switches the lower-left plot to pairwise differences)
    title : string
        Title of the whole figure
    essentialPlots : bool
        If True, only 'system time' (and 'nominal time') is plotted

    Returns
    -------
    None.
    """
    fig, axes = plt.subplots(nrows=2, ncols=2)
    fig.suptitle(title, fontsize=16)
    # renaming for shorter handling
    if essentialPlots: # only system time is considered
        df = timeDataframe[['system time', 'nominal time']]
    else:
        df = timeDataframe
    # obtaining the shape of the dataframe
    (rows, columns) = df.shape
    # elapsed time
    this_ax = axes[0,0]
    df.plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("elapsed time to acquire each frame")
    # time difference
    this_ax = axes[0,1]
    df.diff().plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("dt between each frame and previous one")
    # distribution of time difference (gaussian hopefully)
    this_ax = axes[1,1]
    # solution 1: doesn't plot nominal time and resizes automatically
    df.diff().loc[:,df.diff().columns != 'nominal time'].plot.hist(bins = 30, ax = this_ax, alpha = 0.5)
    # solution 2: plots also nominal time but doesn't resize automatically
    # plot = df.diff().plot(kind = 'density', ax = this_ax)
    # this_ax.set_ylim(-0.1, 1.5)
    # to give a reference with the nominal time
    if freq != 0:
        this_ax.axvline(1/freq*1000, label = 'nominal', color = 'C4')
    this_ax.grid()
    this_ax.set_xlabel("[ms]")
    this_ax.set_ylabel("frequency")
    # if freq != 0:
    #     this_ax.set_xlim(1/freq*0.7*1000, 1/freq*1.3*1000)
    this_ax.set_title("time distribution")
    this_ax.legend()
    if freq != 0:
        # new dataframe containing the difference with the nominal time
        # creating an empty data frame
        tmp_df = pd.DataFrame()
        # getting the names of the columns from the previous database
        columnNames = df.columns.values.tolist()
        for column in range(0,columns):
            # computing the difference, storing it in tmp
            tmp = df.iloc[:,column] - df['nominal time']
            # adding the tmp column to the dataframe
            tmp_df[columnNames[column]] = tmp
    else:
        # new dataframe containing the difference between each couple
        # creating an empty data frame
        tmp_df = pd.DataFrame()
        # getting the names of the columns from the previous database
        columnNames = df.columns.values.tolist()
        for i in range(columns): # for every column
            for j in range(i, columns): # from i to the max number to avoid rep
                if i != j: # to avoid the difference between two same array
                    tmp = df.iloc[:,i] - df.iloc[:,j]
                    tmp_df[str(columnNames[i] + ' - ' + columnNames[j])] = tmp
    # from here on, df holds the drift (or pairwise difference) columns
    df = tmp_df
    this_ax = axes[1,0]
    df.plot(ax = this_ax, style = '.-')
    this_ax.grid()
    this_ax.set_xlabel("frame number")
    this_ax.set_ylabel("[ms]")
    this_ax.set_title("drift with respect to nominal time")
    # plt.show(block=False)
    # plt.pause(0.1)
def infoTiming(timeDataFrame, columnName, freq):
    """
    Summarize the timing execution of a test in a dictionary.

    Compares the real timing (read from column `columnName` of
    `timeDataFrame`, expressed in ms) with the theoretical one implied by the
    nominal acquisition frequency `freq`. Keys of the returned dictionary:
        - 'freq th',
        - 'mean freq real',
        - 'std dev freq real',
        - 'time stamp th [ms]',
        - 'mean time stamp real [ms]',
        - 'std dev time stamp real [ms]',
        - 'elapsed time real [ms]',
        - 'number of samples real',
        - 'elapsed time th [ms]'  (time theoretically required to acquire the
          real number of samples),
        - 'number of samples th'  (samples that should have been acquired in
          the real elapsed time).

    Parameters
    ----------
    timeDataFrame : pandas dataframe
        Usually 'system time' is the most reliable column
    columnName : string
        Name of the column that wants to be analyzed
    freq : int
        Theorical frequency of acquisition [Hz]

    Returns
    -------
    d : dictionary
        Contains all timing parameters characterizing the test
    """
    n_rows = timeDataFrame.shape[0]
    time_col = timeDataFrame[columnName]
    # per-frame time differences in ms (the first element is NaN)
    deltas = time_col.diff()
    # frequencies [Hz]: n_rows samples span n_rows - 1 intervals
    freq_th = float(freq)
    mean_freq_real = float((n_rows - 1) / time_col.iloc[-1] * 1000)
    std_freq_real = float(np.nanstd(1 / deltas) * 1000)
    # time stamps [ms]
    time_stamp_theorical = 1 / freq * 1000
    mean_time_stamp_real = float(np.nanmean(deltas))
    std_time_stamp_real = float(np.nanstd(deltas))
    # elapsed time [ms] and number of samples
    elapsed_time_real = float(time_col.iloc[-1])
    number_of_samples_real = float(n_rows)
    # time theoretically required to acquire number_of_samples_real samples
    elapsed_time_theorical = number_of_samples_real / freq * 1000
    # samples that should have been acquired in elapsed_time_real
    number_of_samples_theorical = float(np.floor(elapsed_time_real / 1000 * freq))
    return {'freq th': freq_th,
            'mean freq real': mean_freq_real,
            'std dev freq real': std_freq_real,
            'time stamp th [ms]': time_stamp_theorical,
            'mean time stamp real [ms]': mean_time_stamp_real,
            'std dev time stamp real [ms]': std_time_stamp_real,
            'elapsed time real [ms]': elapsed_time_real,
            'number of samples real': number_of_samples_real,
            'elapsed time th [ms]': elapsed_time_theorical,
            'number of samples th': number_of_samples_theorical}
# def compareTiming(arrayOfTimes,arrayNames, *title):
# # creating the dataframe with the given arrays
# df = pd.DataFrame(arrayOfTimes).T
# # for the tile title
# if title:
# pass
# else:
# title = "comparison"
# # for the labels
# if arrayNames:
# df.columns = arrayNames
# # calling the plotTiming function with frequency = 0
# freq = 0
# plotTiming(df, freq, title, essentialPlots = False)
def logBagFile(bagFileCompletePath, depth_freq, rgb_freq, printLoadingTime, \
               showPlots, essentialPlots, showTimingTable):
    """
    Given a bag file, loads the metadata files regarding the rgb and the depth
    channel and plots figures to show the timing execution

    Parameters
    ----------
    bagFileCompletePath : String
        path to the bag file
    depth_freq : Int
        Frequency of acquisition of the depth channel
    rgb_freq : Int
        Frequency of acquisition of the rgb channel
    printLoadingTime : Bool
        If True, the elapsed time to load the topic is printed
        It's passed to the function loadTopic
    showPlots : Bool
        If True, shows the plots regarding the timing execution.
        It's a flag in this function
    essentialPlots : Bool
        If True, only system time is plotted,
        It's passed to the function plotTiming
    showTimingTable : Bool
        If True, from the two dictionaries containing the timing information
        (the one that are also returned), creates a pandas dataframe and prints it

    Returns
    -------
    dictDEP : dictionary
        Contains all parameters characterizing the test of the depth channel
    dictRGB : dictionary
        Contains all parameters characterizing the test of the rgb channel
    df_depth_time : pandas dataframe
        relative times of the depth channel (see createTimesDataFrame)
    df_rgb_time : pandas dataframe
        relative times of the rgb channel (see createTimesDataFrame)
    global_depth_time : pandas dataframe
        absolute system time of the depth channel
    global_rgb_time : pandas dataframe
        absolute system time of the rgb channel
    """
    # to get the name of the file
    path, fileName = os.path.split(bagFileCompletePath)
    logging.info('Loading information on the file: ' + fileName)
    # NOTE(review): `bagpy` is not among the imports visible at the top of
    # this module -- confirm it is imported elsewhere
    # creates the bagreader element
    b = bagpy.bagreader(bagFileCompletePath)
    # loading the metadata topics (the data topics are too heavy)
    df_depth_meta = loadTopic(b, '/device_0/sensor_0/Depth_0/image/metadata', printLoadingTime)
    df_rgb_meta = loadTopic(b, '/device_0/sensor_1/Color_0/image/metadata', printLoadingTime)
    # build the relative/absolute time tables for both channels
    df_depth_time, global_depth_time = createTimesDataFrame(df_depth_meta, depth_freq, 'depth')
    df_rgb_time, global_rgb_time = createTimesDataFrame(df_rgb_meta, rgb_freq, 'rgb')
    if showPlots:
        plotTiming(df_depth_time, depth_freq, (fileName + ' - DEPTH'), essentialPlots)
        plotTiming(df_rgb_time, rgb_freq, (fileName + ' - RGB'), essentialPlots)
    # summary statistics based on the (most reliable) system time column
    dictDEP = infoTiming(df_depth_time, 'system time', depth_freq)
    dictRGB = infoTiming(df_rgb_time, 'system time', rgb_freq)
    if showTimingTable:
        results = pd.DataFrame({'depth':pd.Series(dictDEP),'rgb':pd.Series(dictRGB)})
        print(results)
    return dictDEP, dictRGB, df_depth_time, df_rgb_time, global_depth_time, global_rgb_time
def getTimeStampArray(bagFileCompleteName, printInfo = False):
    """
    Executes a playback of the whole test to get the time stamp array

    Parameters
    ----------
    bagFileCompleteName : String
        directory to the bag file
    printInfo : bool, optional
        Set true if you want to print the timeframe stored at each iteration.
        The default is False.

    Returns
    -------
    time_stamp_array : float64 array
        array containing the corresponding ms of acquisition of each frame
        (empty array when the recording contains no frames)
    """
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, bagFileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    # non-real-time playback: frames are served as fast as they are consumed
    playback.set_real_time(False)
    # collect into a python list: list.append is O(1), while the previous
    # per-frame np.append copied the whole array at every frame
    # (quadratic total cost)
    time_stamps = []
    try:
        while True:
            try:
                frames = pipeline.wait_for_frames()
            except:
                # playback exhausted
                break
            tmp = frames.get_timestamp()
            if printInfo:
                print(datetime.datetime.fromtimestamp(tmp/1000).strftime('%Y-%m-%d %H:%M:%S.%f'))
            time_stamps.append(tmp)
    finally:
        pipeline.stop()
    if printInfo:
        print('all the frames were analyzed')
    # single conversion keeps the float64 ndarray return type of before
    return np.asarray(time_stamps, dtype=float)
def extractAviVideosFromBag(fileCompleteName, outputDir, frequency = 60, numberOfFrames = 20000, color = True, depth_splitted = True, depth_colorized = True, textOnImage = True):
    '''
    Saves in the specified folder a folder with the name of the test.
    The subfolder contains a csv file with the timestamp of each paired frame
    and up to three avi videos:

    - COL video: plain extraction of the rgb channel (written as bgr frames);
    - DEP colorized video: rendering of the depth info through a colormap;
    - DEP splitted video: the 16 bit depth value of every pixel is packed
      into two 8 bit channels of the avi frame:
          lsb = depth % 256   -> third channel
          msb = depth / 256   -> second channel
          first channel is left to zero

    When using this function, keep in mind that the avi video is a compression
    of the information that each frame has.

    Parameters
    ----------
    fileCompleteName : .bag file
        .bag file containing the rgb/bgr frames, the depth frames and the
        time array ('.bag' is appended if missing)
    outputDir : string
        directory where the files will be saved
    frequency : int, optional
        nominal frequency of recording, frequency for the video saved in .avi
        The default is 60.
    numberOfFrames : int, optional
        attended number of frames in the recording. The extractor will do
        numberOfFrames iterations, or, if the extraction is complete,
        will stop earlier. Better put a larger number than the actual one.
        Useful to print the loading bar.
        The default is 20000.
    color : bool, optional
        if True, the COL video is saved. The default is True.
    depth_splitted : bool, optional
        if True, the DEP splitted video is saved. The default is True.
    depth_colorized : bool, optional
        if True, the DEP colorized video is saved. The default is True.
    textOnImage : bool, optional
        set true if you want to add the timing information on the images.
        The default is True.

    Returns
    -------
    time_exec_array: array
        contains information about the execution of the extraction
    '''
    if textOnImage:
        # =====================================================================
        # WRITE ON THE IMAGE PARAMS
        # =====================================================================
        font = cv2.FONT_HERSHEY_SIMPLEX
        origin = (20, 20)
        fontScale = 0.8
        # BUGFIX: this tuple used to be assigned to `color`, silently
        # clobbering the boolean `color` parameter of the function
        text_color = (255, 255, 255)
        thickness = 1
    # check extension of the file
    fileCompleteName = utils.checkExtension(fileCompleteName, '.bag')
    # get only the file name excluding ".bag"
    fileName = os.path.split(fileCompleteName)[1][:-4]
    # in order to give a unique name to the execution
    thisExecutionDate = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y%m%d%H%M%S')
    # create folder for the given execution of the given file
    outputFileDir = os.path.join(outputDir, fileName + '-' + thisExecutionDate)
    # create the folder if it doesn't exist
    os.makedirs(outputFileDir, exist_ok=True)
    # create the complete directory to the 3 different outputs
    if color:
        videoRGBCompleteName = os.path.join(outputFileDir, fileName + '-color.avi')
    if depth_splitted:
        videoDEPCompleteName = os.path.join(outputFileDir, fileName + '-depth splitted.avi')
    if depth_colorized:
        videoDEPcolorizedCompleteName = os.path.join(outputFileDir, fileName + '-depth colorized.avi')
    timeCSVCompleteName = os.path.join(outputFileDir, fileName + '-timestamp.csv')
    logging.info('working on: ' + fileName)
    # =========================================================================
    # START THE STREAM OF THE PIPELINE
    # =========================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(False)
    colorizer = rs.colorizer()
    colorizer.set_option(rs.option.color_scheme, 1) # jet
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =========================================================================
    # INITIALIZATION
    # =========================================================================
    # so at the first execution becomes 0
    frameCounter = -1
    # to save the timing execution of each loop (debug)
    time_exec_array = [0] * numberOfFrames
    # to save the starting of the execution
    startTime = time.time()
    # at each iteration the timestamp of the current frame is stored
    timestamp_array = [0] * numberOfFrames
    # video writers are created lazily on the first frame and released in the
    # finally block (BUGFIX: they were never released before)
    videoOutCol = None
    videoOutDep = None
    videoOutDepCol = None
    try:
        for i in tqdm.tqdm(range(numberOfFrames)):
            try:
                frame = pipeline.wait_for_frames()
            except:
                # playback exhausted before numberOfFrames iterations
                break
            frameCounter = frameCounter + 1
            # time frame on the execution of the loop
            time_exec_array[frameCounter] = time.time() - startTime
            # =================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =================================================================
            # alignment of the frames: the obtained resolution is the one of
            # the rgb image
            frame = aligned_stream.process(frame)
            # get the depth and color frames
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # the image saved in the bag file is in rgb format
            color_image_rgb = np.asanyarray(color_frame.get_data())
            depth_image = np.asanyarray(depth_frame.get_data())
            # CREATE COLOR IMAGE: cv2 handles images in bgr
            color_image_bgr = cv2.cvtColor(color_image_rgb, cv2.COLOR_BGR2RGB)
            # CREATE DEPTH IMAGE through conversion of the 16 bit depth into
            # two 8 bit channels
            dep_image_height, dep_image_width = depth_image.shape
            zerosbit = np.zeros([dep_image_height, dep_image_width], dtype = np.uint8) # 480,848...
            # less significant bits are the rest of the division by 256
            lsb = (depth_image % 256).astype(np.uint8)
            # most significant bits are the division by 256 without rest
            msb = (depth_image / 256).astype(np.uint8)
            depth_image_3ch = cv2.merge([zerosbit, msb, lsb])
            # CREATE DEPTH IMAGE COLORIZED through colorizer
            depth_image_colorized = np.asanyarray(colorizer.colorize(depth_frame).get_data())
            if textOnImage:
                stringForImage = 'frame: {:05d} - '.format(frameCounter) + \
                    datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f')
                # puts text on the images
                if color:
                    color_image_bgr = cv2.putText(color_image_bgr, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
                if depth_splitted:
                    depth_image_3ch = cv2.putText(depth_image_3ch, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
                if depth_colorized:
                    depth_image_colorized = cv2.putText(depth_image_colorized, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
            if frameCounter == 0:
                # the writers need the image size, known only after the first
                # frame has been decoded
                if color:
                    image_height, image_width, _ = color_image_bgr.shape
                    videoOutCol = cv2.VideoWriter(videoRGBCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
                if depth_splitted:
                    image_height, image_width, _ = depth_image_3ch.shape
                    videoOutDep = cv2.VideoWriter(videoDEPCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
                if depth_colorized:
                    image_height, image_width, _ = depth_image_colorized.shape
                    videoOutDepCol = cv2.VideoWriter(videoDEPcolorizedCompleteName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frequency, (image_width, image_height))
            if color:
                videoOutCol.write(color_image_bgr)
            if depth_splitted:
                videoOutDep.write(depth_image_3ch)
            if depth_colorized:
                videoOutDepCol.write(depth_image_colorized)
            timestamp_array[frameCounter] = timestamp_s
    finally:
        # entries 0..frameCounter of the preallocated lists are filled
        # BUGFIX: the trim used to be [:frameCounter], dropping the data of
        # the last processed frame
        timestamp_array = timestamp_array[:frameCounter + 1]
        time_exec_array = time_exec_array[:frameCounter + 1]
        if timestamp_array:
            # create the folder if it doesn't exist
            os.makedirs(os.path.split(timeCSVCompleteName)[0], exist_ok=True)
            # saves the timestamps in a csv file
            df = pd.DataFrame(np.vstack(timestamp_array), columns=['timestamp'])
            df.to_csv(timeCSVCompleteName, index = False)
        # release the writers so the avi files are properly finalized
        for writer in (videoOutCol, videoOutDep, videoOutDepCol):
            if writer is not None:
                writer.release()
        # =====================================================================
        # OTHER OPERATIONS
        # =====================================================================
        # stop the pipeline
        pipeline.stop()
        # close all the windows
        cv2.destroyAllWindows()
    # gives few information to the user
    elapsedTime = time.time()-startTime
    freqOfExecution = (frameCounter + 1)/elapsedTime
    logging.info("{:d} frames were analyzed in {:.2f} seconds ({:.2f} frames per second)"\
        .format(frameCounter + 1, elapsedTime, freqOfExecution))
    return time_exec_array
def extractPngFramesFromBag(fileCompleteName, outputDir, frequency = 60, numberOfFrames = 20000, textOnImage = True):
    '''
    Saves in the specified folder a folder with the name of the test.
    The subfolder contains a csv file with the timestamp of each paired frame
    and two other subfolders: COL and DEP channel.
    For the COL folder, it's the extraction of the rgb frame,
    in format w*h*3 of integer 8bit (0->255), written in bgr order as
    expected by cv2.imwrite
    For the DEP folder, it's the extraction of the dep frame,
    in format w*h*1 of integer 16bit (0->65535)

    Parameters
    ----------
    fileCompleteName : .bag file
        .bag file containing the rgb/bgr frames, the depth frames and the
        time array ('.bag' is appended if missing)
    outputDir : string
        directory where the files will be saved
    frequency : int, optional
        nominal frequency of recording; kept for interface compatibility,
        it is not used when saving single png frames.
        The default is 60.
    numberOfFrames : int, optional
        attended number of frames in the recording. The extractor will do
        numberOfFrames iterations, or, if the extraction is complete,
        will stop earlier. Better put a larger number than the actual one.
        Useful to print the loading bar.
        The default is 20000.
    textOnImage : bool, optional
        set true if you want to add the timing information on the images.
        The default is True.

    Returns
    -------
    time_exec_array: array
        contains information about the execution of the extraction
    '''
    if textOnImage:
        # =====================================================================
        # WRITE ON THE IMAGE PARAMS
        # =====================================================================
        font = cv2.FONT_HERSHEY_SIMPLEX
        origin = (20, 20)
        fontScale = 0.8
        text_color = (255, 255, 255)
        thickness = 1
    # check extension of the file
    fileCompleteName = utils.checkExtension(fileCompleteName, '.bag')
    # get only the file name excluding ".bag"
    fileName = os.path.split(fileCompleteName)[1][:-4]
    # in order to give a unique name to the execution
    thisExecutionDate = datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y%m%d%H%M%S')
    # create folder for the given execution of the given file
    outputFileDir = os.path.join(outputDir, fileName + '-' + thisExecutionDate)
    # create directory of folders for saving col and dep
    outputCOLDir = os.path.join(outputFileDir, 'col')
    outputDEPDir = os.path.join(outputFileDir, 'dep')
    # create the folders if they don't exist
    os.makedirs(outputFileDir, exist_ok=True)
    os.makedirs(outputCOLDir, exist_ok = True)
    os.makedirs(outputDEPDir, exist_ok = True)
    # create the complete directory
    timeCSVCompleteName = os.path.join(outputFileDir, 'timestamp.csv')
    logging.info('working on: ' + fileName)
    # =========================================================================
    # START THE STREAM OF THE PIPELINE
    # =========================================================================
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, fileCompleteName, repeat_playback = False)
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(False)
    aligned_stream = rs.align(rs.stream.color) # alignment depth -> color
    # =========================================================================
    # INITIALIZATION
    # =========================================================================
    # so at the first execution becomes 0
    frameCounter = -1
    # to save the timing execution of each loop (debug)
    time_exec_array = [0] * numberOfFrames
    # to save the starting of the execution
    startTime = time.time()
    # at each iteration the timestamp of the current frame is stored
    timestamp_array = [0] * numberOfFrames
    try:
        for i in tqdm.tqdm(range(numberOfFrames)):
            try:
                frame = pipeline.wait_for_frames()
            except:
                # playback exhausted before numberOfFrames iterations
                break
            # (a leftover debugging hook `if i == 322: debugFlag = True`
            # was removed here)
            frameCounter = frameCounter + 1
            # time frame on the execution of the loop
            time_exec_array[frameCounter] = time.time() - startTime
            # =================================================================
            # GET THE REQUIRED DATA FROM THE BAG FILE
            # =================================================================
            # alignment of the frames: the obtained resolution is the one of
            # the rgb image
            frame = aligned_stream.process(frame)
            # get the depth and color frames
            depth_frame = frame.get_depth_frame()
            color_frame = frame.get_color_frame()
            # get the timestamp in seconds
            timestamp_s = frame.get_timestamp()/1000
            # the image saved in the bag file is in rgb format,
            # the one for cv2 should be in bgr
            color_image_rgb = np.asanyarray(color_frame.get_data())
            color_image_bgr = cv2.cvtColor(color_image_rgb, cv2.COLOR_BGR2RGB)
            depth_image = np.asanyarray(depth_frame.get_data())
            if textOnImage:
                stringForImage = 'frame: {:05d} - '.format(frameCounter) + \
                    datetime.datetime.fromtimestamp(timestamp_s).strftime('%Y-%m-%d %H:%M:%S.%f')
                # puts text on the image
                color_image_bgr = cv2.putText(color_image_bgr, stringForImage, origin, font, fontScale, text_color, thickness, cv2.LINE_AA)
                # makes no sense to write on the depth image since it's saved
                # in 16 bit format
            frameName = '{:05d}'.format(frameCounter)
            cv2.imwrite(os.path.join(outputCOLDir, frameName+'.png'), color_image_bgr)
            cv2.imwrite(os.path.join(outputDEPDir, frameName+'.png'), depth_image)
            timestamp_array[frameCounter] = timestamp_s
    finally:
        # entries 0..frameCounter of the preallocated lists are filled
        # BUGFIX: the trim used to be [:frameCounter], dropping the data of
        # the last processed frame
        timestamp_array = timestamp_array[:frameCounter + 1]
        time_exec_array = time_exec_array[:frameCounter + 1]
        if timestamp_array:
            # create the folder if it doesn't exist
            os.makedirs(os.path.split(timeCSVCompleteName)[0], exist_ok=True)
            # saves the timestamps in a csv file
            df = pd.DataFrame(np.vstack(timestamp_array), columns=['timestamp'])
            df.to_csv(timeCSVCompleteName, index = False)
        # =====================================================================
        # OTHER OPERATIONS
        # =====================================================================
        # stop the pipeline
        pipeline.stop()
        # close all the windows
        cv2.destroyAllWindows()
    # gives few information to the user
    elapsedTime = time.time()-startTime
    freqOfExecution = (frameCounter + 1)/elapsedTime
    logging.info("{:d} frames were analyzed in {:.2f} seconds ({:.2f} frames per second)"\
        .format(frameCounter + 1, elapsedTime, freqOfExecution))
    return time_exec_array
| mmtlab/wheelchair_contact_detection | hppdWC/bagRS.py | bagRS.py | py | 43,231 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ceil",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.pipeline",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.config",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pyrealsense2.c... |
24680745592 | import base64
def e5(m): # base64
s = base64.b64decode(m)
s = s.decode()
return s
def e4(m, k=13): # Caesar shift cipher
m = m.lower()
s = ""
for i in range(len(m)):
s += chr((ord(m[i]) - k - 97) % 26 + 97)
return s
def e2(m, k): # Vigenere cipher
m = m.lower()
k = k.lower()
s = ""
while len(k) < len(m):
k += k
for i in range(len(m)):
s += chr((ord(m[i]) - ord(k[i])) % 26 + 97)
return s
def key_square(k):
k = k.lower()
s = ""
alphabet = "abcdefghiklmnopqrstuvwxyz"
for i in k:
if i not in s:
s += i
for j in k:
if j not in alphabet:
s += j
key_sq = []
for e in range(5):
key_sq.append('')
# Break it into 5*5
key_sq[0] = s[0:5]
key_sq[1] = s[5:10]
key_sq[2] = s[10:15]
key_sq[3] = s[15:20]
key_sq[4] = s[20:25]
return key_sq
def cipher_to_digraphs(cipher):
i = 0
new = []
for x in range(len(cipher) // 2 ):
new.append(cipher[i:i + 2])
i = i + 2
return new
def find_position(key_sq, letter):
for i in range(len(key_sq)):
s = key_sq[i]
if s.find(letter) != -1:
return i, s.find(letter)
def e1(m, k): # Playfair cipher
cipher = cipher_to_digraphs(m)
key_matrix = key_square(k)
plaintext = ""
for e in cipher:
p1, q1 = find_position(key_matrix, e[0])
p2, q2 = find_position(key_matrix, e[1])
if p1 == p2:
if q1 == 4:
q1 = -1
if q2 == 4:
q2 = -1
plaintext += key_matrix[p1][q1 - 1]
plaintext += key_matrix[p1][q2 - 1]
elif q1 == q2:
if p1 == 4:
p1 = -1
if p2 == 4:
p2 = -1
plaintext += key_matrix[p1 - 1][q1]
plaintext += key_matrix[p2 - 1][q2]
else:
plaintext += key_matrix[p1][q2]
plaintext += key_matrix[p2][q1]
return plaintext
m = "d3ZucXN0b2tib2xlamp5ZW5zdnlicGpsa3VhcGx2"
m5 = e5(m)
m4 = e4(m5, 13)
m3 = e4(m4, 20) # Since both are ceaser shift ciphers, same function is called
m2 = e2(m3, 'cryptography')
m1 = e1(m2, 'natdszgrqhebvpmxilfywcuko')
print(m1)
| SudeshGowda/Systems-recruitment-task | Decoder.py | Decoder.py | py | 2,373 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "base64.b64decode",
"line_number": 5,
"usage_type": "call"
}
] |
6554339298 | from __future__ import annotations
# IMPORTS
# =======>
# noinspection PyUnresolvedReferences
import typing
import pegen.parser as pegen
# EXPORTS
# =======>
__all__ = [
'memoize',
'memoize_left_rec',
]
# MAIN CONTENT
# ============>
if typing.TYPE_CHECKING:
from pegen.parser import Parser
F = typing.TypeVar("F", bound=typing.Callable[..., typing.Any])
P = typing.TypeVar("P", bound="Parser")
T = typing.TypeVar("T")
def memoize(method: F) -> F:
"""
A wrapper for memoize from pegen.parser that overrides list type
"""
method = pegen.memoize(method)
def wrapper(self: pegen.Parser, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
result = method(self, *args, **kwargs)
if isinstance(result, list):
return memoize.List(elements=result) # type: ignore
return result
return typing.cast(F, wrapper)
def memoize_left_rec(method: typing.Callable[[P], typing.Optional[T]]) -> typing.Callable[[P], typing.Optional[T]]:
"""
A wrapper for memoize_left_rec from pegen.parser that overrides list type
"""
method = pegen.memoize_left_rec(method)
def wrapper(self: pegen.Parser, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
result = method(self, *args, **kwargs) # type: ignore
if isinstance(result, list):
return memoize.List(elements=result) # type: ignore
return result
return typing.cast(F, wrapper)
| ButterSus/KiwiPreview | frontend/parser/memoizetools.py | memoizetools.py | py | 1,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "typing.TypeVar",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "typing.... |
7822082403 | # 풀이 중도 포기 (2/1 이어서 시도)
from collections import deque
from sys import stdin
input = stdin.readline
def dfs(h, w):
queue = deque([h, w])
visited[h, w] = True
for i, j in li[h]:
if not visited[j]:
pass
h, w = map(int, input().split())
li = []
res = 0
max = 0
# 육지 바다 정보 입력
for _ in range(h):
li.append(list(map(str, input().split())))
for i in range(h):
for j in range(w):
if li[i][j] == 'L': #육지라면 bfs 탐색 돌림
visited = [[False]*w]*h
res = bfs(i, j)
if res > max:
max = res
print(res)
| Drizzle03/baekjoon_coding | 20230131/2589_Backtracking.py | 2589_Backtracking.py | py | 646 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
14059339607 | from utils import WordEmbeddingUtil, TextUtil
from config import Config
import numpy as np
import torch
word2vec_util = None
text_cnn_model = torch.load('../pretrained/text_cnn_static.h5')
def static_text_cnn_word2vec_predict(sentence):
global word2vec_util, text_cnn_model
if word2vec_util is None:
word2vec_util = WordEmbeddingUtil()
text_util = TextUtil()
row = text_util.text_normalization(sentence)
words = text_util.lemmatize_sentence(row)
words = text_util.filter_punctuation(words)
words = text_util.filter_stop_word(words)
words = text_util.get_words_with_len(words)
words_matrix = np.zeros([Config.SENTENCE_MAX_LEN, Config.EMBEDDING_SIZE], dtype=np.float32)
for idx, word in enumerate(words):
words_matrix[idx] = word2vec_util.get_word2vec_vec(word)
text_cnn_model.eval()
words_matrix_tensor = torch.Tensor(words_matrix)
words_matrix_tensor = torch.unsqueeze(words_matrix_tensor, 0)
predict = text_cnn_model(words_matrix_tensor)
result = predict.item()
return result
if __name__ == '__main__':
print(static_text_cnn_word2vec_predict("hello world"))
| miyazawatomoka/QIQC | script/predict.py | predict.py | py | 1,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.WordEmbeddingUtil",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.TextUtil",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"l... |
37489105113 | import struct
import utils
from random import randint
from binascii import hexlify
from abci import ABCIServer
from abci import BaseApplication
from abci import ResponseInfo
from abci import ResponseQuery
from abci import ResponseInitChain
from abci import ResponseCheckTx
from abci import ResponseDeliverTx
from abci import ResponseCommit
from abci import CodeTypeOk
from abci.types_pb2 import ResponseEndBlock
from abci.types_pb2 import ResponseBeginBlock
class SimpleCoin(BaseApplication):
"""
Simple cryptocurrency implementation, based on the state model.
Can do two things: sending coins and storing small pices of data
in the blockchain.
"""
def info(self, req):
"""Called by ABCI when the app first starts."""
self.conf = utils.read_conf()
self.db = utils.DatabaseProvider(conf=self.conf)
r = ResponseInfo()
r.last_block_height = self.db.get_block_height()
r.last_block_app_hash = self.db.get_block_app_hash().encode()
return r
def init_chain(self, v):
"""Set initial state on first run"""
for address, balance in self.conf['genesis']['lucky_bois'].items():
self.db.update_state(
address=address,
genesis_balance=balance,
genesis=True
)
self.db.set_block_height(0)
self.db.set_block_app_hash('')
return ResponseInitChain()
def check_tx(self, raw_tx):
"""Validate the Tx before entry into the mempool"""
try: # Check txn syntax
tx = utils.Transaction(raw_tx)
except Exception:
return Result.error(log='txn syntax invalid')
# Check "sender" account has enough coins
if int(self.db.get_address_info(tx.sender)['balance']) < tx.amount:
return ResponseCheckTx(log='insufficient funds', code=1)
if tx.signature_invalid: # Check txn signature
return ResponseCheckTx(log='signature invalid', code=1)
if tx.timestamp_invalid: # Check timestamp for a big delay
return ResponseCheckTx(log='lag time is more than 2 hours', code=1)
# Hooray!
return ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, raw_tx):
""" Mutate state if valid Tx """
try: # Handle unvalid txn
tx = utils.Transaction(raw_tx)
except Exception:
return ResponseDeliverTx(log='txn syntax invalid', code=1)
self.new_block_txs.append(tx)
self.db.update_state(tx=tx)
return ResponseDeliverTx(code=CodeTypeOk)
def query(self, reqQuery):
"""Return the last tx count"""
if reqQuery.path == 'balance':
address = reqQuery.data.decode('utf-8')
address_balance = self.db.get_address_info(address)['balance']
rq = ResponseQuery(
code=CodeTypeOk,
key=b'balance',
value=utils.encode_number(int(address_balance))
)
return rq
def begin_block(self, reqBeginBlock):
"""Called to process a block"""
self.new_block_txs = []
return ResponseBeginBlock()
def end_block(self, height):
"""Called at the end of processing. If this is a stateful application
you can use the height from here to record the last_block_height"""
self.db.set_block_height(increment=True)
if self.new_block_txs: # Change app hash only if there any new txns
self.db.set_block_app_hash(utils.get_merkle_root(self.new_block_txs))
return ResponseEndBlock()
def commit(self):
"""Return the current encode state value to tendermint"""
h = self.db.get_block_app_hash().encode()
return ResponseCommit(data=h)
if __name__ == '__main__':
app = ABCIServer(app=SimpleCoin(), port=26658)
app.run()
| SoftblocksCo/Simple_coin | application.py | application.py | py | 3,914 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "abci.BaseApplication",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "utils.read_conf",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.DatabaseProvider",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "abci.Resp... |
39056231859 | from numpy import genfromtxt,where,zeros,nan,ones
from glob import glob
from obspy.core.util.geodetics import gps2DistAzimuth
from matplotlib import pyplot as plt
from obspy import read
from obspy.core import UTCDateTime
from datetime import timedelta
lonepi=-122.3174
latepi=38.2118
time_epi=UTCDateTime('2014-08-24T10:20:44')
tplot=timedelta(seconds=100)
mul=1.5
pgd=genfromtxt('/Users/dmelgar/Napa2014/PGD/napa_test_nolatency.txt')
path='/Users/dmelgar/Napa2014/GPS/sac/'
lonlat=genfromtxt(u'/Users/dmelgar/Napa2014/unr_coords.txt',usecols=[1,2])
lon=lonlat[:,0]
lat=lonlat[:,1]
stas=genfromtxt(u'/Users/dmelgar/Napa2014/unr_coords.txt',usecols=0,dtype='S')
#Get lsit of files
filesn=glob(path+'*LXN.sac')
filese=glob(path+'*LXE.sac')
#Initalize
d=zeros(len(filese)) #epicentral distances
#Loop and plot
dmin=[]
dmax=0
plt.figure()
f,axarr=plt.subplots(1,2)
axe=axarr[1]
axn=axarr[0]
for k in range(len(filese)):
current_sta=filese[k].split("/")[-1].split(".")[0].upper()
i=where(current_sta==stas)[0]
try:
d,az,baz=gps2DistAzimuth(latepi,lonepi,lat[i],lon[i])
d=d/1000
dmin=min([dmin,d])
dmax=max([dmax,d])
except:
d=nan
#Read data
stn=read(filesn[k])
ste=read(filese[k])
#Trim
stn.trim(starttime=time_epi,endtime=time_epi+tplot,pad=True,fill_value=0)
ste.trim(starttime=time_epi,endtime=time_epi+tplot,pad=True,fill_value=0)
#Self Normalize
stn[0].data=stn[0].data/max([stn[0].data.max(),-stn[0].data.min()])
ste[0].data=ste[0].data/max([ste[0].data.max(),-ste[0].data.min()])
dplot=ones(ste[0].times().shape)*d
#Plot
axn.plot(stn[0].times(),stn[0].data*mul+dplot,'k')
axe.plot(ste[0].times(),ste[0].data*mul+dplot,'k')
axn.set_title('North')
axe.set_title('East')
axn.set_ylim(dmin-5,75)
axe.set_ylim(dmin-5,75)
axn.grid()
axe.grid()
axn.set_xlabel('Seconds after OT')
axe.set_xlabel('Seconds after OT')
axn.set_ylabel('Epicentral distance (km)')
axe.yaxis.set_ticklabels([])
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.05, hspace=0)
fig, ax1 = plt.subplots()
ax1.scatter(pgd[:,1],pgd[:,2])
ax1.set_xlabel('Seconds after OT')
ax1.set_xlim(0,100)
ax1.set_ylabel('Mw', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.scatter(pgd[:,1], pgd[:,3],marker='+', c='r')
ax2.set_ylabel('No. stations', color='r')
ax2.set_ylim(0,50)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.set_xlim(0,100)
plt.show()
| Ogweno/mylife | Napa_stuff/plot_PGD.py | plot_PGD.py | py | 2,500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "obspy.core.UTCDateTime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.genf... |
3738842637 | import pandas as pd
from bs4 import BeautifulSoup as bs
from splinter import Browser
def init_browser():
executable_path = {"executable_path": "chromedriver.exe"}
return Browser("chrome", **executable_path)
mars_dict = {}
#NASA Mars News
def scrape_mars_news():
try:
browser = init_browser()
news_paragraph_url = "https://mars.nasa.gov/news/"
browser.visit(news_paragraph_url)
news_paragraph_html = browser.html
news_paragraph_soup = bs(news_paragraph_html, "html.parser")
news_title = news_paragraph_soup.find("div", class_="content_title").find("a").text
news_p = news_paragraph_soup.find("div", class_="article_teaser_body").text
mars_dict["news_title"] = news_title
mars_dict["news_p"] = news_p
return mars_dict
finally:
browser.quit()
#JPL Mars Space Images
def scrape_mars_image():
try:
browser = init_browser()
space_images_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(space_images_url)
space_images_html = browser.html
featured_image_soup = bs(space_images_html, "html.parser")
featured_image_link = featured_image_soup.find("article")["style"].replace("background-image: url('", "").replace("');", "")
web_link = "https://www.jpl.nasa.gov"
featured_image_url = web_link + featured_image_link
mars_dict["featured_image_url"] = featured_image_url
return mars_dict
finally:
browser.quit()
#Mars Weather
def scrape_mars_weather():
try:
browser = init_browser()
mars_weather_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(mars_weather_url)
mars_weather_html = browser.html
mars_weather_soup = bs(mars_weather_html, "html.parser")
mars_weather_tweets = mars_weather_soup.find_all("div", class_="js-tweet-text-container")
for each_tweet in mars_weather_tweets:
tweet_text = each_tweet.find("p").text
if "pic.twitter.com" not in tweet_text:
mars_weather = each_tweet.find("p").text
break
else:
pass
mars_dict["mars_weather"] = mars_weather
return mars_dict
finally:
browser.quit()
#Mars Facts
def scrape_mars_facts():
try:
mars_facts_url = "http://space-facts.com/mars/"
mars_facts_df = pd.read_html(mars_facts_url)[0]
mars_facts_df.columns = ["description", "value"]
mars_facts_df.set_index("description", inplace=True)
mars_facts_html = mars_facts_df.to_html()
mars_dict["mars_facts"] = mars_facts_html
return mars_dict
except:
print("error")
#Mars Hemispheres
def scrape_mars_hemispheres():
try:
browser = init_browser()
mars_hemispheres_link = "https://astrogeology.usgs.gov"
mars_hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(mars_hemispheres_url)
mars_hemispheres_html = browser.html
mars_hemispheres_soup = bs(mars_hemispheres_html, "html.parser")
hemisphere_image_urls = []
mars_hemispheres_list = mars_hemispheres_soup.find_all("div", class_="item")
for each_hemisphere in mars_hemispheres_list:
title = each_hemisphere.find("h3").text
mars_hemispheres_image_link = each_hemisphere.find("a", class_="itemLink product-item")["href"]
mars_hemispheres_download_url = mars_hemispheres_link + mars_hemispheres_image_link
browser.visit(mars_hemispheres_download_url)
mars_hemispheres_download_html = browser.html
mars_hemispheres_download_soup = bs(mars_hemispheres_download_html, "html.parser")
mars_hemispheres_full_image_link = mars_hemispheres_download_soup.find("img", class_="wide-image")["src"]
mars_hemispheres_image_url = mars_hemispheres_link + mars_hemispheres_full_image_link
hemisphere_image_urls.append({"title" : title, "img_url" : mars_hemispheres_image_url})
mars_dict["hemisphere_image_urls"] = hemisphere_image_urls
return mars_dict
finally:
browser.quit()
#Scrape mars info
def scrape_mars_info():
try:
scrape_mars_news()
scrape_mars_image()
scrape_mars_weather()
scrape_mars_facts()
scrape_mars_hemispheres()
except:
print("error") | williamsit/Homework | Mission_To_Mars/scrape_mars.py | scrape_mars.py | py | 4,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "splinter.Browser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup... |
16103607796 | import imp
from multiprocessing.spawn import import_main_path
from django.shortcuts import render
from student.models.students import Student
def index(request):
if request.method == "POST":
name = request.POST.get("name")
adm = request.POST.get("adm")
print(name)
print(adm)
try:
student = Student(name=name,adm=adm)
student.save()
print("done")
except:
print("Fail")
student = Student.objects.all().order_by('-id')
data = {
"students" : student
}
return render(request,'index.html',data) | Python-Guruz/CRUD-DEMO | student/views/students.py | students.py | py | 612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "student.models.students",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "student.models.students.Student",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "student.models.students.save",
"line_number": 14,
"usage_type": "call"
},
{
... |
32523088106 | import os
from flask import Flask, jsonify, request, send_from_directory, Blueprint
from flask_restful import Api
from werkzeug.utils import secure_filename
from resources.invoice import InvoicesResource, InvoiceResource, MarkDigitizedInvoice
# from config import UPLOAD_FOLDER
UPLOAD_FOLDER = "./uploads/"
ALLOWED_EXTENSIONS = {'pdf'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app)
@app.route("/hello")
def index():
return jsonify({'message': 'hello world'})
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploads/<path:filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
return "Error! No file selected", 400
file = request.files['file']
if file.filename == '':
return "No file selected", 400
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return redirect(url_for('uploaded_file',
# filename=filename))
if os.path.isfile(os.path.join(app.config['UPLOAD_FOLDER'], filename)):
return 'File uploaded successfully', 200
else:
return 'Server Error in uploading file', 500
else:
return "Invalid file type: {}".format(file.mimetype), 415
return '''
<!doctype html>
<title>Upload new File</title>
<h2>Upload new File</h2>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
# register APIs
api.add_resource(InvoicesResource, "/invoices")
api.add_resource(InvoiceResource, "/invoices/<id>")
api.add_resource(MarkDigitizedInvoice, "/invoices/<id>/digitize")
if __name__ == "__main__":
from db import db
db.init_app(app)
# db.create_all()
app.run(port=5000, debug=True)
| KetanSingh11/Python_Assignment_-_Plate_IQ | plateiq_app/app.py | app.py | py | 2,447 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory... |
36558187570 | import heapq
from typing import List
def topKFrequent(nums: List[int], k: int) -> List[int]: # Verified on Leetcode
frequencies = {}
for num in nums:
if num not in frequencies:
frequencies[num] = 1
else:
frequencies[num] += 1
temp = []
for num, f in frequencies.items():
temp.append((f, num))
min_heap = temp[:k]
heapq.heapify(min_heap)
for item in temp[k:]:
if item[0] > min_heap[0][0]:
heapq.heapreplace(min_heap, item)
return list(map(lambda x: x[1], min_heap))
if __name__ == "__main__":
print(topKFrequent([1, 1, 1, 2, 2, 3], 2)) | InderdeepSync/grokking-coding-interview | top_k_elements/top_k_frequent_elements.py | top_k_frequent_elements.py | py | 646 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "heapq.heapify",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "heapq.heapreplace",
"line_number": 22,
"usage_type": "call"
}
] |
3272420780 | import json
import re
import requests
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.db import IntegrityError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from . import models
OW_API_KEY = "3f59299cb03f1d4beb6bd960a3f546fd"
@login_required
def index(request):
"""Home page view that displays current set of Locations with their weather information
along with available item operations."""
result = ""
appStatus = ""
owner = models.Owner.objects.filter(username=request.user)[0]
if request.method == "GET":
locations = models.Location.objects.filter(owner=owner)
for location in locations:
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}'.format(location.name,
OW_API_KEY)
locationWeather = requests.get(url).json()
if locationWeather['cod'] == 200:
location.temperature = locationWeather['main']['temp']
location.description = locationWeather['weather'][0]['description']
location.icon = locationWeather['weather'][0]['icon']
location.save()
else:
appStatus = "Refresh operation for {} failed. This could be an issue related with OpenWeatherMap, " \
"please contact with the administrator.".format(location.name)
result = "Fail"
break
if result != "Fail":
orderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
if orderList != "":
orderList = orderList.split(',')
sortedLocations = []
for locName in orderList:
sortedLocations.append(locations.get(name=locName))
return render(request, "index.html", {"locations": sortedLocations})
else:
return render(request, "index.html", {"locations": locations})
elif request.POST["submit"] == "Create":
locationName = request.POST['locationName']
if locationName == "":
appStatus = "Please choose a valid location name"
result = "Fail"
else:
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}'.format(locationName,
OW_API_KEY)
locationWeather = requests.get(url).json()
if locationWeather['cod'] == 200:
try:
if models.Location.objects.count() == 0:
newLocId = 0
else:
newLocId = models.Location.objects.latest('locID').locID + 1
models.Location.objects.create(locID=newLocId, name=locationWeather['name'],
temperature=locationWeather['main']['temp'],
description=locationWeather['weather'][0]['description'],
icon=locationWeather['weather'][0]['icon'], owner=owner)
oldOrderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
if oldOrderList != "":
newOrderList = oldOrderList + ',' + locationWeather['name']
models.Owner.objects.filter(username=request.user).update(orderList=newOrderList)
except IntegrityError:
appStatus = "Please choose a location name which does not exists in your current set of " \
"locations."
result = "Fail"
elif locationWeather['cod'] == '404' and locationWeather['message'] == 'city not found':
appStatus = "Location could not be found, please make sure that you enter a valid location name."
result = "Fail"
else:
appStatus = "Create operation failed. This could be an issue related with OpenWeatherMap, " \
"please contact with the administrator."
result = "Fail"
elif request.POST["submit"] == "Delete":
locationName = request.POST['locationName']
if locationName == "":
appStatus = "Please choose a valid location name"
result = "Fail"
else:
try:
models.Location.objects.filter(owner=owner).get(name=locationName).delete()
oldOrderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
newOrderList = re.sub(locationName + ',', "", oldOrderList)
if len(oldOrderList) == len(newOrderList):
newOrderList = re.sub(',' + locationName, "", oldOrderList)
models.Owner.objects.filter(username=request.user).update(orderList=newOrderList)
except models.Location.DoesNotExist:
appStatus = "Delete operation failed. Please make sure that location name " \
"exists in current set of Locations"
result = "Fail"
elif request.POST["submit"] == "LocationSort":
orderList = request.POST['orderList']
try:
orderList = json.loads(orderList)
models.Owner.objects.filter(username=request.user).update(orderList=orderList)
except models.Owner.DoesNotExist:
appStatus = "Sorting operation failed. Please make sure that owner " \
"exists in WeatherApp system"
result = "Fail"
elif request.POST["submit"] == "Refresh":
try:
locations = models.Location.objects.filter(owner=owner)
for location in locations:
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}'.format(location.name,
OW_API_KEY)
locationWeather = requests.get(url).json()
if locationWeather['cod'] == 200:
location.temperature = locationWeather['main']['temp']
location.description = locationWeather['weather'][0]['description']
location.icon = locationWeather['weather'][0]['icon']
location.save()
else:
appStatus = "Refresh operation for {} failed. This could be an issue related with OpenWeatherMap, " \
"please contact with the administrator.".format(location.name)
result = "Fail"
break
except models.Location.DoesNotExist:
appStatus = "Refreshing operation failed. Please make sure that user exists" \
"exists in current set of Locations"
result = "Fail"
elif request.POST["submit"] == "Delete All":
try:
models.Location.objects.filter(owner=owner).delete()
models.Owner.objects.filter(username=request.user).update(orderList="")
except models.Location.DoesNotExist:
appStatus = "Deleting all operation failed, no locations seems to exist."
result = "Fail"
if result == "":
result = "Success"
locations = models.Location.objects.filter(owner=owner)
orderList = models.Owner.objects.filter(username=request.user).values('orderList')[0]['orderList']
if orderList != "":
orderList = orderList.split(',')
sortedLocations = []
for locName in orderList:
sortedLocations.append(locations.get(name=locName))
locations = sortedLocations
return responseLocations(result, appStatus, locations)
def signup(request):
"""SignUp page view that signs up new user to the system, according to given information."""
if request.method == 'POST':
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
try:
user = models.Owner.objects.create_user(username, email, password)
login(request, user)
return redirect('index')
except IntegrityError:
appStatus = "Oops! It seems like this username is taken, please choose another username."
return render(request, 'signup.html', {'status': appStatus})
else:
return render(request, 'signup.html')
def responseLocations(result, statusMsg, locations):
"""Helper function for returning an app request result in JSON HttpResponse"""
locations = serializers.serialize("json", locations)
return HttpResponse(json.dumps({'result': result, 'appStatus': statusMsg,
'locations': locations}), 'text/json')
| ysyesilyurt/WeatherApp | WeatherApp/views.py | views.py | py | 9,163 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "requests... |
4435033563 |
import requests
from currency_codes import CURRENCIES
API_KEY = '82e68121413a404dc85fd537'
def get_rate(currency):
url = f"https://v6.exchangerate-api.com/v6/{API_KEY}/pair/{currency}/UZS"
try:
response = requests.get(url)
rate = response.json()['conversion_rate']
except:
rate = False
return rate
def get_currency_codes():
code_list = ""
for curr_code in CURRENCIES:
code_list += f"/{curr_code[0]} - {curr_code[1]}\n"
return code_list
def is_currency_code(currency):
return currency in dict((x, y) for x, y in CURRENCIES)
def get_ordered_rate_list(sort_in_desc=False):
rate_dict = {}
for code in CURRENCIES:
rate = get_rate(code[0])
if not (rate is False):
rate_dict[code[0]] = rate
sorted_tuple = sorted(rate_dict, key=rate_dict.get, reverse=sort_in_desc)
rate_list = ""
for code in sorted_tuple:
rate_list += f"1 {code} = {rate_dict[code]} UZS\n"
return rate_list
| otabek-usmonov/uzs-exchangerate-bot | currency_rate_info.py | currency_rate_info.py | py | 921 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "currency_codes.CURRENCIES",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "currency_codes.CURRENCIES",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "curr... |
27115300498 | from django.shortcuts import render, redirect
from application.models import *
# Create your views here.
def index(request):
context= {
'Users': User.objects.all()
}
return render(request, 'index.html', context)
def submit_user(request):
User.objects.create(
first_name=request.POST['fname'],
last_name=request.POST['lname'],
age=request.POST['age'],
email=request.POST['email'],
)
return redirect('/') | beattietrey/Coding-Dojo | python_stack/django/django_fullstack/assignments/users_with_templates/application/views.py | views.py | py | 468 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 18,
"usage_type": "call"
}
] |
25161970451 | import json
import logging
import requests
from dacite import from_dict
from typing import Any
from adyen_gift_card.api.adyen_notifications.request import NotificationRequestItem
from adyen_gift_card.infrastructure.newstore_client.client_response import NewStoreError
from newstore_common.json.multi_encoder import MultiToValueEncoder
LOGGER = logging.getLogger()
class NewStoreClient:
def __init__(self, tenant: str, stage: str, provider_name: str):
self.tenant = tenant
self.stage = stage
self.provider_name = provider_name
def send_notification(self, action: str, notification_item: NotificationRequestItem, json_data: Any) -> NewStoreError:
idempotency_key = notification_item.merchant_reference
instrument_id = notification_item.original_reference
url = f'https://{self.tenant}.{self.stage}.newstore.net/v0/d/payment_providers/{action}/' \
f'{self.provider_name}/{idempotency_key}/{instrument_id}'
json_data = json.loads(json.dumps(json_data, cls=MultiToValueEncoder))
LOGGER.info(f'POST: {url} -- {json_data}')
resp = requests.post(url=url, json=json_data)
LOGGER.info(f'http response: {resp.text}')
error = None
if resp.status_code != 200:
error = from_dict(data_class=NewStoreError, data=resp.json())
return error
| NewStore/int-cinori | integrations/adyen_gift_card/adyen_gift_card/infrastructure/newstore_client/client.py | client.py | py | 1,368 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "adyen_gift_card.api.adyen_notifications.request.NotificationRequestItem",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 21,
"usage_type": "... |
74114165863 | import frappe
import os
import json
import sys
# bench execute mfi_customization.mfi.patch.migrate_patch.get_custom_role_permission
def get_custom_role_permission(site=None):
	"""Export fixtures via bench.

	Args:
		site: optional site name; when given it is used directly instead of
			being parsed from the command line.

	Fixes: the ``site`` parameter was previously ignored, and reading
	``sys.argv[2]``/``sys.argv[3]`` unconditionally raised IndexError when
	the command was invoked without the ``--site`` arguments.
	"""
	if site is None and len(sys.argv) > 3 and sys.argv[2] == '--site':
		site = sys.argv[3]
	if site:
		os.system("bench --site {0} export-fixtures".format(site))
	else:
		os.system("bench export-fixtures")
# bench execute mfi_customization.mfi.patch.migrate_patch.set_custom_role_permission
def set_custom_role_permission():
	"""Recreate Custom DocPerm records from the exported fixture file.

	Only fixture entries with no existing (parent, role) match are inserted.
	"""
	fixture_path = frappe.get_app_path("mfi_customization","fixtures","custom_docperm.json")
	with open(fixture_path) as fixture_file:
		entries = json.load(fixture_file)
	for entry in entries:
		existing = frappe.get_all('Custom DocPerm',{'parent':entry.get('parent'),'role':entry.get('role')})
		if existing:
			continue
		doc = frappe.new_doc('Custom DocPerm')
		for field in entry.keys():
			doc.set(field, entry.get(field))
		doc.save()
| Bizmap-Technologies-Pvt-Ltd/mfi_customization- | mfi_customization/mfi/patch/migrate_patch.py | migrate_patch.py | py | 848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 1... |
73857321062 | import numpy as np
from munch import DefaultMunch
from sklearn.model_selection import train_test_split
from tests import config_params, compas_dataset_class, compas_without_sensitive_attrs_dataset_class
from virny.utils.common_helpers import validate_config, confusion_matrix_metrics
def test_validate_config_true1(config_params):
    # The shared fixture is a fully valid config and must pass validation.
    assert validate_config(config_params) == True
def test_validate_config_true2():
    # Valid settings supplied as a plain dict wrapped into a Munch object.
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 0.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian'},
    })
    assert validate_config(config) == True
def test_validate_config_false1():
    # An intersectional attribute referencing an undeclared component is
    # expected to be rejected with ValueError.
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 0.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian', 'sex&race&age': None},
    })
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_validate_config_false2():
    # bootstrap_fraction outside (0, 1] is expected to be rejected.
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 1.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'race': 'Caucasian'},
    })
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_validate_config_false3():
    # Invalid fraction combined with an intersectional attribute whose
    # component ('race') is not declared separately.
    config = DefaultMunch.fromDict({
        "dataset_name": 'COMPAS',
        "bootstrap_fraction": 1.8,
        "n_estimators": 100,
        "sensitive_attributes_dct": {'sex': 0, 'sex&race': None},
    })
    try:
        outcome = validate_config(config)
    except ValueError:
        outcome = False
    assert outcome == False
def test_confusion_matrix_metrics():
    # Perfect predictions; we only check that every expected metric key exists.
    labels = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])
    metrics = confusion_matrix_metrics(labels, labels.copy())
    expected_keys = ['TPR', 'TNR', 'PPV', 'FNR', 'FPR', 'Accuracy', 'F1', 'Selection-Rate', 'Positive-Rate']
    for key in expected_keys:
        assert key in metrics.keys()
| DataResponsibly/Virny | tests/utils/test_common_helpers.py | test_common_helpers.py | py | 2,369 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "virny.utils.common_helpers.validate_config",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tests.config_params",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "munch.DefaultMunch.fromDict",
"line_number": 21,
"usage_type": "call"
... |
25719962431 | import nmap
import main
import xlsxwriter
nmScan = nmap.PortScanner()
def scan_ip(host):
nombre = main.checkoutput()
if nombre == "print":
print('Host : %s (%s)' % (host, nmScan[host].hostname()))
print('State : %s' % nmScan[host].state())
for proto in nmScan[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = nmScan[host][proto].keys()
lport.sort()
for port in lport:
print ('port : %s\tstate : %s' % (port, nmScan[host][proto][port]['state']))
elif nombre.endswith(".xlsx"):
workbook = xlsxwriter.Workbook(nombre)
for proto in nmScan[host].all_protocols():
fila = 2
worksheet = workbook.add_worksheet(proto)
worksheet.write(1, 1, "Anfitrion")
worksheet.write(1, 2, "Protocolo")
worksheet.write(1, 3, "Puerto")
worksheet.write(1, 4, "Estado")
worksheet.write(2, 1, nmScan[host].hostname())
worksheet.write(2, 2, proto)
lport = nmScan[host][proto].keys()
lport.sort()
for port in lport:
worksheet.write(fila, 3, port)
worksheet.write(fila, 4, nmScan[host][proto][port]['state'])
fila += 1 | mepiadmw/PIA-Ciberseguridad | scan_ip.py | scan_ip.py | py | 1,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nmap.PortScanner",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "main.checkoutput",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "xlsxwriter.Workbook",
"line_number": 19,
"usage_type": "call"
}
] |
30326229759 | import pandas as pd
import numpy as np
from statsmodels.stats.outliers_influence import variance_inflation_factor
def forward_delete_corr(data, threshold=0.7):
    """Return columns to drop because they correlate strongly with an earlier column.

    Args:
        data: pandas DataFrame of numeric features.
        threshold: absolute correlation above which the *later* column of a
            pair is flagged (default 0.7, the previously hard-coded value).

    Returns:
        List of column names to delete.
    """
    # Absolute pairwise correlation matrix.
    corr = data.corr().abs()
    # Keep only the strict upper triangle so every pair is inspected once.
    upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
    # Flag any column correlated above the threshold with a preceding column.
    to_delete = [column for column in upper.columns if any(upper[column] > threshold)]
    print("相关性删除列: ", to_delete)
    return to_delete
def get_low_vif_cols(data, save_path):
    """Iteratively drop the column with the highest VIF until every remaining
    column has VIF <= 10.

    The VIF table of each round is written to ``save_path`` (each round
    overwrites the previous file). Returns the list of dropped columns.
    """
    to_delete = []
    while True:
        # Recompute variance inflation factors over the surviving columns.
        vif = pd.DataFrame()
        vif["variables"] = data.columns
        vif["VIF"] = [variance_inflation_factor(data.values, i) for i in range(data.shape[1])]
        vif.to_csv(save_path)
        if vif["VIF"].max() <= 10:
            break
        # Remove the single worst offender and try again.
        worst = vif.loc[vif["VIF"].idxmax(), "variables"]
        to_delete.append(worst)
        data = data.drop(worst, axis=1)
    print("多重共线性删除列: ", to_delete)
    return to_delete
def get_low_var_cols(data, threshold=1):
    """Return the columns whose variance is below ``threshold``.

    Args:
        data: pandas DataFrame of numeric features.
        threshold: variance cut-off (default 1, the previously hard-coded value).

    Returns:
        List of column names with variance strictly below the threshold.
    """
    var = data.var()
    to_delete = var[var < threshold].index.tolist()
    print("方差删除列: ", to_delete)
    return to_delete
def get_single_enum_cols(data):
    """Return columns dominated by a single value.

    A column is flagged when it has more than one distinct value but some
    value accounts for at least 90% of the rows. Constant columns (exactly
    one distinct value) are not flagged.
    """
    to_delete = []
    for column in data.columns:
        counts = data[column].value_counts(normalize=True)
        if len(counts) > 1 and (counts >= 0.9).any():
            to_delete.append(column)
    print("枚举值删除列: ", to_delete)
    return to_delete
| Whale-lyi/simple-predict | filter.py | filter.py | py | 1,753 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.triu",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "statsmodels.stats.outliers_inf... |
22778807898 | import copy
import numpy as np
import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler
class RandomClassSampler(Sampler):
    """Randomly samples N classes each with K instances to form a minibatch
    of size N*K.

    Modified from https://github.com/KaiyangZhou/deep-person-reid.

    Args:
        data_source (list): list of Datums.
        batch_size (int): batch size.
        n_ins (int): number of instances per class to sample in a minibatch.
    """

    def __init__(self, data_source, batch_size, n_ins):
        if batch_size < n_ins:
            raise ValueError(
                "batch_size={} must be no less "
                "than n_ins={}".format(batch_size, n_ins)
            )
        self.data_source = data_source
        self.batch_size = batch_size
        self.n_ins = n_ins
        self.ncls_per_batch = self.batch_size // self.n_ins
        # Map each label to the dataset indices carrying it.
        self.index_dic = defaultdict(list)
        for index, item in enumerate(data_source):
            self.index_dic[item.label].append(index)
        self.labels = list(self.index_dic.keys())
        assert len(self.labels) >= self.ncls_per_batch
        # estimate number of images in an epoch
        self.length = len(list(self.__iter__()))

    def __iter__(self):
        # Shuffle each label's indices and cut them into groups of n_ins,
        # discarding any trailing partial group.
        batch_idxs_dict = defaultdict(list)
        for label in self.labels:
            idxs = copy.deepcopy(self.index_dic[label])
            if len(idxs) < self.n_ins:
                idxs = np.random.choice(idxs, size=self.n_ins, replace=True)
            random.shuffle(idxs)
            usable = (len(idxs) // self.n_ins) * self.n_ins
            for start in range(0, usable, self.n_ins):
                batch_idxs_dict[label].append(list(idxs[start:start + self.n_ins]))
        # Repeatedly pick ncls_per_batch labels and emit one group from each
        # until too few labels still have groups left.
        avai_labels = copy.deepcopy(self.labels)
        final_idxs = []
        while len(avai_labels) >= self.ncls_per_batch:
            for label in random.sample(avai_labels, self.ncls_per_batch):
                final_idxs.extend(batch_idxs_dict[label].pop(0))
                if not batch_idxs_dict[label]:
                    avai_labels.remove(label)
        return iter(final_idxs)

    def __len__(self):
        return self.length
| MaXuSun/domainext | domainext/data/samplers/random_class.py | random_class.py | py | 2,346 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "torch.utils.data.sampler.Sampler",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 40,
"usage_type": "call"
},
{
"a... |
44682923693 | from flask import Flask, render_template, request, session, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
import wikipedia as wk
import random
import re
from retry import retry
from nltk.tokenize import sent_tokenize
import nltk
# Download NLTK corpora at import time (heavyweight; runs on every start).
nltk.download('all')
#TODO - BETTER TEXT REPLACE HE/HER - WIKIPEDIA BETTER SEARCH (KNOWLEDGE TREE?) - CSS (PACKAGE?)
#------------
# Flask app with non-permanent, filesystem-backed sessions.
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
# NOTE(review): hard-coded secret key is insecure for production — load from config/env.
app.secret_key = "123"
@app.route('/', methods=['GET',"POST"])
def home():
    """Single-page quiz endpoint: serves the game and scores the guesses."""

    def findfamous():
        # Pick a random name from the bundled list of famous people.
        with open("data/famouspeople.txt","r") as f:
            lines = f.readlines()
        return random.choice(lines).strip()

    @retry(FileNotFoundError, delay=1, tries=5)
    def findfacts():
        # Fetch a Wikipedia summary; any lookup failure is converted to
        # FileNotFoundError so @retry picks another person and tries again.
        famousperson = findfamous().replace(" ","_")
        try:
            result = wk.summary(famousperson, auto_suggest=False) #sentences = 10
        except Exception:
            raise FileNotFoundError
        return (famousperson, result)

    def cleandata(tup):
        # Censor the person's name (and each name part) out of the summary,
        # then pick one random sentence as the fact to show.
        name = tup[0].replace("_"," ")
        text = tup[1]
        prohibitedWords = [name] + name.split(" ")
        big_regex = re.compile('|'.join(map(re.escape, prohibitedWords)))
        result = big_regex.sub("XXXXXXX", text)
        result = result.replace(" She "," They ").replace(" He "," They ").replace(" His "," Their ").replace(" Her "," Their ")
        print("pre")
        randomlines = sent_tokenize(result)
        # Drop the first two sentences, which usually name the person outright.
        randomlines.pop(0)
        randomlines.pop(0)
        print("post")
        randomFact = random.choice(randomlines)
        num = random.randint(1,3)
        return (randomFact, name, num)

    def gameloop():
        # Build the option list: the real name lands in slot `num`, the rest
        # are filled with decoys, then trimmed to five options.
        result, name, num = cleandata(findfacts())
        guesses = [0, 0, 0, 0, 0, 0]
        guesses[num] = name
        guesses = guesses[1:6]
        for j, decoy in enumerate(guesses):
            if decoy == 0:
                guesses[j] = findfamous()
        return result, guesses, name, num

    correctornot = "?"
    if session.get("points") is None:
        session["points"] = 0
    if request.method == 'POST':
        clicked = request.form['submit_button']
        if clicked == 'New Try':
            result, guesses, name, num = gameloop()
            session['name'] = name.split(" ")[0]
            print("New Try")
            print(guesses)
            return render_template("home.html", result=result, guesses=guesses, correctornot=correctornot, points=session["points"])
        print("player clicked button")
        print(clicked)
        print(session['name'])
        if clicked == session['name']:
            session["points"] = session["points"] + 1
            return render_template("home.html", correctornot=correctornot, result="correct", points=session["points"])
        session["points"] = session["points"] - 1
        return render_template("home.html", correctornot=correctornot, result="wrong", points=session["points"])
    elif request.method == 'GET':
        print("No Post Back Call")
        return render_template('home.html', result="Click play to get started!", guesses=[], points=session["points"])
if __name__ == '__main__':
app.run() | Freskoko/WikipediaQuizFlask | app.py | app.py | py | 3,771 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_... |
5940263315 | from functools import reduce
import math
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
import torch.nn.functional as F
from model.layers import *
from model.losses import *
class GraphRecommender(nn.Module):
    """Session-based recommender over a global item graph.

    Combines graph-convolved item embeddings with a reverse-position
    attention session encoder, plus an optional contrastive (SSL) loss.
    """

    def __init__(self, opt, num_node, adj, len_session, n_train_sessions):
        super(GraphRecommender, self).__init__()
        self.opt = opt
        self.batch_size = opt.batch_size
        self.num_node = num_node
        self.len_session = len_session
        self.dim = opt.dim
        # Index 0 is the padding item.
        self.item_embedding = nn.Embedding(num_node + 1, self.dim,
                                           padding_idx=0)
        self.pos_embedding = nn.Embedding(self.len_session, self.dim)
        self.ssl_task = SSLTask(opt)
        self.item_conv = GlobalItemConv(layers=opt.layers)
        self.w_k = opt.w_k
        self.adj = adj
        self.dropout = opt.dropout
        self.n_sessions = n_train_sessions
        self.memory_bank = torch.empty((n_train_sessions, self.dim))
        # pos attention
        self.w_1 = nn.Parameter(torch.Tensor(2 * self.dim, self.dim))
        self.w_2 = nn.Parameter(torch.Tensor(self.dim, 1))
        self.glu1 = nn.Linear(self.dim, self.dim)
        self.glu2 = nn.Linear(self.dim, self.dim, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(dim), 1/sqrt(dim)] for every parameter."""
        stdv = 1.0 / math.sqrt(self.dim)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)

    def compute_sess_emb(self, item_seq, hidden, rev_pos=True, attn=True):
        """Aggregate per-position item states into one session embedding.

        Args:
            item_seq: padded item-id sequences; 0 marks padding.
            hidden: per-position item representations.
            rev_pos: add reverse positional embeddings before gating.
            attn: weight positions with learned soft attention instead of
                using the gate values directly.
        """
        batch_size = hidden.shape[0]
        seq_len = hidden.shape[1]  # renamed from `len` to avoid shadowing the builtin
        mask = torch.unsqueeze((item_seq != 0), -1)
        # Mean of hidden states over the non-padded positions.
        hs = torch.sum(hidden * mask, -2) / torch.sum(mask, 1)
        hs = hs.unsqueeze(-2).repeat(1, seq_len, 1)
        nh = hidden
        if rev_pos:
            pos_emb = self.pos_embedding.weight[:seq_len]
            pos_emb = torch.flip(pos_emb, [0])  # reverse order
            pos_emb = pos_emb.unsqueeze(0).repeat(batch_size, 1, 1)
            nh = torch.matmul(torch.cat([pos_emb, hidden], -1), self.w_1)
            nh = torch.tanh(nh)
        nh = torch.sigmoid(self.glu1(nh) + self.glu2(hs))
        if attn:
            beta = torch.matmul(nh, self.w_2)
            beta = beta * mask  # zero attention on padded positions
            sess_emb = torch.sum(beta * hidden, 1)
        else:
            sess_emb = torch.sum(nh * hidden, 1)
        return sess_emb

    def compute_con_loss(self, batch, sess_emb, item_embs):
        """Contrastive SSL loss from last items and positive/negative targets."""
        mask = torch.unsqueeze((batch['inputs'] != 0), -1)
        last_item_pos = torch.sum(mask, dim=1) - 1
        last_items = torch.gather(batch['inputs'], dim=1, index=last_item_pos).squeeze()
        last_items_emb = item_embs[last_items]
        pos_last_items_emb = item_embs[batch['pos_last_items']]
        neg_last_items_emb = item_embs[batch['neg_last_items']]
        pos_target_item_emb = item_embs[batch['targets']]
        neg_targets_item_emb = item_embs[batch['neg_targets']]
        con_loss = self.ssl_task(sess_emb, last_items_emb, pos_last_items_emb, neg_last_items_emb,
                                 pos_target_item_emb, neg_targets_item_emb)
        return con_loss

    def forward(self, batch, cl=False):
        """Score all items for each session; optionally compute the SSL loss.

        Returns:
            (scores, con_loss); con_loss is a scalar zero tensor when cl is False.
        """
        items, inputs, alias_inputs = batch['items'], batch['inputs'], batch['alias_inputs']
        graph_item_embs = self.item_conv(self.item_embedding.weight, self.adj)
        hidden = graph_item_embs[items]
        # dropout
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        alias_inputs = alias_inputs.view(-1, alias_inputs.size(1), 1).expand(-1, -1, self.dim)
        seq_hidden = torch.gather(hidden, dim=1, index=alias_inputs)
        # reverse position attention
        sess_emb = self.compute_sess_emb(inputs, seq_hidden, rev_pos=True, attn=True)
        # weighted L2 normalization: NISER, DSAN, STAN, COTREC
        select = self.w_k * F.normalize(sess_emb, dim=-1, p=2)
        graph_item_embs_norm = F.normalize(graph_item_embs, dim=-1, p=2)
        scores = torch.matmul(select, graph_item_embs_norm.transpose(1, 0))
        # Fix: torch.Tensor(0) creates an *empty* tensor, not a zero; use a
        # scalar zero so the no-CL path returns a well-formed additive identity.
        con_loss = torch.tensor(0.0)
        if cl:
            con_loss = self.compute_con_loss(batch, select, graph_item_embs_norm)
        return scores, con_loss
| dbis-uibk/SPARE | model/recommender.py | recommender.py | py | 4,257 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
4509075711 | import cv2
import numpy as np
import depthai
import threading
import sys
import os
import time
# Global variables
selected_points = []
completed = False
# Global variables
dataset = "kitti"
img_size = [3, 352, 1216] # for kitti
frame = None
is_frame_available = False
stop_capture = threading.Event() # Event object to signal stop
# Function to continuously capture frames
def capture_frames():
    """Background-thread body: stream RGB frames from the OAK camera.

    Continuously updates the module-global ``frame`` and sets
    ``is_frame_available`` until the ``stop_capture`` event is set.
    """
    global frame, is_frame_available
    # Create the pipeline and camera node
    pipeline = depthai.Pipeline()
    cam = pipeline.createColorCamera()
    #Unsupported resolution set for detected camera IMX378/214, needs THE_1080_P / THE_4_K / THE_12_MP.
    cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
    #cam.initialControl.setManualFocus(150) # 0..255 (larger for near objects)
    # Focus:
    # value 150 == 22cm
    # value 140 == 36cm
    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    cam.video.link(xoutRgb.input)
    # Start the pipeline
    with depthai.Device(pipeline) as device:
        # Output queue for the frames
        q_rgb = device.getOutputQueue(name="rgb", maxSize=1, blocking=False)
        print('Connected cameras:', device.getConnectedCameraFeatures())
        print('Usb speed:', device.getUsbSpeed().name)
        if device.getBootloaderVersion() is not None:
            print('Bootloader version:', device.getBootloaderVersion())
        # Device name
        print('Device name:', device.getDeviceName())
        while not stop_capture.is_set():
            # Get the RGB frame
            in_rgb = q_rgb.tryGet()
            #focus_value = q_rgb.getCtrlValue(depthai.CameraControl.CamCtrl.FOCUS)
            #print("Focus = ",focus_value)
            if in_rgb is not None:
                # Convert the NV12 format to BGR
                frame = in_rgb.getCvFrame()
                # Set the flag to indicate that a new frame is available
                is_frame_available = True
def sort_coordinates(selected_points):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left]."""
    # Left half = two smallest x, right half = two largest x.
    by_x = sorted(selected_points, key=lambda p: p[0])
    (l0, l1), (r0, r1) = by_x[:2], by_x[2:]
    # Within each side the point with strictly smaller y is the top corner
    # (on a y-tie the later point wins, matching the strict '<' comparison).
    top_left, bottom_left = (l0, l1) if l0[1] < l1[1] else (l1, l0)
    top_right, bottom_right = (r0, r1) if r0[1] < r1[1] else (r1, r0)
    return [top_left, top_right, bottom_right, bottom_left]
# Mouse callback function for selecting points
def store_points(event, x, y, flags, param):
    """OpenCV mouse callback: record up to four clicked corner points.

    Appends each left-click to the module-global ``selected_points``,
    redraws the markers, and sets ``completed`` once four points exist.
    """
    global selected_points, completed, frame, is_frame_available
    # Busy-wait until the capture thread has produced a first frame.
    # NOTE(review): spins a CPU core; an Event.wait() would be gentler — confirm intent.
    while not is_frame_available:
        pass
    window_name = 'Select 4 Corners of your screen'
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(selected_points) < 4:
            selected_points.append((x, y))
            # Redraw every selected point so markers persist across clicks.
            for (x,y) in selected_points:
                cv2.circle(frame, (x, y), 9, (0, 255, 0), -1)
            cv2.imshow(window_name, frame)
            # cv2.waitKey(0)
            print((x,y))
        if len(selected_points) == 4:
            completed = True
def select_points():
# Create a window and set the mouse callback
# Capture a photo through webcam and save it in the same directory structure
screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
# Calculate the dimensions for the left half of the screen
left_half_x = -10
left_half_y = 0
left_half_width = screen_width // 2
left_half_height = screen_height
window_name = 'Image to be captured'
# Create a resizable window for the webcam feed
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.moveWindow(window_name, left_half_x, left_half_y)
cv2.resizeWindow(window_name, left_half_width, left_half_height)
sample_image_path = "/home/vision/suraj/kitti_dataset/KITTI/2011_09_28/2011_09_28_drive_0001_sync/image_02/data/0000000000.png"
image = cv2.imread(sample_image_path,-1)
h,w,_ = image.shape
pad_x = int(w)
pad_y = int(((screen_height*w)/(screen_width*.5)-h)/2)
print(image.shape)
top_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
bottom_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
image = np.vstack((top_padding,image,bottom_padding))
# if dataset == "kitti": # do kb_crop
# height = img_size[1]
# width = img_size[2]
# top_margin = int(height - 352)
# left_margin = int((width - 1216) / 2)
# image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216]
cv2.imshow(window_name, image)
# cv2.waitKey(1)
global selected_points, frame, is_frame_available
window_name = 'Select 4 Corners of your screen'
screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
# Calculate the dimensions for the right half of the screen
right_half_x = screen_width // 2
right_half_y = screen_height
right_half_width = screen_width // 2
right_half_height = screen_height
# window_name = 'Select Points'
# Create a resizable window for the camera feed
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.moveWindow(window_name, right_half_x, 0)
cv2.resizeWindow(window_name, right_half_width, right_half_height)
cv2.setMouseCallback(window_name, store_points)
# Instructions
print("Please select 4 corner points of the rectangular screen.")
while True:
while not is_frame_available:
pass
#img = frame.copy()
# Draw a circle to mark the selected point
for (x,y) in selected_points:
cv2.circle(frame, (x, y), 9, (0, 255, 0), -1)
# Display the image
cv2.imshow(window_name, frame)
# Wait for the user to select points
if completed:
break
# Check for key press
key = cv2.waitKey(1)
if key == ord('q'):
sys.exit(0)
break
cv2.destroyAllWindows()
def display_frame(kitti_read_path,kitti_write_path,data_splits_file):
# Path to the data splits file
# Define the destination points (a rectangle)
width, height = 1242, 375 #kitti
dst_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype=np.float32)
global selected_points, frame, is_frame_available
selected_points = sort_coordinates(selected_points)
print("Selected points are:",selected_points)
# Convert the selected points to numpy array
src_points = np.array(selected_points, dtype=np.float32)
# Perform the homography transformation
M, _ = cv2.findHomography(src_points, dst_points)
# Read the data splits file
with open(data_splits_file, 'r') as file:
lines = file.readlines()
# Process each image path
for idx,line in enumerate(lines):
image_path = line.strip().split(" ")[0]
if image_path.split("/")[0] == "2011_09_26":
continue # as 1st folder is done
read_path = os.path.join(kitti_read_path,image_path)
write_path = os.path.join(kitti_write_path,image_path)
save_dir = os.path.dirname(write_path)
os.makedirs(save_dir,exist_ok=True)
# Load the RGB image
rgb_image = cv2.imread(read_path,-1)
#rgb_image = cv2.resize(rgb_image,(width, height))
if rgb_image is not None:
# # Create a delay of 0.5 seconds
# time.sleep(0.5)
# Capture a photo through webcam and save it in the same directory structure
screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
# Calculate the dimensions for the left half of the screen
left_half_x = -10
left_half_y = 0
left_half_width = screen_width // 2
left_half_height = screen_height
h,w,_ = rgb_image.shape
pad_x = int(w)
pad_y = int(((screen_height*w)/(screen_width*.5)-h)/2)
top_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
bottom_padding = np.zeros((pad_y,pad_x,3),dtype=np.uint8)
rgb_image = np.vstack((top_padding,rgb_image,bottom_padding))
#print(rgb_image.shape)
window_name = 'Image to be captured'
# Create a resizable window for the webcam feed
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.moveWindow(window_name, left_half_x, left_half_y)
cv2.resizeWindow(window_name, left_half_width, left_half_height)
#image_name_ = os.path.basename(read_path)
#cv2.putText(rgb_image,f"{image_name_}",(325,690), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA)
# sample_image_path = "/home/vision/suraj/kitti_dataset/KITTI/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png"
# sample_image = cv2.imread(sample_image_path,-1)
cv2.imshow(window_name,rgb_image)
#global counter_video_started
cv2.waitKey(400)
#time.sleep(2)
global frame, is_frame_available
while not is_frame_available:
pass
captured_frame = frame.copy()
#cv2.waitKey(1000)
#time.sleep(2)
# Warp the image
modified_frame = cv2.warpPerspective(captured_frame, M, (width, height))
#print("warped image's shape = ",modified_frame.shape)
# Display the frame
screen_width, screen_height = 1920, 1080 # Replace with your screen resolution
# Calculate the dimensions for the right half of the screen
right_half_x = screen_width // 2
right_half_y = screen_height
right_half_width = screen_width // 2
right_half_height = screen_height
# window_name = 'Verify Captured Image'
# Create a resizable window for the camera feed
# cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
# cv2.moveWindow(window_name, right_half_x, 0)
# cv2.resizeWindow(window_name, right_half_width, right_half_height)
# cv2.imshow(window_name, modified_frame)
cv2.imwrite(write_path, modified_frame)
# Check for the 'q' key to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
stop_capture.set()
break
#save image in write_path
# Start the frame capture thread
capture_thread = threading.Thread(target=capture_frames)
capture_thread.start()
#select 4 points of the screen
select_points()
kitti_read_path = "/home/vision/suraj/kitti_dataset/KITTI"
kitti_write_path = "/home/vision/suraj/kitti_dataset/KITTI_captured_from_oak1"
data_splits_file = '/home/vision/suraj/Pixelformer_jetson/data_splits/kitti_all_data_for_data_capture_from_camera_from_2nd.txt' # Replace with the actual path to your data splits file
display_frame(kitti_read_path,kitti_write_path,data_splits_file)
#perform homography
#perform_homography()
# Wait for the frame capture thread to finish
capture_thread.join()
# Release resources
cv2.destroyAllWindows()
| surajiitd/jetson-documentation | model_compression/capture_dataset.py | capture_dataset.py | py | 11,499 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "threading.Event",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "depthai.Pipeline",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "depthai.ColorCameraProperties",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "d... |
35398028388 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from contextlib import contextmanager
import os
import pytest
from textwrap import dedent
from pants.base.address import SyntheticAddress, BuildFileAddress
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_configuration import BuildConfiguration
from pants.base.build_file import BuildFile
from pants.base.build_file_parser import BuildFileParser
from pants.base.build_graph import BuildGraph
from pants.base.build_root import BuildRoot
from pants.base.target import Target
from pants.util.contextutil import pushd, temporary_dir
from pants.util.dirutil import touch
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) There are many untested methods in BuildGraph left to be tested.
class BuildGraphTest(BaseTest):
@contextmanager
def workspace(self, *buildfiles):
with temporary_dir() as root_dir:
with BuildRoot().temporary(root_dir):
with pushd(root_dir):
for buildfile in buildfiles:
touch(os.path.join(root_dir, buildfile))
yield os.path.realpath(root_dir)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_transitive_closure_spec(self):
with self.workspace('./BUILD', 'a/BUILD', 'a/b/BUILD') as root_dir:
with open(os.path.join(root_dir, './BUILD'), 'w') as build:
build.write(dedent('''
fake(name="foo",
dependencies=[
'a',
])
'''))
with open(os.path.join(root_dir, 'a/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="a",
dependencies=[
'a/b:bat',
])
'''))
with open(os.path.join(root_dir, 'a/b/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="bat")
'''))
build_configuration = BuildConfiguration()
build_configuration.register_target_alias('fake', Target)
parser = BuildFileParser(build_configuration, root_dir=root_dir)
build_graph = BuildGraph(self.address_mapper)
parser.inject_spec_closure_into_build_graph(':foo', build_graph)
self.assertEqual(len(build_graph.dependencies_of(SyntheticAddress.parse(':foo'))), 1)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_target_invalid(self):
self.add_to_build_file('a/BUILD', 'target(name="a")')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('a:nope')
self.add_to_build_file('b/BUILD', 'target(name="a")')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b:b')
with pytest.raises(BuildFileParser.InvalidTargetException):
self.build_graph.inject_spec_closure('b:')
# TODO(Eric Ayers) This test broke during a refactoring and should be moved removed or updated
@pytest.mark.xfail
def test_transitive_closure_address(self):
with self.workspace('./BUILD', 'a/BUILD', 'a/b/BUILD') as root_dir:
with open(os.path.join(root_dir, './BUILD'), 'w') as build:
build.write(dedent('''
fake(name="foo",
dependencies=[
'a',
])
'''))
with open(os.path.join(root_dir, 'a/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="a",
dependencies=[
'a/b:bat',
])
'''))
with open(os.path.join(root_dir, 'a/b/BUILD'), 'w') as build:
build.write(dedent('''
fake(name="bat")
'''))
def fake_target(*args, **kwargs):
assert False, "This fake target should never be called in this test!"
alias_map = {'target_aliases': {'fake': fake_target}}
self.build_file_parser.register_alias_groups(alias_map=alias_map)
bf_address = BuildFileAddress(BuildFile(root_dir, 'BUILD'), 'foo')
self.build_file_parser._populate_target_proxy_transitive_closure_for_address(bf_address)
self.assertEqual(len(self.build_file_parser._target_proxy_by_address), 3)
# TODO(Eric Ayers) This test broke during a refactoring and should be moved, removed or updated
@pytest.mark.xfail
def test_no_targets(self):
self.add_to_build_file('empty/BUILD', 'pass')
with pytest.raises(BuildFileParser.EmptyBuildFileException):
self.build_file_parser.inject_spec_closure_into_build_graph('empty', self.build_graph)
with pytest.raises(BuildFileParser.EmptyBuildFileException):
self.build_file_parser.inject_spec_closure_into_build_graph('empty:foo', self.build_graph)
def test_contains_address(self):
a = SyntheticAddress.parse('a')
self.assertFalse(self.build_graph.contains_address(a))
target = Target(name='a',
address=a,
build_graph=self.build_graph)
self.build_graph.inject_target(target)
self.assertTrue(self.build_graph.contains_address(a))
def test_get_target_from_spec(self):
a = self.make_target('foo:a')
result = self.build_graph.get_target_from_spec('foo:a')
self.assertEquals(a, result)
b = self.make_target('foo:b')
result = self.build_graph.get_target_from_spec(':b', relative_to='foo')
self.assertEquals(b, result)
def test_walk_graph(self):
    """
    Make sure that BuildGraph.walk_transitive_dependency_graph() and
    BuildGraph.walk_transitive_dependee_graph() return DFS preorder (or postorder) traversal.
    """
    def assertDependencyWalk(target, results, postorder=False):
        # Collect the dependency-direction traversal rooted at `target` and
        # compare it against the expected visit order.
        targets = []
        self.build_graph.walk_transitive_dependency_graph([target.address],
                                                          lambda x: targets.append(x),
                                                          postorder=postorder)
        self.assertEquals(results, targets)

    def assertDependeeWalk(target, results, postorder=False):
        # Same, but walking the reverse (dependee) edges.
        targets = []
        self.build_graph.walk_transitive_dependee_graph([target.address],
                                                        lambda x: targets.append(x),
                                                        postorder=postorder)
        self.assertEquals(results, targets)

    # Chain e -> d -> c -> b -> a with one shortcut edge d -> a.
    a = self.make_target('a')
    b = self.make_target('b', dependencies=[a])
    c = self.make_target('c', dependencies=[b])
    d = self.make_target('d', dependencies=[c, a])
    e = self.make_target('e', dependencies=[d])
    assertDependencyWalk(a, [a])
    assertDependencyWalk(b, [b, a])
    assertDependencyWalk(c, [c, b, a])
    assertDependencyWalk(d, [d, c, b, a])
    assertDependencyWalk(e, [e, d, c, b, a])
    assertDependeeWalk(a, [a, b, c, d, e])
    assertDependeeWalk(b, [b, c, d, e])
    assertDependeeWalk(c, [c, d, e])
    assertDependeeWalk(d, [d, e])
    assertDependeeWalk(e, [e])
    assertDependencyWalk(a, [a], postorder=True)
    assertDependencyWalk(b, [a, b], postorder=True)
    assertDependencyWalk(c, [a, b, c], postorder=True)
    assertDependencyWalk(d, [a, b, c, d], postorder=True)
    assertDependencyWalk(e, [a, b, c, d, e], postorder=True)
    assertDependeeWalk(a, [e, d, c, b, a], postorder=True)
    assertDependeeWalk(b, [e, d, c, b], postorder=True)
    assertDependeeWalk(c, [e, d, c], postorder=True)
    assertDependeeWalk(d, [e, d], postorder=True)
    assertDependeeWalk(e, [e], postorder=True)
    # Try a case where postorder traversal is not identical to reversed preorder traversal
    c = self.make_target('c1', dependencies=[])
    d = self.make_target('d1', dependencies=[c])
    b = self.make_target('b1', dependencies=[c, d])
    e = self.make_target('e1', dependencies=[b])
    a = self.make_target('a1', dependencies=[b, e])
    assertDependencyWalk(a, [a, b, c, d, e])
    assertDependencyWalk(a, [c, d, b, e, a], postorder=True)
def test_target_closure(self):
    """Target.closure() yields the target itself followed by its transitive deps."""
    a = self.make_target('a')
    b = self.make_target('b', dependencies=[a])
    c = self.make_target('c', dependencies=[b])
    d = self.make_target('d', dependencies=[a, c])
    for target, expected in ((a, [a]),
                             (b, [b, a]),
                             (c, [c, b, a]),
                             (d, [d, a, c, b])):
        self.assertEquals(expected, target.closure())
def test_target_walk(self):
    """Target.walk() visits the target and its transitive deps in DFS preorder."""
    def walked(target):
        visited = []
        target.walk(visited.append)
        return visited

    a = self.make_target('a')
    self.assertEquals([a], walked(a))
    b = self.make_target('b', dependencies=[a])
    self.assertEquals([b, a], walked(b))
    c = self.make_target('c', dependencies=[b])
    self.assertEquals([c, b, a], walked(c))
    d = self.make_target('d', dependencies=[a, c])
    self.assertEquals([d, a, c, b], walked(d))
def test_lookup_exception(self):
    """
    There is code that depends on the fact that TransitiveLookupError is a subclass
    of AddressLookupError
    """
    # Callers catch AddressLookupError, so the subclass relationship is part of
    # the public contract and must not regress.
    self.assertIsInstance(BuildGraph.TransitiveLookupError(), AddressLookupError)
def test_invalid_address(self):
    """A spec pointing at a missing BUILD file fails, and a missing dependency
    reports the chain of referencing targets."""
    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in future Python versions).
    with self.assertRaisesRegexp(AddressLookupError,
                                 r'^BUILD file does not exist at:.*/BUILD'):
        self.build_graph.inject_spec_closure('//:a')

    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=["non-existent-path:b"],'
                           ')')
    with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
                                 r'^BUILD file does not exist at:.*/non-existent-path/BUILD'
                                 r'\s+when translating spec non-existent-path:b'
                                 r'\s+referenced from :a$'):
        self.build_graph.inject_spec_closure('//:a')
def test_invalid_address_two_hops(self):
    """A missing dependency two levels deep reports the full referencing chain."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=["goodpath:b"],'
                           ')')
    self.add_to_build_file('goodpath/BUILD',
                           'target(name="b", '
                           '  dependencies=["non-existent-path:c"],'
                           ')')
    # Raw strings so '\s' is a regex token rather than an invalid escape sequence.
    with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
                                 r'^BUILD file does not exist at: .*/non-existent-path/BUILD'
                                 r'\s+when translating spec non-existent-path:c'
                                 r'\s+referenced from goodpath:b'
                                 r'\s+referenced from :a$'):
        self.build_graph.inject_spec_closure('//:a')
def test_invalid_address_two_hops_same_file(self):
    """The referencing chain is reported even when two hops live in one BUILD file."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=["goodpath:b"],'
                           ')')
    self.add_to_build_file('goodpath/BUILD',
                           'target(name="b", '
                           '  dependencies=[":c"],'
                           ')\n'
                           'target(name="c", '
                           '  dependencies=["non-existent-path:d"],'
                           ')')
    # Raw strings so '\s' is a regex token rather than an invalid escape sequence.
    with self.assertRaisesRegexp(BuildGraph.TransitiveLookupError,
                                 r'^BUILD file does not exist at:.*/non-existent-path/BUILD'
                                 r'\s+when translating spec non-existent-path:d'
                                 r'\s+referenced from goodpath:c'
                                 r'\s+referenced from goodpath:b'
                                 r'\s+referenced from :a$'):
        self.build_graph.inject_spec_closure('//:a')
def test_raise_on_duplicate_dependencies(self):
    """The same dependency spelled two ways must be rejected after normalization."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=['
                           '    "other:b",'
                           '    "//other:b",'  # we should perform the test on normalized addresses
                           '])')
    self.add_to_build_file('other/BUILD',
                           'target(name="b")')
    # The second literal is raw so '\s' is a regex token, not an invalid escape.
    with self.assertRaisesRegexp(
        BuildGraph.TransitiveLookupError,
        '^Addresses in dependencies must be unique. \'other:b\' is referenced more than once.'
        r'\s+referenced from :a$'):
        self.build_graph.inject_spec_closure('//:a')
def test_inject_then_inject_closure(self):
    """Injecting a lone address and then its closure links dependencies correctly."""
    self.add_to_build_file('BUILD',
                           'target(name="a", '
                           '  dependencies=['
                           '    "other:b",'
                           '])')
    self.add_to_build_file('other/BUILD',
                           'target(name="b")')
    root = SyntheticAddress.parse('//:a')
    self.build_graph.inject_address(root)
    self.build_graph.inject_address_closure(root)
    dependee = self.build_graph.get_target_from_spec('//:a')
    dependency = self.build_graph.get_target_from_spec('//other:b')
    self.assertIn(dependency, dependee.dependencies)
| fakeNetflix/square-repo-pants | tests/python/pants_test/graph/test_build_graph.py | test_build_graph.py | py | 13,188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pants_test.base_test.BaseTest",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pants.util.contextutil.temporary_dir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pants.base.build_root.BuildRoot",
"line_number": 28,
"usage_type": "ca... |
29326071622 | # coding=utf-8
import matplotlib.pyplot as plt
from gensim.models import Word2Vec
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_curve, auc
import data_processing
import globe
import word2vec_gensim_train
# Read in the data: one file of positive and one of negative review texts.
# pos_file_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/data/test3.txt'
# neg_file_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/data/test2.txt'
pos_file_path = '/Users/li/workshop/DataSet/sentiment/train/result_pos.txt'
neg_file_path = '/Users/li/workshop/DataSet/sentiment/train/result_neg.txt'

tmp = data_processing.read_data(pos_file_path, neg_file_path)
# data_split returns (x_train, x_test, label_train, label_test).
res = data_processing.data_split(tmp[0], tmp[1])
x_train = res[0]
x_test = res[1]
label_train = res[2]
label_test = res[3]

x_train = data_processing.text_clean(x_train)
x_test = data_processing.text_clean(x_test)

# Build document vectors with the pretrained gensim word2vec model.
n_dim = globe.n_dim
# model_path = '/home/zhangxin/work/workplace_python/DeepNaturalLanguageProcessing/DeepNLP/word2vecmodel/mymodel'
model_path = globe.model_path
word2vec_model = Word2Vec.load(model_path)
vecs = word2vec_gensim_train.text_vecs(x_train, x_test, n_dim, word2vec_model)
train_vecs = vecs[0]
test_vecs = vecs[1]

# Train the classifier: logistic regression fit by SGD with an L1 penalty.
lr = SGDClassifier(loss='log', penalty='l1')
lr.fit(train_vecs, label_train)

print('Test Accuracy: %.2f' % lr.score(test_vecs, label_test))

# Plot the ROC curve of the test-set predicted probabilities.
pred_probas = lr.predict_proba(test_vecs)[:, 1]
fpr, tpr, _ = roc_curve(label_test, pred_probas)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='area = %.2f' %roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.legend(loc='lower right')
plt.show()
| STHSF/DeepNaturalLanguageProcessing | TextClassification/sentiment_analysis/sentiment_analysis_zh/word2vec_classify_run.py | word2vec_classify_run.py | py | 1,700 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "data_processing.read_data",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "data_processing.data_split",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "data_processing.text_clean",
"line_number": 23,
"usage_type": "call"
},
{
"a... |
35396901278 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.fileset import Fileset
from twitter.common.lang import Compatibility
def assert_list(obj, expected_type=Compatibility.string, can_be_none=True, default=(),
                allowable=(list, Fileset, OrderedSet, set, tuple), raise_type=ValueError):
  """Ensure that parameters set by users in BUILD files are of acceptable types.

  :param obj: the object that may be a list. It will pass if it is of a type in ``allowable``.
  :param expected_type: the expected type of the returned list's contents.
  :param can_be_none: whether ``obj`` may be None. If True and ``obj`` is None, ``default`` is used.
  :param default: the value validated and returned when ``can_be_none`` is True and ``obj`` is None.
  :param allowable: the acceptable container types for ``obj``. We deliberately do not accept
    arbitrary iterables (a bare string would otherwise pass).
  :param raise_type: the exception type raised on a type mismatch.
  :returns: the validated contents of ``obj`` (or ``default``) as a plain list.
  """
  val = obj
  if val is None:
    if can_be_none:
      # The default still flows through the container/element checks below.
      val = list(default)
    else:
      raise raise_type('Expected an object of acceptable type %s, received None and can_be_none is False' % allowable)
  # isinstance accepts a tuple of types directly; no need to build a throwaway
  # list comprehension just to test container-type membership.
  if not isinstance(val, tuple(allowable)):
    raise raise_type('Expected an object of acceptable type %s, received %s instead' % (allowable, val))
  lst = list(val)
  for e in lst:
    if not isinstance(e, expected_type):
      raise raise_type('Expected a list containing values of type %s, instead got a value %s of %s' %
                       (expected_type, e, e.__class__))
  return lst
| fakeNetflix/square-repo-pants | src/python/pants/base/validation.py | validation.py | py | 1,754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twitter.common.lang.Compatibility.string",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "twitter.common.lang.Compatibility",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "twitter.common.dirutil.fileset.Fileset",
"line_number": 9,
... |
1478139833 | import sys
import os
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLineEdit, QLabel, QPushButton, QListView
from PyQt5.QtWidgets import QSizePolicy, QScrollArea, QCompleter, QHBoxLayout, QDialog
from PyQt5.QtCore import Qt, pyqtSlot, QModelIndex
from PyQt5.QtCore import QStandardPaths
import requests, zipfile, io
from nighandu import Nighandu
import asyncio
OLAM_DATASET_URL = "https://olam.in/open/enml/olam-enml.csv.zip"
HOME_PATH = QStandardPaths.writableLocation(QStandardPaths.HomeLocation)
FILES_DIR = os.path.join(HOME_PATH, ".Nighandu")
class NighanduGui(QWidget):
def __init__(self, parent=None):
super(NighanduGui, self).__init__(parent)
self.window().setWindowTitle("Nighandu")
self.initApp()
self.initUI()
async def downloadOlamDataset(self, url, saveLocation):
    """Download the zipped Olam CSV dataset from `url` and extract it into `saveLocation`.

    NOTE(review): requests.get and ZipFile are blocking calls, so despite being
    declared async this coroutine blocks the event loop for the whole download.
    """
    r = requests.get(url)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(saveLocation)
def initApp(self):
    """Ensure the Olam dataset exists locally (downloading it on first run),
    then initialise the Nighandu dictionary backend from the CSV file."""
    # exist_ok avoids a race if the directory appears between check and mkdir.
    os.makedirs(FILES_DIR, exist_ok=True)
    csvFile = os.path.join(FILES_DIR, "olam-enml.csv")
    if not os.path.exists(csvFile):
        # asyncio.run() creates, runs and closes a fresh event loop; the old
        # get_event_loop()/run_until_complete pair is deprecated and leaked
        # the loop it created.
        asyncio.run(self.downloadOlamDataset(OLAM_DATASET_URL, FILES_DIR))
    self.nighandu = Nighandu(csvFile)
def initUI(self):
    """Build the main window: search box and suggestion list on the left,
    scrollable result viewer with zoom/about controls on the right."""
    #widget properties
    self.setMinimumSize(895, 680)
    mainLayout = QHBoxLayout()
    #inputs Widgets
    inputLayout = QHBoxLayout()
    self.searchButton = QPushButton("&Search", self)
    self.searchButton.setFixedSize(80, 30)
    self.searchButton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    self.searchButton.clicked.connect(self.searchButtonClicked)
    wordList = self.nighandu.word_list()
    self.wordInput = QLineEdit(self)
    self.wordInput.setFixedHeight(30)
    # Case-insensitive autocompletion over the whole dictionary head-word list.
    completer = QCompleter(wordList, self)
    completer.setCaseSensitivity(Qt.CaseInsensitive)
    self.wordInput.setCompleter(completer)
    self.wordInput.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
    # Pressing Enter in the input behaves like clicking Search.
    self.wordInput.returnPressed.connect(self.searchButtonClicked)
    inputLayout.addWidget(self.wordInput)
    inputLayout.addWidget(self.searchButton)
    leftControlsLayout = QVBoxLayout()
    leftControlsLayout.addLayout(inputLayout)
    suggesionsList = QListView(self)
    suggesionsList.setEditTriggers(QListView.NoEditTriggers)
    # The list view shares the completer's model so it always mirrors the
    # suggestions for the current input.
    suggesionsList.setModel(completer.completionModel())
    suggesionsList.clicked.connect(self.suggesionsListClicked)
    leftControlsLayout.addWidget(suggesionsList)
    mainLayout.addLayout(leftControlsLayout)
    # Result viewer: a rich-text QLabel inside a scroll area.
    self.wordViewerLabel = QLabel(self)
    self.wordViewerScrollArea = QScrollArea(self)
    self.wordViewerScrollArea.setWidgetResizable(True)
    self.wordViewerScrollArea.setWidget(self.wordViewerLabel)
    self.wordViewerScrollArea.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
    self.wordViewerLabel.setMargin(20)
    self.wordViewerLabel.setAlignment(Qt.AlignTop)
    #initial font size
    font = self.wordViewerLabel.font()
    font.setPixelSize(15)
    self.wordViewerLabel.setFont(font)
    self.wordViewerLabel.setText("<center> <h1> Nighandu </h1></center>")
    self.zoomInButton = QPushButton("ZoomIn (+)", self)
    self.zoomInButton.clicked.connect(self.zoomIn)
    self.zoomOutButton = QPushButton("ZoomOut (-)", self)
    self.zoomOutButton.clicked.connect(self.zoomOut)
    self.aboutButton = QPushButton("About", self)
    self.aboutButton.clicked.connect(self.about)
    zoomButtonLayout = QHBoxLayout()
    zoomButtonLayout.addWidget(self.aboutButton)
    zoomButtonLayout.addStretch()
    zoomButtonLayout.addWidget(self.zoomInButton)
    zoomButtonLayout.addWidget(self.zoomOutButton)
    rightConrolsLayout = QVBoxLayout()
    rightConrolsLayout.addWidget(self.wordViewerScrollArea)
    rightConrolsLayout.addLayout(zoomButtonLayout)
    mainLayout.addLayout(rightConrolsLayout)
    self.setLayout(mainLayout)
@pyqtSlot()
def searchButtonClicked(self):
    """Look up the word currently in the input box and render the result."""
    # Normalise: the dataset stores head words with an initial capital.
    # str.capitalize() lowercases the rest and uppercases the first character,
    # which is what the old lower() + replace(word[0], ...) dance did.
    word = self.wordInput.text().strip().capitalize()
    if not word:
        # Guard: word[0] on an empty input previously raised IndexError.
        return
    results = self.searchMeaning(word)
    if results is None:
        txt = "Sorry No results Found"
    else:
        txt = self.formatResults(results)
    self.wordViewerLabel.setText(txt)
@pyqtSlot(QModelIndex)
def suggesionsListClicked(self, index):
    """Render the meaning of the suggestion the user clicked."""
    results = self.searchMeaning(index.data())
    txt = "Sorry No results Found" if results is None else self.formatResults(results)
    self.wordViewerLabel.setText(txt)
def formatResults(self, results):
    """Render the Olam lookup rows for one word as an HTML fragment.

    Each row carries a 'part_of_speech' code and a 'malayalam_definition'.
    Rows are grouped per part of speech and emitted as a heading followed by
    its definitions; rows with an unrecognised code fall into the plain
    "Meaning" section.  This replaces ~200 lines of copy-pasted per-category
    string building with one data-driven table.
    """
    # (dataset code, section heading HTML, wrap each definition in <h4>?).
    # Headings are kept as the original UI showed them.  NOTE(review): the
    # Malayalam label for 'prep' repeats the interjection label, and the
    # 'abbr'/'auxv'/'idm' Malayalam labels look copy-pasted — confirm with a
    # Malayalam speaker.  The English label for 'abbr' (previously the
    # copy-pasted ":Suffix") is corrected to ":Abbreviation".
    sections = [
        (None, 'അര്ത്ഥം <span> :Meaning</span>', True),
        ('n', 'നാമം <span>:Noun</span>', True),
        ('v', 'ക്രിയ <span> :Verb</span>', True),
        ('a', 'വിശേഷണം<span>:Adjective</span>', True),
        ('adv', 'ക്രിയാവിശേഷണം<span> :Adverb</span>', True),
        ('pron', 'സര്വ്വനാമം<span> :Pronoun</span>', True),
        ('propn', 'സംജ്ഞാനാമം<span> :Proper noun</span>', True),
        ('phrv', 'ഉപവാക്യ ക്രിയ<span> :Phrasal verb</span>', True),
        ('conj', 'അവ്യയം<span>:Conjunction</span>', True),
        ('interj', 'വ്യാക്ഷേപകം<span> :interjection</span>', False),
        ('prep', 'വ്യാക്ഷേപകം<span> :preposition</span>', False),
        ('pfx', 'പൂർവ്വപ്രത്യയം<span> :Prefix</span>', False),
        ('sfx', 'പ്രത്യയം<span> :Suffix</span>', False),
        ('abbr', 'പ്രത്യയം<span> :Abbreviation</span>', False),
        ('auxv', 'പൂരകകൃതി <span> :Auxiliary verb</span>', False),
        ('idm', 'പൂരകകൃതി <span> :Idioms</span>', False),
    ]
    known_codes = {code for code, _, _ in sections if code is not None}
    # Single pass over the rows, preserving dataset order within each section.
    grouped = {}
    for row in results:
        code = row['part_of_speech']
        key = code if code in known_codes else None  # unknown -> "Meaning"
        grouped.setdefault(key, []).append(row['malayalam_definition'])
    parts = ['<h3>Word : {0} </h3>'.format(self.wordInput.text().strip()), '<ul>']
    for code, heading, emphasise in sections:
        definitions = grouped.get(code, [])
        if definitions:
            parts.append('<hr/>')
            parts.append('<h3>{0}</h3>'.format(heading))
            parts.append('<hr/>')
            template = '<li><h4>{0}</h4></li>' if emphasise else '<li>{0}</li>'
            parts.extend(template.format(d) for d in definitions)
    parts.append('</ul>')
    return '\n'.join(parts)
def searchMeaning(self, word):
    """Delegate the dictionary lookup to the Nighandu backend."""
    return self.nighandu.search_word(word)
@pyqtSlot()
def zoomIn(self):
    """Increase the result viewer's font size by 3 pixels."""
    viewer_font = self.wordViewerLabel.font()
    viewer_font.setPixelSize(viewer_font.pixelSize() + 3)
    self.wordViewerLabel.setFont(viewer_font)
@pyqtSlot()
def zoomOut(self):
    """Decrease the result viewer's font size by 3 pixels."""
    viewer_font = self.wordViewerLabel.font()
    viewer_font.setPixelSize(viewer_font.pixelSize() - 3)
    self.wordViewerLabel.setFont(viewer_font)
@pyqtSlot()
def about(self):
    """Show a modal About dialog with project credits and links."""
    content = """
    <center>
    <h2> Nighandu </h2>
    <p>
    Nighandu is an free opensoure english malayalam dictionary software. <br/>
    This is based on <a href="https://olam.in/open/enml/">Olam English-Malayalam dictionary dataset</a>
    <br/>
    <br/>
    <br/>
    Project: https://github.com/Vivx701/Nighandu
    <br/>
    Developer: Vivek.P (https://github.com/Vivx701)
    <br/>
    </p>
    </center>
    """
    contentLayout = QHBoxLayout()
    contentLabel = QLabel(self)
    contentLabel.setText(content)
    contentLayout.addWidget(contentLabel)
    contentLayout.addStretch()
    dialog = QDialog(self)
    dialog.window().setWindowTitle("About")
    dialog.setLayout(contentLayout)
    # NOTE(review): PyQt5 traditionally exposes exec_(); plain exec() only
    # exists on newer PyQt5 releases — confirm the minimum supported version.
    dialog.exec()
if __name__ == "__main__":
app = QApplication(sys.argv)
nighanduGui = NighanduGui()
nighanduGui.show()
sys.exit(app.exec_()) | Vivx701/Nighandu | nighandu_gui.py | nighandu_gui.py | py | 15,836 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtCore.QStandardPaths.writableLocation",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QStandardPaths",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QStandardPaths.HomeLocation",
"line_number": 13,
"u... |
6241769210 | """A simple simulation of wave packet.
Refer the details to the journal paper: PRA 45, 4734 (1992).
"""
from importlib.resources import path
import numpy as np
import pandas as pd
import xarray as xr
from . import rsc
from .electricfield import ElectricField
__all__ = ["predefined_target", "WavePacket"]
def predefined_target(name: str) -> pd.DataFrame:
    """Load the bundled target table '<name>.xlsx' (sheet 'Levels') from package resources."""
    with path(rsc, "{}.xlsx".format(name)) as fn:
        return pd.read_excel(fn, "Levels")
class WavePacket:
    """Simple wave-packet model following PRA 45, 4734 (1992)."""

    # NOTE: the tuple annotation below is informal shorthand for
    # Union[str, pd.DataFrame] and is not enforced.
    def __init__(self, field: ElectricField, target: (str, pd.DataFrame)):
        """Build the packet's per-level table.

        field -- the exciting pulse; only field.at_k(level) is used, sampling
                 its spectral amplitude at each level frequency.
        target -- either the name of a predefined target (resolved via
                  predefined_target) or a DataFrame with columns 'level' and
                  'strength', optionally a unique 'config' column used as index.
        """
        if isinstance(target, str):
            target = predefined_target(target)
        if "config" in target:
            # 'config' labels each level; duplicates would make the index ambiguous.
            if not target["config"].is_unique:
                raise ValueError(
                    "Values in target['config'] should be unique.")
            idx = target["config"]
        else:
            idx = range(len(target))
        # coeff: sqrt(strength) weighted by the field's spectral amplitude at
        # the level frequency.
        self.__status = pd.DataFrame({
            "config": idx,
            "freq": target["level"],
            "coeff": target["strength"]**0.5 * field.at_k(target["level"]),
        }).set_index("config")

    @property
    def status(self) -> pd.DataFrame:
        # Read-only view of the level table (index: config; columns: freq, coeff).
        return self.__status

    def __call__(self, t: np.ndarray) -> xr.DataArray:
        """Evaluate the real products a_n(t) * a_m(t)* on the time grid `t`.

        Returns an xr.DataArray with dims ("t", "n", "n'"), where n ranges over
        the level configurations.
        """
        n = self.__status.index  # dims: [n]
        k = self.__status["freq"]  # dims: [n]
        c = self.__status["coeff"]  # dims: [n]
        # Amplitude of each level evolving with phase exp(-i k t).
        a = -1j * np.exp(-1j * k[None, :] * t[:, None]) * c[None, :].conj()
        # dims: [t, n]
        return xr.DataArray(
            (a[:, :, None] * a[:, None, :].conj()).real,
            coords=[t, n, n],
            dims=["t", "n", "n'"],
        )
| DaehyunPY/FERMI_20149100 | Packages/simul2/wavepacket.py | wavepacket.py | py | 1,648 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "importlib.resources.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "elec... |
31061019305 |
from ..utils import Object
class CancelUploadFile(Object):
    """
    Stops the uploading of a file. Supported only for files uploaded by using uploadFile. For other files the behavior is undefined

    Attributes:
        ID (:obj:`str`): ``CancelUploadFile``

    Args:
        file_id (:obj:`int`):
            Identifier of the file to stop uploading

    Returns:
        Ok

    Raises:
        :class:`telegram.Error`
    """
    ID = "cancelUploadFile"

    def __init__(self, file_id, extra=None, **kwargs):
        self.file_id = file_id  # int
        self.extra = extra

    @staticmethod
    def read(q: dict, *args) -> "CancelUploadFile":
        # Rehydrate the request object from its dict representation.
        return CancelUploadFile(q.get('file_id'))
| iTeam-co/pytglib | pytglib/api/functions/cancel_upload_file.py | cancel_upload_file.py | py | 735 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
}
] |
42850936844 | from django.urls import path
from . import views
urlpatterns = [
    # Account handling (registration, session login/logout).
    path('register/', views.registerPage, name='register'),
    path('login/', views.loginPage, name='login'),
    path('logout/', views.logoutUser, name='logout'),
    # Event CRUD; update/delete take the event's primary key as a string.
    path('event_create/', views.event_create, name='event_create'),
    path('event_manager/', views.event_manager, name='event_manager'),
    path('event_update/<str:pk>/', views.event_update, name='event_update'),
    path('event_delete/<str:pk>/', views.event_delete, name='event_delete'),
]
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
17754409752 | import tornado.ioloop
import tornado.web
import tornado.httpserver
import io
import os
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy import inspect
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
import mercantile
import pyproj
import yaml
import sys
import itertools
import re
def GetTM2Source(file):
    """Parse a TM2Source YAML file and return its contents as a dict.

    Uses yaml.safe_load: yaml.load without an explicit Loader is deprecated
    and can instantiate arbitrary Python objects from a crafted document.
    """
    with open(file, 'r') as stream:
        return yaml.safe_load(stream)
def GeneratePrepared():
    """Build (and print) the PREPARE statement used to serve water tiles.

    We have land polygons but want water (ocean/sea) polygons.  Diffing against
    a whole hemisphere segfaults Postgres, so each tile is produced by
    subtracting the unioned land pieces from the tile's own bounding box;
    COALESCE returns the untouched bbox when the tile holds no land at all
    (open ocean).  Hardcoded to north_osm_land_polygons_gen7 for speed.
    """
    # Negative of the land: bbox minus the union of intersecting land pieces.
    land_diff = (" SELECT COALESCE(ST_Difference(!bbox_nobuffer!, "
                 "ST_Union(ST_Intersection(geometry, !bbox_nobuffer!))), !bbox_nobuffer!) AS geometry "
                 "FROM north_osm_land_polygons_gen7 WHERE geometry && !bbox_nobuffer! ")
    tile_geom_query = ("SELECT ST_AsMVTGeom(geometry,!bbox_nobuffer!,4096,0,true) AS mvtgeometry FROM ("
                       + land_diff +
                       ") AS x WHERE geometry IS NOT NULL AND NOT ST_IsEmpty(geometry) "
                       "AND ST_AsMVTGeom(geometry,!bbox_nobuffer!,4096,0,true) IS NOT NULL")
    base_query = ("SELECT ST_ASMVT('water', 4096, 'mvtgeometry', tile) FROM ("
                  + tile_geom_query +
                  ") AS tile WHERE tile.mvtgeometry IS NOT NULL")
    print (base_query)
    # Substitute the mapnik-style tokens with prepared-statement parameters.
    substitutions = {"!bbox_nobuffer!": "$1", "!scale_denominator!": "$2",
                     "!pixel_width!": "$3", "!pixel_height!": "$4"}
    query = base_query
    for token, param in substitutions.items():
        query = query.replace(token, param)
    prepared = "PREPARE gettile(geometry, numeric, numeric, numeric) AS " + query + ";"
    print(prepared)
    return prepared
print("Starting up")
prepared = GeneratePrepared()
connection_string = 'postgresql://'+os.getenv('POSTGRES_USER','openmaptiles')+':'+os.getenv('POSTGRES_PASSWORD','openmaptiles')+'@'+os.getenv('POSTGRES_HOST','postgres')+':'+os.getenv('POSTGRES_PORT','5432')+'/'+os.getenv('POSTGRES_DB','openmaptiles')
engine = create_engine(connection_string)
inspector = inspect(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
print("Running prepare statement")
session.execute(prepared)
def bounds(zoom, x, y, buff):
    """Return the projected bounding box {'w','s','e','n'} of tile (zoom, x, y),
    widened on every side by `buff` tile-widths.

    The tile origin is shifted to the map centre and the y axis flipped so it
    grows northwards.  Assumes a square map 2*sqrt(2)*R metres across
    (EPSG:3575 Arctic projection — TODO confirm).
    """
    print('Tile', zoom, x, y, 'with buffer', buff)
    map_width_in_metres = 2 * 2 ** 0.5 * 6371007.2
    tile_size = map_width_in_metres / 2 ** zoom
    col = x - 2 ** (zoom - 1)
    row = -(y - 2 ** (zoom - 1)) - 1
    west = (col - buff) * tile_size
    south = (row - buff) * tile_size
    east = (col + 1 + buff) * tile_size
    north = (row + 1 + buff) * tile_size
    print("Zoom, buffer", zoom, buff)
    print("West: ", west)
    print("South: ", south)
    print("East: ", east)
    print("North: ", north)
    return {'w': west, 's': south, 'e': east, 'n': north}
def zoom_to_scale_denom(zoom):  # For !scale_denominator!
    """Return the OGC scale denominator at `zoom` as a string.

    Derived from the mapnik zoom-to-scale table: map width in metres divided by
    map width in standardized 0.28 mm pixels (512 px tiles, i.e. one zoom level
    higher than 256 px tiles).
    """
    map_width_in_metres = 2 * 2 ** 0.5 * 6371007.2  # Arctic
    map_width_in_pixels = 512.0 * 2.0 ** zoom
    return str(map_width_in_metres / (map_width_in_pixels * 0.00028))
def replace_tokens(query, tilebounds, scale_denom, z):
    """Fill the mapnik-style !tokens! in `query` with concrete values.

    The !bbox! token becomes an ST_MakeBox2D over `tilebounds` in SRID 3575;
    pixel width/height are fixed at 512.  `z` is accepted for interface
    compatibility but unused.
    """
    south = str(tilebounds['s'])
    west = str(tilebounds['w'])
    north = str(tilebounds['n'])
    east = str(tilebounds['e'])
    bbox = ("ST_SetSRID(ST_MakeBox2D(ST_Point(" + west + ", " + south +
            "), ST_Point(" + east + ", " + north + ")), 3575)")
    filled = query.replace("!bbox!", bbox)
    filled = filled.replace("!scale_denominator!", scale_denom)
    filled = filled.replace("!pixel_width!", "512")
    return filled.replace("!pixel_height!", "512")
def get_mvt(zoom,x,y):
    """Render the Mapbox vector tile for (zoom, x, y) via the prepared query.

    Returns the concatenated binary layer blobs, or the int 1 when the inputs
    do not parse as numbers.
    """
    try: # Sanitize the inputs
        sani_zoom,sani_x,sani_y = float(zoom),float(x),float(y)
        del zoom,x,y
    except:
        # NOTE(review): bare except plus an int sentinel — the caller writes
        # this return value straight to the HTTP response; confirm intended.
        print('suspicious')
        return 1
    scale_denom = zoom_to_scale_denom(sani_zoom)
    tilebounds = bounds(sani_zoom,sani_x,sani_y,0)
    final_query = "EXECUTE gettile(!bbox!, !scale_denominator!, !pixel_width!, !pixel_height!);"
    sent_query = replace_tokens(final_query,tilebounds,scale_denom,sani_zoom)
    print(sani_zoom, sani_x, sani_y, sent_query)
    response = list(session.execute(sent_query))
    # Each row holds one MVT layer blob; drop NULLs and concatenate the rest.
    layers = filter(None,list(itertools.chain.from_iterable(response)))
    final_tile = b''
    for layer in layers:
        final_tile = final_tile + io.BytesIO(layer).getvalue()
    return final_tile
class GetTile(tornado.web.RequestHandler):
    """Tornado handler serving /tiles/{z}/{x}/{y}.pbf as Mapbox vector tiles."""
    def get(self, zoom,x,y):
        # Protobuf payload; CORS is open so browser map clients can fetch it.
        self.set_header("Content-Type", "application/x-protobuf")
        self.set_header("Content-Disposition", "attachment")
        self.set_header("Access-Control-Allow-Origin", "*")
        response = get_mvt(zoom,x,y)
        self.write(response)
def m():
    """Start the tile server (port 8080, single process) when run as a script.

    The __name__ check inside the function reads the module's __name__, so
    nothing starts when this module is merely imported.
    """
    if __name__ == "__main__":
        # Make this prepared statement from the tm2source
        application = tornado.web.Application([
            (r"/tiles/([0-9]+)[/_]([0-9]+)[/_]([0-9]+).pbf", GetTile),
            (r"/([^/]*)", tornado.web.StaticFileHandler, {"path": "./static", "default_filename": "index_3575.html"})
        ])
        server = tornado.httpserver.HTTPServer(application)
        server.bind(8080)
        server.start(1)
        print("Postserve started..")
        #application.listen(8080)
        tornado.ioloop.IOLoop.instance().start()

m()
{
"api_name": "yaml.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.inspect",
... |
29656137310 | import time
import tweepy
auth = tweepy.OAuthHandler('KINHgXqoSTS5ReyTnjXSYAA6w', 'ehCnMc37yfAf6PPdmzQMJM7pkUb5HYsnPfZw0vf5m9rxPNEbVm')
auth.set_access_token('1488729367346040833-mQJ2oNZDK0Rj49uLojV9WAYL4oURe0', '8zzRNCJ9sGxcnxJxgVEQkfNC7kWL12Akgpd2gdUt6REo3')
api = tweepy.API(auth)
user = api.me()
# public_tweets = api.home_timeline()
# for tweet in public_tweets:
# print(tweet.text)
def limit_handle(cursor):
    """Yield items from a tweepy cursor, backing off once on rate limiting.

    NOTE(review): the try wraps the *whole* loop, so after a RateLimitError
    the generator sleeps ~17 minutes and then terminates instead of resuming
    iteration — confirm whether it should retry after the sleep.
    """
    try:
        while True:
            yield cursor.next()
    except tweepy.RateLimitError:
        time.sleep(1000)
# for follower in limit_handle(tweepy.Cursor(api.followers).items()):
# if follower.name == '':
# follower.follow()
# print(follower.name)
# Like the first `numberOfTweets` tweets matching the search term, tolerating
# per-tweet API errors (e.g. already-liked) without stopping the run.
search_item = 'nasa'
numberOfTweets = 10
for tweet in tweepy.Cursor(api.search, search_item).items(numberOfTweets):
    try:
        tweet.favorite()
        print('likey')
    except tweepy.TweepError as e:
        # Likely "already favorited" or permission errors; log and continue.
        print(e.reason)
    except StopIteration:
        break
{
"api_name": "tweepy.OAuthHandler",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tweepy.RateLimitError",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
... |
27698021659 | # -*- coding: utf-8 -*-#
'''
# Name: dnn_regression-keras
# Description:
# Author: super
# Date: 2020/6/2
'''
from HelperClass2.MnistImageDataReader import *
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def load_data():
    """Read the ch09 npz datasets, shuffle, and carve out a validation split.

    Returns:
        (x_train, y_train, x_test, y_test, x_val, y_val) as provided by
        DataReader_2_0 (no normalization is applied here).
    """
    reader = DataReader_2_0("../data/ch09.train.npz", "../data/ch09.test.npz")
    reader.ReadData()
    reader.Shuffle()
    reader.GenerateValidationSet()
    return (
        reader.XTrain, reader.YTrain,
        reader.XTest, reader.YTest,
        reader.XDev, reader.YDev,
    )
def build_model():
    """Assemble and compile a tiny 1-4-1 regression network (MSE / Adam)."""
    net = Sequential([
        Dense(4, activation='sigmoid', input_shape=(1,)),
        Dense(1, activation='linear'),
    ])
    net.compile(optimizer='Adam', loss='mean_squared_error')
    return net
#画出训练过程中训练和验证的精度与损失
def draw_train_history(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    for series in ('loss', 'val_loss'):
        plt.plot(history.history[series])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
if __name__ == '__main__':
    # Train the 1-4-1 regression net on the ch09 data, plot the loss curves,
    # then report test loss and the learned weights.
    x_train, y_train, x_test, y_test, x_val, y_val = load_data()
    # print(x_train.shape)
    # print(x_test.shape)
    # print(x_val.shape)
    model = build_model()
    history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_val, y_val))
    draw_train_history(history)
    loss = model.evaluate(x_test, y_test)
    print("test loss: {}".format(loss))
    weights = model.get_weights()
    print("weights: ", weights)
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "keras.models.Sequential",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "keras.layers.Dense",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "keras.laye... |
4579701597 | from django.http import JsonResponse
from django.views.generic import View
from .models import Scraper
from .validators import currency_serializer, get_valid_data
class ScraperAPI(View):
    """JSON CRUD endpoints for Scraper records (GET/POST/PUT/DELETE)."""

    def get(self, *args, **kwargs):
        # List every scraper through the shared currency serializer.
        serialized = [currency_serializer(s) for s in Scraper.objects.all()]
        return JsonResponse({"scrapers": serialized})

    def post(self, *args, **kwargs):
        payload, is_valid = get_valid_data('POST', self.request.body)
        if not is_valid:
            return JsonResponse(payload, status=400)
        # Reject duplicates: one scraper per currency.
        if Scraper.objects.filter(currency=payload['currency']).count() != 0:
            return JsonResponse({"error": "This currency already exists"}, status=400)
        scraper = Scraper.objects.create(currency=payload['currency'], frequency=payload['frequency'])
        # Seed the value history with a zero entry.
        scraper.values.create(value=0)
        body = {
            "id": scraper.id,
            "created_at": scraper.create_at,
            "currency": scraper.currency,
            "frequency": scraper.frequency,
        }
        return JsonResponse(body)

    def put(self, *args, **kwargs):
        payload, is_valid = get_valid_data('PUT', self.request.body)
        if not is_valid:
            return JsonResponse(payload, status=400)
        if Scraper.objects.filter(pk=payload['id']).count() == 0:
            return JsonResponse({"error": "This Scraper not exists"}, status=400)
        # Only the polling frequency is mutable.
        Scraper.objects.filter(pk=int(payload['id'])).update(frequency=int(payload['frequency']))
        return JsonResponse({"msg": "Scraper updated"})

    def delete(self, *args, **kwargs):
        payload, is_valid = get_valid_data('DELETE', self.request.body)
        if not is_valid:
            return JsonResponse(payload, status=400)
        if Scraper.objects.filter(pk=payload['id']).count() == 0:
            return JsonResponse({"error": "This Scraper not exists"}, status=400)
        Scraper.objects.filter(pk=payload['id']).delete()
        return JsonResponse({"msg": "Scraper deleted"})
| chvilches/rg-corp | api/views.py | views.py | py | 2,052 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.View",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Scraper.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Scraper.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"a... |
74784438504 | import torch
import torch.nn as nn
class MLP(nn.Module):
    """Fully-connected feed-forward network with `num_layers` Linear layers.

    Layout: input_dim -> hidden_dim[0] -> ... -> hidden_dim[-1] -> output_dim,
    with ReLU and dropout after every layer except the output.

    Args:
        input_dim: width of the input features.
        output_dim: width of the network output.
        hidden_dim: hidden-layer widths; needs num_layers - 1 entries.
        num_layers: total number of Linear layers (input + hidden + output).
        dropout_rate: dropout probability after each hidden activation.
    """

    def __init__(self, input_dim : int, output_dim : int, hidden_dim : list,
                 num_layers:int, dropout_rate:float=0.):
        super(MLP, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        # Create input layer
        self.input_layer = nn.Linear(input_dim, hidden_dim[0])
        # Fix: consecutive widths are hidden_dim[n] -> hidden_dim[n + 1]; the
        # original used hidden_dim[n+1] -> hidden_dim[n+2], which skipped
        # hidden_dim[0]'s output width and indexed past the end of the list.
        # Fix: use nn.ModuleList (not a plain Python list) so the layers are
        # registered as submodules and seen by optimizers / .to(device).
        self.hidden_layers = nn.ModuleList(
            nn.Linear(hidden_dim[n], hidden_dim[n + 1])
            for n in range(num_layers - 2)
        )
        self.output_layer = nn.Linear(hidden_dim[-1], output_dim)
        self.relu = nn.ReLU()
        # Fix: the class is nn.Dropout (original referenced the nonexistent
        # nn.DropOut) and must be stored under the name forward() reads
        # (original stored `self.droput` but read `self.dropout`).
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        """Map x of shape (..., input_dim) to (..., output_dim)."""
        outputs = self.relu(self.input_layer(x))
        for h_layer in self.hidden_layers:
            outputs = self.relu(self.dropout(h_layer(outputs)))
        outputs = self.output_layer(outputs)
        return outputs
| GarfieldCK/AI-module | ai_modules/models/module.py | module.py | py | 1,034 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
27977418436 | #!/usr/bin/env python
import config
import json
import requests
import sys
"""
Copyright (c) 2020, Cisco Systems, Inc. and/or its affiliates
Creates webhooks in a repo upon release using
GitHub API v3 POST /repos/:owner/:repo/hooks
Requires a file with repo names, one per line,
and a personal access token with access to each repo.
Usage:
python create_webhook.py devnet_repos.txt
"""
def get_webhook(gh_orgname, repo_name, gh_username, gh_api_key):
    """Fetch and pretty-print the webhooks configured on one repository.

    Uses GitHub API v3 GET /repos/:owner/:repo/hooks with basic auth.
    Prints the JSON body on success, or the status/text on a non-JSON reply.
    """
    api_uri = "https://api.github.com/repos/{}/{}/hooks".format(gh_orgname, repo_name)
    print(api_uri)
    session = requests.Session()
    session.auth = (gh_username, gh_api_key)
    try:
        gethooks = session.get(api_uri)
    except requests.RequestException as exc:
        # Fix: the original bare `except` referenced `gethooks` even when the
        # request itself failed, raising NameError instead of reporting.
        print("Request failed: {}".format(exc))
        return
    try:
        print(json.dumps(gethooks.json(), indent=4))
    except ValueError:
        # Body was not JSON; fall back to the raw status and text.
        print(gethooks.status_code)
        print("Response text: {}".format(gethooks.text))
def post_create_webhook(gh_orgname, repo_name, gh_username, gh_api_key, gh_webhook_url, gh_secret):
    """Create a release-triggered webhook on one repository.

    Uses GitHub API v3 POST /repos/:owner/:repo/hooks with a token header.
    Exits the process on failure, preserving the script's fail-fast behavior.
    """
    api_uri = "https://api.github.com/repos/{}/{}/hooks".format(gh_orgname, repo_name)
    print("API endpoint: {}".format(api_uri))
    print("Username: {}".format(gh_username))
    print("API Key: {}".format(gh_api_key))
    print("Secret for payload: {}".format(gh_secret))
    headers = {'User-Agent': '{}'.format(gh_username),
               'Content-Type': 'application/json',
               'Authorization': 'token {}'.format(gh_api_key)
               }
    print(headers)
    payload = {
        'name': 'web',
        'active': True,
        # Fire only on release events.
        'events': ['release'],
        'config': {
            'url': '{}'.format(gh_webhook_url),
            'content_type': 'json',
            'secret': '{}'.format(gh_secret),
            'insecure_ssl': '0'
        }
    }
    resp = None
    try:
        session = requests.Session()
        makehooks = requests.Request('POST', api_uri, json=payload, headers=headers).prepare()
        resp = session.send(makehooks)
        print(resp.status_code)
        print(json.dumps(resp.json(), indent=4))
    except Exception:
        # Fix: the original bare `except` dereferenced `resp` even when the
        # request never produced a response, crashing with NameError; only
        # report what we actually have before exiting.
        if resp is not None:
            print(resp.status_code)
            print("Response text: {}".format(resp.text))
        sys.exit()
def main(args):
    """Create webhooks for every repo listed (one name per line) in a file.

    Args:
        args: the argv slice after the program name; args[0] is the filename.

    Fix: the original signature named its parameter `filename` but the body
    read an undefined `args`, so every invocation raised NameError. The file
    is now also opened via a context manager so the handle is always closed.
    """
    if not len(args) == 1:
        print("Enter the filename for the file that contains the list of repos, one per line")
        return
    filename = args[0]
    # Read data in from a text list of all repo names, one per line.
    with open(filename) as f:
        repolist = f.readlines()
    for repo in repolist:
        repo_name = repo.rstrip('\n')
        print("Working on this repo: " + repo_name)
        #getresponse = get_webhook(config.gh_orgname, repo_name, config.gh_username, config.gh_api_key)
        postresponse = post_create_webhook(config.gh_orgname, repo_name, config.gh_username, config.gh_api_key, config.gh_webhook_url, config.gh_secret)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| justwriteclick/gh-webhooks | create_webhook.py | create_webhook.py | py | 3,009 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "requests.Request",
"... |
71903311144 | import torch.nn as nn
import torch
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from prior_learning.toy_env.toyloader import toyenv_Dataset
# --- Hyper-parameters for the toy reward-regression task ---
size = 8
seq_len = 32
categories = 16
batch_size = 128
feature_dim = 16

# Random per-category feature vectors, shared by the dataset and the probe below.
features = np.random.random((categories, feature_dim))
train_loader = DataLoader(toyenv_Dataset(features, size, seq_len, categories), batch_size = batch_size, num_workers= 40, shuffle = True)

# Small MLP mapping one feature vector to a scalar reward estimate.
net = nn.Sequential(
    nn.Linear(feature_dim, 32),
    nn.ReLU(),
    nn.Linear(32, 16),
    nn.ReLU(),
    nn.Linear(16, 1)
)
net.cuda()

criteria = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=1e-3)

reg_sum = 0
loss_sum = 0
for i, data in enumerate(train_loader):
    if i == 29999:
        # Late learning-rate decay. NOTE(review): this replaces the optimizer,
        # discarding Adam's accumulated moment estimates — confirm intended.
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
    blocks, masks, rewards = [d.cuda() for d in data]
    # Flatten (batch, seq, feat) to per-step rows for the MLP.
    blocks = blocks.view(batch_size * seq_len, feature_dim)
    rewards_hat = net(blocks)
    rewards_hat = rewards_hat.view(batch_size, seq_len)
    # Small L1 penalty on raw per-step predictions keeps magnitudes in check.
    reg = torch.mean(torch.abs(rewards_hat)) * 0.01
    # Sequence reward = masked sum of per-step predictions.
    rewards_hat = torch.sum(rewards_hat * masks, 1)
    loss = criteria(rewards_hat, rewards) + reg
    loss_sum += loss.item()
    reg_sum += reg.item()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % 2000 == 1999:
        # NOTE(review): the sums accumulate over 2000 steps but are divided by
        # 100, so the printed "averages" are scaled by 20x — confirm intended.
        print('[{}] loss: {}, reg: {}'.format(i + 1, loss_sum / 100, reg_sum / 100))
        loss_sum = 0
        reg_sum = 0
    if i % 10000 == 9999:
        # Periodically probe the net directly on the raw category features.
        result = net(torch.from_numpy(features).float().cuda()).flatten().detach().cpu().numpy()
        print('=' * 40)
        print(result)
        print('='*40)
| buoyancy99/sap | prior_learning/toy_env/train_toy.py | train_toy.py | py | 1,633 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.random.random",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pri... |
36622911721 | #"""Build and train for the AI Models."""
#imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
from data_load import DataLoader
import numpy as np
import tensorflow as tf
model_name = ""
def reshape_function(data, label):
    """Reshape a flat sample to (timesteps, 10, 1) for the CNN; label passes through."""
    return tf.reshape(data, [-1, 10, 1]), label
def calculate_model_size(model):
    """Print the model summary and its trainable-parameter footprint.

    Args:
        model: any object exposing `summary()` and `trainable_variables`,
            where each variable has `.shape` and `.dtype.size` (bytes).

    Returns:
        The size of the trainable variables in KB. (The original returned
        None; callers that ignore the return value are unaffected.)
    """
    print(model.summary())
    var_sizes = [
        # np.prod: np.product was deprecated in NumPy 1.25 and later removed.
        np.prod(list(map(int, v.shape))) * v.dtype.size
        for v in model.trainable_variables
    ]
    size_kb = sum(var_sizes) / 1024
    print("Model size:", size_kb, "KB")
    return size_kb
def build_cnn(seq_length):
    """Builds a convolutional neural network in Keras.

    Selects one of two architectures via the module-global `args.modelnumber`
    ("0" or "1") and records the choice in the global `model_name`.

    Returns:
        (model, model_path): the uncompiled model and its checkpoint directory
        (created on demand).

    NOTE(review): any other modelnumber leaves `model` unbound, so the final
    return raises UnboundLocalError — confirm the CLI restricts the values.
    """
    global model_name
    if args.modelnumber == "0":
        # Variant 0: single conv block, linear output head.
        model_name = "-CNN_model-0"
        model = tf.keras.Sequential()
        model.add(tf.keras.layers.Conv2D(
            10, (20, 10),
            padding="same",
            activation="relu",
            input_shape=(seq_length, 10, 1)))
        model.add(tf.keras.layers.MaxPooling2D((3, 3)))
        model.add(tf.keras.layers.Flatten())
        model.add(tf.keras.layers.Dense(9, activation='linear'))
        model.summary()
    elif args.modelnumber == "1":
        # Variant 1: two conv blocks with dropout, relu output head.
        model_name = "-CNN_model-1"
        model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(
                10, (20, 10),
                padding="same",
                activation="relu",
                input_shape=(seq_length, 10, 1)),
            tf.keras.layers.MaxPool2D((3, 3)),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Conv2D(16, (10, 1), padding="same",
                                   activation="relu"),
            tf.keras.layers.MaxPool2D((3, 1), padding="same"),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dropout(0.1),
            tf.keras.layers.Dense(9, activation="relu")
        ])
    model_path = os.path.join("./netmodels", "CNN")
    print("Built CNN.")
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    return model, model_path
def build_lstm(seq_length):
    """Builds an LSTM in Keras.

    Selects one of six architectures via the module-global `args.modelnumber`
    ("0".."5") and records the choice in the global `model_name`.

    Returns:
        (model, model_path): the uncompiled model and its checkpoint directory
        (created on demand).

    NOTE(review): an unrecognized modelnumber leaves `model` unbound, so the
    final return raises UnboundLocalError — confirm the CLI restricts values.
    """
    # Best performing model till now 28.11.2023 14:26
    # RMSE 1.4 -> but no accurate predictions epochs 30 -> seq 20 -> batch 64
    # Loss: 0.939727783203125, RMSE: 0.9693955779075623 -> epochs 30 -> batch 64 -> seq 20
    global model_name
    # TODO add modelnumber to foldername
    if args.modelnumber == "0":
        # Variant 0: single LSTM(100), linear head.
        model_name = "-LSTM_model-0"
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    if args.modelnumber == "1":
        # Variant 1: like 0 but with a Flatten before the head.
        model_name = "-LSTM_model-1"
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "2":
        # Variant 2: LSTM(100) + 20% dropout, linear head.
        model_name = "-LSTM_model-2"
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "3":
        # Variant 3: like 2 but with a softmax head (classification-style).
        model_name = "-LSTM_model-3"
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(units=9, activation="softmax"),
        ])
        model.summary()
    elif args.modelnumber == "4":
        # Variant 4: two stacked LSTM(100) layers, linear head.
        model_name = "-LSTM_model-4"
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(seq_length, 10)),
            tf.keras.layers.LSTM(100, return_sequences = True),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear"),
        ])
        model.summary()
    elif args.modelnumber == "5":
        # Variant 5: bidirectional LSTM + dropout + LSTM, linear head.
        model_name = "-LSTM_model-5"
        model = tf.keras.Sequential([
            tf.keras.layers.Bidirectional(
                tf.keras.layers.LSTM(100, return_sequences = True),
                input_shape=(seq_length, 10)),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.LSTM(100),
            tf.keras.layers.Dense(units=9, activation="linear")
        ])
    model_path = os.path.join("./netmodels", "LSTM")
    print("Built LSTM.")
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    return model, model_path
def load_data(train_data_path, valid_data_path, test_data_path, seq_length):
    """Load and format the three dataset splits via the project DataLoader.

    Returns:
        (train_len, train_data, valid_len, valid_data, test_len, test_data).
    """
    loader = DataLoader(
        train_data_path, valid_data_path, test_data_path, seq_length=seq_length)
    loader.format()
    return (loader.train_len, loader.train_data,
            loader.valid_len, loader.valid_data,
            loader.test_len, loader.test_data)
def build_net(args, seq_length):
    """Dispatch to the requested architecture builder.

    Args:
        args: parsed CLI namespace; `args.model` selects "CNN" or "LSTM".
        seq_length: input window length forwarded to the builder.

    Returns:
        (model, model_path) from the selected builder.

    Raises:
        ValueError: for an unknown model name. (Fix: the original printed a
        hint and then crashed with UnboundLocalError on the return statement.)
    """
    if args.model == "CNN":
        model, model_path = build_cnn(seq_length)
    elif args.model == "LSTM":
        model, model_path = build_lstm(seq_length)
    else:
        print("Please input correct model name.(CNN LSTM)")
        raise ValueError("Unknown model name: {!r} (expected CNN or LSTM)".format(args.model))
    return model, model_path
def train_net(
        model,
        model_path,  # pylint: disable=unused-argument
        train_len,  # pylint: disable=unused-argument
        train_data,
        valid_len,
        valid_data,  # pylint: disable=unused-argument
        test_len,
        test_data,
        kind):
    """Trains the model, evaluates it, and exports TFLite artifacts.

    Args:
        model: uncompiled Keras model from build_cnn/build_lstm.
        model_path: checkpoint directory (currently unused).
        train_len / valid_len / test_len: sample counts per split.
        train_data / valid_data / test_data: unbatched tf.data datasets.
        kind: "CNN" reshapes samples to (seq, 10, 1); anything else leaves
            them as (seq, 10).

    Side effects: fits the model (logging to the module-global
    `tensorboard_callback`), prints a confusion matrix and metrics, and
    writes "model.tflite" and "model_quantized.tflite" to the CWD.
    """
    calculate_model_size(model)
    # tested batch_sizes = 64, 128, 16, 10, 64
    epochs = 30
    # The batch_size argument specifies how many pieces of training data to
    # feed into the network before measuring its accuracy and updating its
    # weights and biases.
    batch_size = 64
    # NOTE(review): this `rmse` is immediately overwritten by evaluate() below
    # and never used as a metric object — likely leftover.
    rmse = tf.keras.metrics.RootMeanSquaredError()
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics=[tf.keras.metrics.RootMeanSquaredError(), "accuracy"])
    if kind == "CNN":
        # CNN expects a trailing channel dimension: (seq, 10) -> (seq, 10, 1).
        train_data = train_data.map(reshape_function)
        test_data = test_data.map(reshape_function)
        valid_data = valid_data.map(reshape_function)
    # Materialize the test labels (pre-batching) for the confusion matrix.
    test_labels = np.zeros(test_len)
    idx = 0
    for data, label in test_data:  # pylint: disable=unused-variable
        test_labels[idx] = label.numpy()
        print(str(label))
        idx += 1
    # load train_data_entry for test
    print("--> trainTest_labels: ")
    trainTest_labels = np.zeros(train_len)
    idx = 0
    for data, label in train_data:  # pylint: disable=unused-variable
        trainTest_labels[idx] = label.numpy()
        print(str(label))
        idx += 1
    trainTest_data = train_data.batch(batch_size)
    train_data = train_data.batch(batch_size).repeat()
    valid_data = valid_data.batch(batch_size)
    test_data = test_data.batch(batch_size)
    # EarlyStopping() saves us a lot of time, it stops the model training once
    # it realizes that there will be no more decrease in loss in further
    # epochs. NOTE(review): it is created but not passed to fit() below.
    early_stop = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', patience = 2)
    model.fit(
        train_data,
        epochs=epochs,
        validation_data=valid_data,
        steps_per_epoch=1000,
        # validation_steps=int((valid_len - 1) / batch_size + 1),
        validation_steps=1,
        # callbacks=[tensorboard_callback, early_stop])
        callbacks=[tensorboard_callback])
    loss, rmse, acc = model.evaluate(test_data)
    # Argmax over the 9-way output gives the predicted class per test sample.
    pred = np.argmax(model.predict(test_data), axis=1)
    print("\n\n\n TEST PREDICTION \n\n\n")
    print("\n Prediction should be:")
    print(test_labels)
    print("\n Prediction")
    print(pred)
    print("\n\n\n TEST PREDICTION END \n\n\n")
    # num_classes: The possible number of labels the classification task can
    confusion = tf.math.confusion_matrix(
        labels=tf.constant(test_labels),
        predictions=tf.constant(pred),
        num_classes=9)
    print(confusion)
    print("Loss: {}, RMSE: {}, Accuracy: {}".format(loss, rmse, acc))
    # Convert the model to the TensorFlow Lite format without quantization
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    converter._experimental_lower_tensor_list_ops = False
    tflite_model = converter.convert()
    # Save the model to disk
    open("model.tflite", "wb").write(tflite_model)
    # Convert the model to the TensorFlow Lite format with quantization
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    converter._experimental_lower_tensor_list_ops = False
    tflite_model = converter.convert()
    # Save the model to disk
    open("model_quantized.tflite", "wb").write(tflite_model)
    basic_model_size = os.path.getsize("model.tflite")
    print("Basic model is %d bytes" % basic_model_size)
    quantized_model_size = os.path.getsize("model_quantized.tflite")
    print("Quantized model is %d bytes" % quantized_model_size)
    difference = basic_model_size - quantized_model_size
    print("Difference is %d bytes" % difference)
if __name__ == "__main__":
    # CLI: --model {CNN,LSTM} --modelnumber {0..5} selects the architecture.
    # print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m")
    parser.add_argument("--modelnumber", "-mn")
    args = parser.parse_args()
    # args.model = "LSTM"
    # args.modelnumber = "0"
    # seq_length data window sizes tested = 2988, 128, 640, 64, 10
    # Smaller windows gave larger RMSE (model cannot capture enough context);
    # seq_length = 128 -> RMSE 1.378 -> early stop 17 epochs
    # seq_length = 20  -> RMSE LSTM -> 2.3 -> 10 Epochs
    # seq_length = 128 -> RMSE LSTM -> 1.7 -> 10 Epochs
    seq_length = 20
    print("Start to load data...")
    train_len, train_data, valid_len, valid_data, test_len, test_data = \
        load_data("./Data/train/train.json", "./Data/valid/valid.json", "./Data/test/test.json", seq_length)
    print("Start to build net...")
    model, model_path = build_net(args, seq_length)
    # Per-run TensorBoard log directory, tagged with the chosen model name.
    logdir = "logs/scalars/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + model_name
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
    print("Start training...")
    train_net(model, model_path, train_len, train_data, valid_len, valid_data,
              test_len, test_data, args.model)
    print("Training finished!")
#LIST OF TESTED LSTM MODELS
"""
#Loss: 2.5077505111694336, RMSE: 1.583587884902954 -> 5 epochs
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(20),
input_shape=(seq_length, 10)), # output_shape=(batch, 44)
#tf.keras.layers.Dropout(0.2),
#tf.keras.layers.Flatten(),
tf.keras.layers.Dense(11, activation="sigmoid") # (batch, 4)
])
model.summary()
"""
"""
#good model 2 -> RMSE 1.4 ohne dropout layer 24epochs batch 64 seq 20-> mit dropout layer RMSE
#22.11.2023 - 14:34
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100, return_sequences = True),
input_shape=(seq_length, 10)), # output_shape=(batch, 44)
tf.keras.layers.LSTM(100),
tf.keras.layers.Dropout(0.2),
#tf.keras.layers.Dense(11, activation="sigmoid") # (batch, 4)
tf.keras.layers.Dense(11)#, activation="relu") # (batch, 4)
#tf.keras.layers.Dense(11, activation="linear") # (batch, 4)
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(100),
#tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
#tf.keras.layers.Dense(30, activation = 'relu'),
tf.keras.layers.Dense(11, activation = 'linear')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(15, return_sequences = True),
tf.keras.layers.LSTM(30),
tf.keras.layers.Dense(15),
#tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
#tf.keras.layers.Dense(30, activation = 'relu'),
##tf.keras.layers.Dropout(0.1),
##tf.keras.layers.Flatten(),
tf.keras.layers.Dense(11, activation = 'softmax')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
model.add(tf.keras.layers.LSTM(15, return_sequences = True))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
"""
"""
#seq 2000 batch 16 -> RMSE 1.41 after 6 epochs
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
model.add(tf.keras.layers.LSTM(100))
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
#model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
"""
"""
n_features = 15
model = tf.keras.Sequential()
model.add(tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100),
input_shape=(seq_length, 15)))
##model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
##model.add(tf.keras.layers.LSTM(100))
###model.add(tf.keras.layers.LSTM(100))
###model.add(tf.keras.layers.LSTM(100))
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
#model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(11, activation="linear"))
model.summary()
"""
"""
#WORKING 0.9 RMSE
model = tf.keras.Sequential([
tf.keras.layers.InputLayer((seq_length,15)),
tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(100, return_sequences = True),
tf.keras.layers.LSTM(50),
#tf.keras.layers.Dense(8, activation = 'relu'),
tf.keras.layers.Dense(30, activation = 'relu'),
tf.keras.layers.Dense(11, activation = 'linear')
#tf.keras.layers.Dense(11, activation = 'softmax')
])
"""
"""
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(100),
input_shape=(seq_length, 15)),
#tf.keras.layers.LSTM(100, return_sequences = True),
#tf.keras.layers.LSTM(100, return_sequences = True),
#tf.keras.layers.LSTM(50),
tf.keras.layers.Dense(8, activation = 'relu'),
tf.keras.layers.Dense(1, activation = 'linear')
])
"""
"""
model = tf.keras.Sequential
model.add(tf.keras.layers.InputLayer((seq_length,15)))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.LSTM(50))
model.add(tf.keras.layers.Dense(8, activation = 'relu'))
model.add(tf.keras.layers.Dense(1, activation = 'linear'))
"""
#LIST OF TESTED CNN MODELS
"""
model_0 = tf.keras.Sequential(
[
#tf.keras.layers.Input(shape=input_shape),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu', kernel_initializer='he_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(0.5),
#tf.keras.layers.Dense(num_classes_0, activation='softmax')
]
)
"""
"""
#good model
n_features = 10
model = tf.keras.Sequential()
model.add(tf.keras.layers.InputLayer((seq_length,n_features)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.LSTM(70, return_sequences = True))
#model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.LSTM(100, return_sequences = True))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.LSTM(50))
#model.add(tf.keras.layers.Dense(8, activation = 'relu'))
##model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(11, activation = 'linear'))
model.summary()
""" | leahimJarun/SensoGripProjectAiModel | train.py | train.py | py | 18,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.reshape",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.product",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorfl... |
74031939303 | import json
import sys
import aes_functions
import rsa_functions
from exceptions.Exceptions import IncorrectData
from socket_class import SOCKET_SIMPLE_TCP
def receiveAESMessage(s):
    """Read the three parts of an AES-GCM message off socket s, in protocol
    order: ciphertext, MAC tag, nonce."""
    ciphertext = s.receive()
    tag = s.receive()
    nonce = s.receive()
    return ciphertext, tag, nonce
def checkMessageGCM(key, iv, cif, mac):
    """Decrypt and authenticate an AES-GCM message.

    Returns the plaintext on success; on a failed tag check it reports the
    corruption and returns None.
    """
    plaintext = aes_functions.decipherAES_GCM(key, iv, cif, mac)
    if plaintext is False:
        print("AIUDAAAA :(")
        print("Corrupted Message")
        return None
    return plaintext
def sendAESMessage(socket, criptograma, mac, nonce):
    """Send the three AES-GCM message parts in protocol order:
    ciphertext, MAC tag, nonce."""
    for part in (criptograma, mac, nonce):
        socket.send(part)
def bob_socket(port):
    # Factory for Bob's loopback TCP socket wrapper (not yet connected or listening).
    return SOCKET_SIMPLE_TCP('127.0.0.1', port)
class Bob:
    """Protocol participant "Bob": holds his RSA keypair, the Bob<->TTP AES
    key, and the TTP's public RSA key loaded from disk."""

    def __init__(self):
        self.name = "Bob"
        self.port = 5552
        # Bob's own RSA keypair (private + public halves).
        self.PK_BOB = rsa_functions.create_RSAKey()
        # Symmetric AES key shared between Bob and the TTP.
        self.KBT = aes_functions.create_AESKey()
        # TTP public key, loaded from the "TTP.pub" file.
        self.KPT = rsa_functions.load_RSAKey_Public("TTP.pub")

    def savePK(self):
        # Persist Bob's public key so other parties can encrypt/verify to him.
        return rsa_functions.save_RSAKey_Public("Bob.pub", self.PK_BOB)
if __name__ == '__main__':
    # Bob's side of the simplified-Kerberos exchange: register with the TTP,
    # receive the session key via Alice, then exchange AES-GCM messages.
    """--STEP 0--"""
    bob = Bob()
    bob.savePK()
    print(bob.PK_BOB.public_key().export_key())
    try:
        socket = bob_socket(bob.port)
        socket.connect()
    except Exception as e:
        sys.exit(f"An error occurred creating the socket with TTP: {e}")
    """--STEP 2--"""
    # Send (name, KBT) to the TTP: RSA-OAEP encrypted under the TTP public
    # key, plus an RSA-PSS signature made with Bob's private key.
    print("Establishing a connection with TTP...")
    try:
        engineKAT = aes_functions.startAES_GCM(bob.KBT)
        print("Sending data to TTP...")
        message = [bob.name, bob.KBT.hex()]
        json_AT = json.dumps(message)
        print("Message B -> T (decryption): " + json_AT)
        # Encrypt data
        encrypted_message = rsa_functions.cipherRSA_OAEP(json_AT.encode("utf-8"), bob.KPT.public_key())
        encrypted_signature = rsa_functions.signatureRSA_PSS(bob.KBT.hex().encode("utf-8"), bob.PK_BOB)
        # Send encrypted data
        socket.send(encrypted_message)
        socket.send(encrypted_signature)
    except Exception as e:
        socket.close()
        sys.exit(f"An error occurred in step 2: {e}")
    finally:
        print("END STEP 2")
        input("Press any key to continue")
    """--Step 5--"""
    # Accept Alice's forwarded ticket: the TTP-wrapped (TS, KAB) for Bob plus
    # Alice's own KAB-encrypted identity check.
    try:
        socket = bob_socket(5555)
        socket.listen()
    except Exception as e:
        sys.exit(f"An error occurred creating the socket with Alice: {e}")
    try:
        print("Waiting for Alice...")
        msg = socket.receive()
        cipher_BT, mac_BT, iv_BT, cif_AB, mc_AB, iv_AB = json.loads(msg)
        decrypted_message = checkMessageGCM(bob.KBT, bytes.fromhex(iv_BT), bytes.fromhex(cipher_BT),
                                            bytes.fromhex(mac_BT))
        TS, KAB = json.loads(decrypted_message.decode('utf-8'))
        KAB = bytearray.fromhex(KAB)
        decrypted_message = checkMessageGCM(KAB, bytes.fromhex(iv_AB), bytes.fromhex(cif_AB),
                                            bytes.fromhex(mc_AB))
        sessionName, aux = json.loads(decrypted_message)
        # NOTE(review): `and` means a single mismatched field passes the
        # check; an `or` would reject if *either* name or timestamp differs —
        # confirm the intended verification rule.
        if sessionName != 'Alice' and aux != TS:
            raise IncorrectData("Possible data modification during communication")
        else:
            print("Reliable data, continued")
    except Exception as e:
        socket.close()
        sys.exit(f"An error occurred in step 5: {e}")
    finally:
        print("END STEP 5")
        input("Press any key to continue")
    """--Step 6--"""
    # Prove possession of KAB by returning TS + 1 encrypted under it.
    try:
        resolution = float(TS) + 1
        engineKAB = aes_functions.startAES_GCM(KAB)
        cif, mac, iv = aes_functions.cipherAES_GCM(engineKAB, str(resolution).encode("utf-8"))
        sendAESMessage(socket, cif, mac, iv)
    except Exception as e:
        socket.close()
        sys.exit(f"An error occurred in step 6: {e}")
    finally:
        print("END STEP 6")
        input("Press any key to continue")
    """--Step 7--"""
    # Receive Alice's first application message over the KAB channel.
    try:
        print("Waiting for Alice")
        cif, mac, iv = receiveAESMessage(socket)
        textoClaro = checkMessageGCM(KAB, iv, cif, mac)
        msg = textoClaro.decode("utf-8")
        print("Message ->" + msg)
    except Exception as e:
        socket.close()
        sys.exit(f"An error occurred in step 7: {e}")
    finally:
        print("END STEP 7")
        input("Press any key to continue")
    """--Step 8--"""
    # Reply with Bob's own application message over the same channel.
    try:
        msg = "Hello Word!"
        engineKAB = aes_functions.startAES_GCM(KAB)
        cif, mac, iv = aes_functions.cipherAES_GCM(engineKAB, msg.encode("utf-8"))
        sendAESMessage(socket, cif, mac, iv)
    except Exception as e:
        socket.close()
        sys.exit(f"An error occurred in step 8: {e}")
    finally:
        print("END STEP 8")
| makrron/simplified-kerberos-protocol | p-b.py | p-b.py | py | 4,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aes_functions.decipherAES_GCM",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "socket_class.SOCKET_SIMPLE_TCP",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rsa_functions.create_RSAKey",
"line_number": 37,
"usage_type": "call"
},
... |
32967623992 | from django import forms
from django.core.exceptions import ValidationError
from arcana_app.models import Driver, Truck, Trailer, Insurance, Freight
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> so browsers show a date picker.
    input_type = 'date'
class TimeInput(forms.TimeInput):
    # Render as an HTML5 <input type="time"> so browsers show a time picker.
    input_type = 'time'
# class CheckboxInput(forms.CheckboxInput):
# input_type = 'checkbox'
class AddDriverForm(forms.ModelForm):
    """ModelForm for creating/editing a Driver, with a date picker for the
    birth date and Bootstrap styling on every visible widget."""
    class Meta:
        model = Driver
        fields = '__all__'
        widgets = {'birth_date': DateInput()}
    def __init__(self, *args, **kwargs):
        super(AddDriverForm, self).__init__(*args, **kwargs)
        # Tag each visible widget with the Bootstrap form-control class.
        for bound_field in self.visible_fields():
            bound_field.field.widget.attrs['class'] = 'form-control'
class AddTruckForm(forms.ModelForm):
    """ModelForm for Truck: Bootstrap widgets plus date pickers for the MOT period."""
    def __init__(self, *args, **kwargs):
        super(AddTruckForm, self).__init__(*args, **kwargs)
        # Apply the Bootstrap form-control class to every visible field.
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'
    class Meta:
        model = Truck
        fields = '__all__'
        widgets = {
            'begin_MOT': DateInput(),
            'expire_MOT': DateInput(),
        }
        # widgets = {
        #     'has_actual_MOT': forms.CheckboxInput(
        #         attrs={'class': 'required checkbox form-select', 'disabled': 'disabled or true'}),
        # }
class AddTrailerForm(forms.ModelForm):
    """ModelForm for Trailer with date pickers for the MOT period."""
    class Meta:
        model = Trailer
        fields = '__all__'
        widgets = {
            'begin_MOT': DateInput(),
            'expire_MOT': DateInput(),
        }
    def __init__(self, *args, **kwargs):
        super(AddTrailerForm, self).__init__(*args, **kwargs)
        # Style every visible widget with the Bootstrap form-control class.
        for bound_field in self.visible_fields():
            bound_field.field.widget.attrs['class'] = 'form-control'
class AddInsuranceForm(forms.ModelForm):
    """ModelForm for Insurance: Bootstrap styling plus a cross-field check
    that the policy period is ordered (begin_date <= end_date)."""
    def __init__(self, *args, **kwargs):
        super(AddInsuranceForm, self).__init__(*args, **kwargs)
        # Apply the Bootstrap form-control class to every visible field.
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'
    class Meta:
        model = Insurance
        fields = '__all__'
    def clean(self):
        data = super().clean()
        begin = data.get('begin_date')
        end = data.get('end_date')
        # Only cross-validate when both fields passed their own validation;
        # previously data['begin_date'] raised KeyError (500 error) whenever a
        # date field was missing/invalid instead of showing a form error.
        if begin is not None and end is not None and begin > end:
            raise ValidationError("Begin date can't be earlier than end date!")
        return data
class AddFreightForm(forms.ModelForm):
    """ModelForm for Freight: date and time pickers for the (un)loading fields."""
    def __init__(self, *args, **kwargs):
        super(AddFreightForm, self).__init__(*args, **kwargs)
        # Apply the Bootstrap form-control class to every visible field.
        for visible in self.visible_fields():
            visible.field.widget.attrs['class'] = 'form-control'
    class Meta:
        model = Freight
        fields = '__all__'
        widgets = {
            'date_of_loading': DateInput(),
            'date_of_unloading': DateInput(),
            'hour_of_loading': TimeInput(),
            'hour_of_unloading': TimeInput(),
        }
| KamilNurzynski/Arcana | arcana_app/forms.py | forms.py | py | 2,848 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.forms.DateInput",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.forms.TimeInput",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "dj... |
70077253225 | from __future__ import print_function
import requests, lxml.html
# Custom UA header; some sites block the default requests user agent.
headers = {'user-agent': 'taco'}
urls_to_check = [
    'http://www.packtpub.com/application-development/python-data-structures-and-algorithm',
    'https://www.packtpub.com/big-data-and-business-intelligence/learning-data-mining-python-second-edition',
    'https://www.packtpub.com/big-data-and-business-intelligence/neural-network-programming-python',
    'https://www.packtpub.com/application-development/python-programming-blueprints'
]
print()
for url in urls_to_check:
    # Derive a human-readable title from the last URL path segment.
    title = url.split('/')[-1].replace('-', ' ').title()
    print('Checking for title: %s'%title)
    page = requests.get(url, headers=headers).content
    tree = lxml.html.fromstring(page)
    # A title is downloadable once neither the pre-order marker nor the
    # alpha-release marker appears on its page.
    if not tree.cssselect('.title-preorder') and not tree.cssselect('.alpha-text'):
        print('\t\n%s [READY FOR DOWNLOAD]\n'%title)
    else:
        print('\t\t\t\t\t\t\t(negative)')
# The daily free title lives on a separate offers page.
url = 'https://www.packtpub.com/packt/offers/free-learning'
print('Checking the [FREE] title...')
page = requests.get(url, headers=headers).content
tree = lxml.html.fromstring(page)
print('\n\tFree Book: %s\n'%tree.cssselect('.dotd-title h2')[0].text_content().strip())
| chris-hamberg/scrapers | packt.py | packt.py | py | 1,212 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lxml.html.html.fromstring",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "lxml.html"... |
41709462249 | import unittest
import json
from django.test import TestCase
from datetime import datetime
from utente.models import Utente, Prodotto, ProdottoCarrello, Carrello, Pagamento, Ordine
from vetrine.models import Vetrina, VetrinaAmministratore, ResocontoVendite
# Test that creating a user also creates the associated cart.  # test passes
class UtenteTest(TestCase):
    def test_save_creates_carrello(self):
        """Saving a Utente must create exactly one Carrello owned by it."""
        utente = Utente.objects.create(username='testuser', email='test@example.com')
        carrello = Carrello.objects.get(possessore=utente)
        self.assertEqual(carrello.possessore, utente)
# Test product creation and the quantity/earnings bookkeeping.  # test passes
class ProdottoTest(TestCase):
    def setUp(self):
        # A fresh product for every test case.
        self.prodotto = Prodotto.objects.create(
            nome='Prodotto di test',
            codice_seriale=12345,
            tipologia='Test',
            descrizione='Descrizione di test',
            prezzo=9.99,
            disponibilita=10
        )
    def test_creazione_prodotto(self):
        """All constructor fields are stored unchanged."""
        self.assertEqual(self.prodotto.nome, 'Prodotto di test')
        self.assertEqual(self.prodotto.codice_seriale, 12345)
        self.assertEqual(self.prodotto.tipologia, 'Test')
        self.assertEqual(self.prodotto.descrizione, 'Descrizione di test')
        self.assertEqual(self.prodotto.prezzo, 9.99)
        self.assertEqual(self.prodotto.disponibilita, 10)
    def test_aggiunta_quantita_venduta(self):
        """pezzi_venduti starts at 0 and can be increased."""
        self.assertEqual(self.prodotto.pezzi_venduti, 0)
        self.prodotto.pezzi_venduti = 5
        self.assertEqual(self.prodotto.pezzi_venduti, 5)
    def test_riduzione_disponibilita(self):
        """Availability can be decremented in memory."""
        self.assertEqual(self.prodotto.disponibilita, 10)
        self.prodotto.disponibilita -= 3
        self.assertEqual(self.prodotto.disponibilita, 7)
    def test_guadagno_totale(self):
        """Total earnings track pezzi_venduti * prezzo."""
        self.assertEqual(self.prodotto.guadagno_totale, 0)
        self.prodotto.pezzi_venduti = 5
        self.assertEqual(self.prodotto.guadagno_totale, 49.95)  # 5 * 9.99
    def tearDown(self):
        self.prodotto.delete()
# Test adding a product to the cart.  # test passes
class ProdottoCarrelloTest(TestCase):
    def test_str_method(self):
        """__str__ of a cart line equals __str__ of its product."""
        utente = Utente.objects.create(username='testuser', email='test@example.com')
        vetrina = Vetrina.objects.create(ID_vetrina='Test Vetrina')
        resoconto = ResocontoVendite.objects.create(ID_resoconto='Test Resoconto')
        prodotto = Prodotto.objects.create(
            nome='Test Prodotto',
            codice_seriale=1,
            vetrina=vetrina,
            resoconto_vendite=resoconto
        )
        prodotto_carrello = ProdottoCarrello.objects.create(utente=utente, prodotto=prodotto)
        self.assertEqual(str(prodotto_carrello), str(prodotto))
# Test cart creation.
class CarrelloTest(TestCase):  # test passes
    def test_str_method(self):
        """A cart's string form is its owner's username."""
        utente = Utente.objects.create(username='testuser', email='test@example.com')
        carrello, _ = Carrello.objects.get_or_create(possessore=utente)
        self.assertEqual(carrello.__str__(), 'testuser')
# Test setting and reading back the payment card number.
class PagamentoTest(TestCase):  # test passes
    def test_str_method(self):
        # NOTE(review): despite its name this test checks the stored card
        # number, not __str__ -- consider renaming or asserting str(pagamento).
        pagamento = Pagamento.objects.create(numero_carta=1234567890)
        self.assertEqual(pagamento.numero_carta, 1234567890)
# Test creating an order.  # test passes
class OrdineTest(TestCase):
    def test_str_method(self):
        # get_or_create keeps the test idempotent across repeated runs.
        ordine, _ = Ordine.objects.get_or_create(
            numero_ordine='1',
            carrello=json.dumps([]),
            data_ordine=datetime.now(),
            numero_carta='1234567890'  # provide a valid card number here
        )
        self.assertEqual(ordine.numero_ordine, '1')
if __name__ == '__main__':
unittest.main() | MattiaCani/Progetto-ISW | progettoISW/test_unitari/test_models_utente.py | test_models_utente.py | py | 3,801 | python | it | code | 1 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "utente.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "utente.models.Utente.objects.create",
"line_number": 12,
"usage_type": "call"
},
{
"api_name":... |
31056930437 | import os.path
from flask import Flask
from flaskext.sqlalchemy import SQLAlchemy
CONFIG_FILEPATH = os.path.join(os.path.dirname(__file__), "../config.cfg")
def auto_register_modules(app):
    """Registers modules from :mod:`subleekr` to application."""
    import subleekr
    for modname in subleekr.__modules__:
        # Import each declared sub-module, then fetch it off the package.
        __import__("{0}.{1}".format(subleekr.__name__, modname))
        module = getattr(subleekr, modname)
        # Give the module's Flask blueprint a back-reference to its owner app.
        module.app.super_app = app
        app.register_module(module.app)
def create_app(__name__=__name__):
    """Create and configure the Flask application.

    NOTE(review): the parameter shadows the module ``__name__`` so callers can
    override the Flask app name -- confirm this is intentional.
    """
    app = Flask(__name__)
    try:
        app.config.from_pyfile(CONFIG_FILEPATH)
    except IOError:
        # The external config file is optional; a missing file is ignored.
        pass
    auto_register_modules(app)
    app.db = SQLAlchemy(app)
    return app
| sublee/subleekr | subleekr/app.py | app.py | py | 732 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"... |
71335938983 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rightSideView(self, root: Optional[TreeNode]) -> List[int]:
        """Return the values visible from the right side of the tree,
        top level first.

        Walks the tree level by level; the last node of each level in
        left-to-right order is the one visible from the right.

        Fixes over the original: removed the dead `height`/`subtree`
        variables (the outer `height = 0` was immediately shadowed by the
        nested function of the same name), dropped the redundant two-pass
        height computation, and removed the in-method `import collections`.
        """
        if not root:
            return []
        view = []
        level = [root]
        while level:
            view.append(level[-1].val)  # rightmost node of this level
            level = [child for node in level
                     for child in (node.left, node.right) if child]
        return view
| architjee/solutions | Leetcode/right side view of binary tree.py | right side view of binary tree.py | py | 932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
2509822081 | # Iris Recognition
# 04. Module to match iris descriptions.
# Language: Python 3
import numpy
import cv2
ROTATIONS = [-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def _rotate_norm_image(image, rotation):
output = numpy.zeros(image.shape, image.dtype)
if rotation == 0:
return image
else:
output[:, rotation:] = image[:, :-rotation]
output[:, :rotation] = image[:, -rotation:]
return output
def _compute_norm_hamming_distance(description_1, mask_1, description2, mask_2):
    """Normalized Hamming distance between two iris codes.

    Only bits covered by both masks are compared; the differing-bit count is
    divided by the number of usable bits.
    NOTE(review): if the masks share no set bits, bit_up_count is 0 and the
    final division raises ZeroDivisionError -- confirm callers guarantee
    overlapping masks.
    """
    comb_mask = cv2.bitwise_and(mask_1, mask_2)
    bit_up_count = numpy.sum(comb_mask > 0)
    xor_output = cv2.bitwise_xor(description_1, description2)
    # Restrict the XOR result to the region covered by both masks.
    xor_output = cv2.bitwise_and(xor_output, xor_output, mask=comb_mask)
    dist = numpy.sum(xor_output > 0)
    return float(dist) / bit_up_count
def match(descriptions_1, mask_1, descriptions_2, mask_2):
    """Match two sets of iris descriptions.

    For each candidate rotation, description set 2 (and its mask) is
    circularly shifted, the normalized Hamming distance is averaged over all
    description pairs, and the minimum over rotations is returned (lower
    means a better match).

    Fix: rotating mask_2 only depends on the rotation, so it is now done
    once per rotation instead of once per description pair (it was
    loop-invariant in the inner loop).
    """
    rot_distances = []
    for rotation in ROTATIONS:
        rot_mask_2 = _rotate_norm_image(mask_2, rotation)
        distances = []
        for i in range(len(descriptions_1)):  # could be "for i in range(len(descriptions_2)):"
            desc_1 = descriptions_1[i]
            rot_desc_2 = _rotate_norm_image(descriptions_2[i], rotation)
            distances.append(_compute_norm_hamming_distance(desc_1, mask_1, rot_desc_2, rot_mask_2))
        rot_distances.append(numpy.mean(distances))
    print('[INFO] Computed normalized Hamming distance.')
    return numpy.min(rot_distances)
| EmmanuelOlofintuyi/Biometrics | Iris Recognition/d_match_iris.py | d_match_iris.py | py | 1,516 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_xor",
"line_num... |
3112497590 | #!/usr/bin/env python3.7
import argparse
import json
import sys
def matches(parts, subject):
    """Yield every value inside `subject` matched by the path `parts`.

    `parts` is a list of dict keys / list indices as strings; '*' matches
    every child of a dict or list.  Paths that do not match yield nothing.
    """
    if len(parts) == 0:
        yield subject
        return
    part, *rest = parts
    # If we're extracting something from `subject`, and `subject` is neither a
    # list nor a dict, then there's nothing to extract.  Whether this is an
    # error or just a no-op was part of how my original solution was wrong.
    if type(subject) not in [list, dict]:
        return
    if type(subject) is list:
        if part == '*':
            for child in subject:
                yield from matches(rest, child)
            return
        try:
            index = int(part)
        except ValueError:
            return  # can't extract a property name from a list
        try:
            child = subject[index]
        except IndexError:
            # Fix: an out-of-range index used to raise; now it simply does
            # not match, mirroring the missing-key behavior of the dict branch.
            return
        yield from matches(rest, child)
    else:
        assert type(subject) is dict
        if part == '*':
            for child in subject.values():
                yield from matches(rest, child)
        elif part in subject:
            yield from matches(rest, subject[part])
def parse(pattern):
    """Split a dotted query string into its path parts."""
    # str.split would turn "" into [""], so the empty query is special-cased.
    return pattern.split('.') if pattern else []
def extract(pattern, subject):
    """Evaluate the query `pattern` against `subject`.

    Returns a list of results when the query contains a wildcard; otherwise
    the single matched value, or None when nothing matched.
    """
    parts = parse(pattern)
    results = list(matches(parts, subject))
    if '*' in parts:
        return results  # wildcard queries always yield a list of results
    # Without wildcards at most one value can match; unwrap it.
    if not results:
        return None
    assert len(results) == 1
    return results[0]
def parse_command_line(args):
    """Parse the given command-line arguments and return the options.

    Fix: `args` was previously ignored and argparse read ``sys.argv``
    directly, making the parameter misleading and the function untestable;
    the list is now passed to ``parse_args``.
    """
    parser = argparse.ArgumentParser(description='Extract values from JSON.')
    parser.add_argument('pattern',
                        help='JSON query (path) to extract from input')
    return parser.parse_args(args)
if __name__ == '__main__':
    # Read JSON from stdin, apply the query given on argv, print the result.
    options = parse_command_line(sys.argv[1:])
    result = extract(options.pattern, json.load(sys.stdin))
    if result is not None:
        json.dump(result, sys.stdout, indent=4, sort_keys=True)
        print() # for the newline
| dgoffredo/jex | src/jex.py | jex.py | py | 2,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line... |
246221889 | import datetime
import pandas as pd
from helper.fetch import Fetch
from helper.dynamic_scrape import DynamicScrape
from helper.property_helper import PropertyHelper
class Dhalia(object):
    """Scraper for property listings on dhalia.com (sale and rental).

    Fix: the local variable ``type`` shadowed the ``type`` builtin; it is
    now named ``prop_type``.
    """
    source = 'Dhalia'
    # Column order of each scraped listing row.
    columns = [
        'Reference', 'Town', 'Type', 'Stage',
        'Bedrooms', 'Bathrooms',
        'TotalSqm', 'IntArea', 'ExtArea', 'Price'
    ]
    @staticmethod
    def fetch_data(is_sale: bool) -> pd.DataFrame:
        """Scrape every listing page; monthly-normalized prices for rentals."""
        data = pd.DataFrame()
        proxies = Fetch.load_proxies()
        page_type = 'buy' if is_sale else 'rent'
        page_element = f'//div[@class="searchForm searchForm--quick-search page-{page_type}"]'
        driver = Fetch.get_dynamic(f'https://www.dhalia.com/{page_type}/?pageIndex=1', proxies, page_element, True)
        # The "last page" pager link encodes the total page count in its URL.
        x_pages = '//li[@class="pager__last"]/a'
        DynamicScrape.await_element(driver, x_pages)
        pages = int(DynamicScrape.get_link(driver, x_pages).split('=')[1])
        for page in range(1, pages+1):
            x_links = '//a[@class="propertybox"]'
            links = DynamicScrape.get_links(driver, x_links)
            listing = []
            x_features = './/div[@class="property-top__col__part property-top__col__part--others"]/span'
            x_type_town = './/div[@class="property-top__col"]/h1'
            x_description = './/div[@class="description write-up"]'
            x_price = './/div[@class="property-top__col__part property-top__col__part--price"]'
            for i, link in enumerate(links):
                page_element = '//section[@class="property-detail-wrapper"]'
                successful = DynamicScrape.open_tab_link(driver, link, page_element)
                if successful:
                    features = DynamicScrape.get_texts(driver, x_features)
                    reference = [feature for feature in features if 'Ref: ' in feature]
                    reference = reference[0].replace('Ref: ', '').strip() if len(reference) else None
                    # Header is of the form "<Type> in <Town>".
                    type_town = DynamicScrape.get_text(driver, x_type_town)
                    town = type_town.split(' in ')[1].strip()
                    prop_type = type_town.split(' in ')[0].strip()
                    stage = PropertyHelper.determine_stage(driver, x_description, is_sale)
                    bedrooms = [side_info for side_info in features if 'Bedrooms' in side_info]
                    bedrooms = bedrooms[0].replace('Bedrooms', '') if len(bedrooms) else None
                    bathrooms = [side_info for side_info in features if 'Bathrooms' in side_info]
                    bathrooms = bathrooms[0].replace('Bathrooms', '') if len(bathrooms) else None
                    # Area field is "total/internal/external m²".
                    area = [side_info for side_info in features if 'm²' in side_info]
                    area = area[0].replace('m²', '').split('/') if len(area) else None
                    total_sqm = area[0] if area else None
                    int_area = area[1] if area else None
                    ext_area = area[2] if area else None
                    price = DynamicScrape.get_text(driver, x_price)
                    price = price.replace('€', '').replace(',', '')
                    # Normalize rental prices to a monthly figure.
                    try:
                        if ' daily' in price:
                            price = int(price.replace(' daily', '')) * 30
                        elif ' monthly' in price:
                            price = int(price.replace(' monthly', ''))
                        elif ' yearly' in price:
                            price = round(int(price.replace(' yearly', '')) / 12)
                    except ValueError:
                        price = None
                    listing.append([
                        reference, town, prop_type, stage,
                        bedrooms, bathrooms,
                        total_sqm, int_area, ext_area, price
                    ])
                    DynamicScrape.close_tab_link(driver)
                print(
                    '%s\t %s\t Page %03d of %03d\t Entry %03d of %03d' %
                    (datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S"), Dhalia.source + ' ' + page_type.title(), page, pages, i+1, len(links))
                )
            # Concatenate previous data frame with data of current page
            page_data = pd.DataFrame(listing, columns=Dhalia.columns)
            data = pd.concat([data, page_data])
            # Click Next Page
            x_next_page = f'//ul[@class="pager"]/li/a/span[text()="{page+1}"]'
            x_await_page = f'//ul[@class="pager"]/li[@class="pager__current"]/a/span[text()="{page+1}"]'
            DynamicScrape.click_element(driver, x_next_page, x_await_page)
        # Add source and rename columns
        data.insert(0, 'Is_Sale', is_sale)
        data.insert(1, 'Source', Dhalia.source)
        # Close Driver
        Fetch.dynamic_close_browser(driver)
        # Return the data
        return data
    @staticmethod
    def fetch_res_sale():
        """Scrape all residential sale listings."""
        return Dhalia.fetch_data(True)
    @staticmethod
    def fetch_res_rent():
        """Scrape all residential rental listings."""
        return Dhalia.fetch_data(False)
    @staticmethod
    def fetch_all(file_path: str) -> None:
        """Scrape sale and rental listings and save them as one CSV."""
        # Fetching data
        res_sale = Dhalia.fetch_res_sale()
        res_rent = Dhalia.fetch_res_rent()
        # Concatenate Data
        data = pd.concat([res_sale, res_rent])
        # Save data frame to CSV file
        data.to_csv(file_path, index=False)
| brandonabela/Malta-Property-Analysis | src/export/dhalia.py | dhalia.py | py | 5,369 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "helper.fetch.Fetch.load_proxies",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "helper.fetch.Fetch",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "h... |
70656671785 | """
Some of code was taken from https://pytorch.org/vision/stable/_modules/torchvision/models/resnet.html
"""
import torch
from torch import Tensor, nn
from typing import Optional, List
from torchvision.models import resnet18
def conv3x3(input_size: int, output_size: int, stride: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding 1 and no bias (ResNet building block)."""
    return nn.Conv2d(
        input_size,
        output_size,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(input_size: int, output_size: int, stride: int = 1) -> nn.Conv2d:
    # 1x1 (pointwise) bias-free convolution; used for channel projection.
    return nn.Conv2d(input_size, output_size, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with an identity
    (or projected) shortcut connection."""
    def __init__(
        self,
        input_size: int,
        output_size: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.conv1 = conv3x3(input_size, output_size, stride)
        self.bn1 = nn.BatchNorm2d(output_size)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(output_size, output_size)
        self.bn2 = nn.BatchNorm2d(output_size)
        # Optional projection applied to the block input so shapes match for
        # the residual addition (needed when stride != 1 or channels change).
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-relu-conv-bn, add the (possibly projected) input, then ReLU."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNetCNN(nn.Module):
    """
    ResNet-like feature extractor for single-channel images, producing
    `output_size` channels.
    """
    def __init__(
        self,
        layers: Optional[List[int]] = None,
        output_size: int = 128,
    ):
        super().__init__()
        # Default architecture: two BasicBlocks in each of the three stages.
        if layers is None:
            layers = [2, 2, 2]
        if len(layers) != 3:
            raise ValueError(
                f'List of layers should have 3 elements, got {len(layers)}')
        self.relu = nn.ReLU()
        self.output = output_size
        # Tracks in-channels for the next stage; mutated by _make_layer.
        self.input_size = 128
        self.layer0 = nn.Sequential(
            nn.Conv2d(1, self.input_size, kernel_size=7, padding=3),
            nn.BatchNorm2d(self.input_size),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.layer1 = self._make_layer(128, layers[0])
        self.layer2 = self._make_layer(256, layers[1], stride=2)
        self.layer3 = self._make_layer(512, layers[2], stride=2)
        self.downsample = conv1x1(512, self.output)
    def _make_layer(self, output_size: int, blocks: int, stride: int = 1) -> nn.Sequential:
        """Stack `blocks` BasicBlocks; the first may downsample/re-project."""
        downsample = None
        # A projection shortcut is needed whenever the identity shape differs.
        if stride != 1 or self.input_size != output_size:
            downsample = nn.Sequential(
                conv1x1(self.input_size, output_size, stride),
                nn.BatchNorm2d(output_size),
            )
        layers = [BasicBlock(self.input_size, output_size, stride, downsample)]
        self.input_size = output_size
        for _ in range(1, blocks):
            layers.append(BasicBlock(self.input_size, output_size))
        return nn.Sequential(*layers)
    def forward(self, x: Tensor) -> Tensor:
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # (batch_size, output_channels, height, width)
        x = self.downsample(x)
        return x.squeeze(0) # (output_channels, height, width)
class CNN(nn.Module):
    """Plain (non-residual) VGG-style CNN producing `output_size` channels."""
    def __init__(self, output_size: int = 128):
        super().__init__()
        # Tracks in-channels for the next layer; mutated by _make_layer.
        self.input_size = 64
        self.layer0 = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d((1, 2))  # (1, 2) kernel: pools width only
        )
        self.layer1 = self._make_layer(128)
        self.layer2 = self._make_layer(256)
        self.layer3 = self._make_layer(512)
        self.downsample = nn.Sequential(
            conv3x3(self.input_size, output_size),
            nn.BatchNorm2d(output_size),
            nn.ReLU()
        )
    def forward(self, x: Tensor) -> Tensor:
        """Run the feature extractor; squeezes the leading (batch) dimension."""
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.downsample(x)
        return x.squeeze(0)
    def _make_layer(self, output_size: int) -> nn.Sequential:
        # conv-bn-relu twice, then 2x2 max-pool; channels grow to output_size.
        layer = nn.Sequential(
            conv3x3(self.input_size, output_size),
            nn.BatchNorm2d(output_size),
            nn.ReLU(),
            conv3x3(output_size, output_size),
            nn.BatchNorm2d(output_size),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.input_size = output_size
        return layer
| timtibilov/AttentionOCR | src/model/cnn.py | cnn.py | py | 4,622 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_nu... |
74678278503 | from django.contrib.auth.decorators import permission_required , login_required
from django.shortcuts import render , redirect , get_object_or_404,HttpResponseRedirect , HttpResponse
from form_1.forms import Form1Form
from form_1.models import Form1
from giris.views import login_view
from .models import Form3 as Form3Model , Form3 , Malzeme
from .forms import Form3Form , MalzemeForm
from django.db.models import Q
import mimetypes
from form2.models import Form2
from form4.models import Form4
@login_required(login_url=login_view)
# Create your views here.
def form3_view(request):
    """List Form3 entries: staff/superusers see all, others only their own."""
    if request.user.is_staff or request.user.is_superuser:
        listem = Form3Model.objects.all().order_by('Form33__id')
    else:
        listem = Form3Model.objects.all().order_by('Form33__id').filter(Olusturan=request.user)
    """query = request.GET.get('q')
    if query:
        listem = listem.filter(
            Q(id=int(query)) |
            Q(Olusturan__username__icontains=query) |
            Q(isin_kategorisi__icontains=query) |
            Q(Aciklama__icontains=query)
        ).distinct()"""
    # NOTE(review): 'malzeme' below refers to the view function defined later
    # in this module -- confirm the template really expects that object.
    return render(request , 'Form3/Form3.html' , {'listem': listem,'islemde':
    [islemde.Form44 for islemde in Form4.objects.all() if islemde.Form44]
    ,'malzeme':malzeme })
@login_required(login_url=login_view)
def create(request,form1):
    """Create a Form3 attached to the Form2 identified by `form1`."""
    form3=Form3Form(request.POST or None, request.FILES or None)
    context = {'form3': form3}
    if request.method == "POST":
        if form3.is_valid():
            a=form3.save(commit=False)
            a.Form33 = Form2.objects.get(Form22=form1)
            a.Olusturan = request.user
            a.save()
            # Material-supply entries continue to the material entry form.
            if a.isin_kategorisi=='Malz.Tedariği':
                return redirect('create_malzeme',form3=a.id)
            return redirect(form3_view)
    return render(request, 'Form3/create.html', context)
@login_required(login_url=login_view)
def detail(request,pk):
    """Show a single Form3 record (404 when the id does not exist)."""
    record = get_object_or_404(Form3Model, id=pk)
    return render(request, 'Form3/detail.html', {'listem': record})
@login_required(login_url=login_view)
@permission_required('form3.delete_form3',login_url=form3_view)
def delete(request , pk):
    """Delete a Form3 record and return to the list view.

    Fix: uses get_object_or_404 (consistent with `detail`/`update`) so a
    missing id yields a 404 instead of an uncaught DoesNotExist, and the
    previously unused `context` variable was removed.
    """
    listem = get_object_or_404(Form3Model, id=pk)
    listem.delete()
    return redirect('form3_view')
@login_required(login_url=login_view)
@permission_required('form3.change_form3',login_url=form3_view)
def update(request,pk):
    """Edit an existing Form3 record (requires the change permission)."""
    listem = get_object_or_404(Form3Model , id=pk)
    form3 = Form3Form(request.POST or None ,request.FILES or None, instance=listem)
    if form3.is_valid():
        form3.save()
        return redirect('form3_view')
    context = {'form3': form3}
    return render(request, 'Form3/create.html', context)
@login_required(login_url=login_view)
def download(request , pk):
    """Send the attachment of a Form3 record as a file download.

    Fix: the file handle was previously opened and never closed (resource
    leak); the file is now read inside a context manager.
    """
    listem = get_object_or_404(Form3Model , id=pk)
    file_path = listem.dosya.path
    file_name = str(listem.dosya)
    mime_type , _ = mimetypes.guess_type(file_path)
    with open(file_path , 'rb') as fh:
        response = HttpResponse(fh.read() , content_type=mime_type)
    response['Content-Disposition'] = f"attachment; filename={file_name}"
    return response
@login_required(login_url=login_view)
def create_malzeme(request,form3):
    """Add a Malzeme (material) row to the Form3 identified by `form3`.

    NOTE(review): after a successful POST the page is re-rendered instead of
    redirected -- confirm whether a redirect (PRG pattern) was intended.
    """
    malzeme_tedarik=MalzemeForm(request.POST or None)
    context={'malzeme_tedarik':malzeme_tedarik}
    if request.method=='POST' and malzeme_tedarik.is_valid():
        b=malzeme_tedarik.save(commit=False)
        b.Form333=Form3.objects.get(id=form3)
        b.save()
    return render(request,'Form3/MalzemeTedarik.html',context)
@login_required(login_url=login_view)
def malzeme(request,form3):
    """List the Malzeme rows attached to one Form3."""
    related_rows = Malzeme.objects.filter(Form333=form3)
    return render(request, 'Form3/malzemeview.html', {'malzemeler': related_rows})
| orhunakar01/hekimbey01 | form3/views.py | views.py | py | 3,907 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "models.Form3.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Form3.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Form3",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "model... |
18845748066 | import os,sys,shutil,multiprocessing
sys.path.append("..")
from base.get_config import MyConfig as myconfig
pid=multiprocessing.current_process().pid#获取pid进程编号
folderpath=myconfig("project","project_path").value+myconfig("project","data_path").value
def folder_create(path=None):
    """Create (if needed) and return a working folder.

    Defaults to a per-process "testsuite-pid-<pid>" folder under the
    configured data path.  Fix: os.makedirs(exist_ok=True) replaces the
    exists()/mkdir() pair, removing the check-then-create race and also
    creating any missing parent directories.
    """
    if not path:
        path = folderpath + "/" + "testsuite-pid-" + str(pid)
    os.makedirs(path, exist_ok=True)
    return path
def folder_clear(path=folderpath):
    """Delete the temporary sub-folders of the runningdata directory.

    Only acts when `path` ends in "runningdata"; the data_debug and
    data_running folders are preserved.  For any other path only a warning
    is printed and nothing is deleted.
    """
    path=os.path.abspath(path)  # convert to an absolute path
    #print("directory name", os.path.dirname(path))
    # NOTE(review): splitting on "\\" assumes Windows path separators --
    # confirm this tool is Windows-only, otherwise use os.path.basename.
    if path.split("\\")[len(path.split("\\"))-1]=="runningdata":  # only operate on the runningdata directory
        for root,dirs,filename in os.walk(path,False):
            #print("-------------------------------------------------")
            #print(str(root),"||",str(dirs)+"||"+str(filename))
            for dir in dirs:
                # keep the two persistent data folders; remove everything
                # else that sits directly under `path`
                if dir!="data_debug" and dir!="data_running" and root==path:
                    shutil.rmtree(path+"/"+dir)
    else:
        print("清空目录:"+str(path)+"下文件夹,谨慎操作!")
if __name__=="__main__":
print("创建了文件:",folder_create())
print(folder_clear()) | cainiaosun/study | 测试/自动化合并/autotest/base/web_ui/running_folder.py | running_folder.py | py | 1,130 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "base.get_c... |
154684037 | from django.urls import path
from home import views
# URL routing for the home app; login is the site root.
urlpatterns = [
    path('sign', views.sign, name='sign'),
    path('', views.loginp, name='loginp'),
    path('logoutp', views.logoutp, name='logoutp'),
    path('base', views.base, name='base'),
    path('mainhome', views.mainhome, name='home'),
    # path('accounts/login/', views.predict_demand_supply_dtree, name='predict'),
    path('predict/', views.predict_demand_supply_dtree, name='predict'),
    # NOTE(review): each prediction_results* route reuses the same view as
    # its corresponding form route -- confirm this double mapping is intended.
    path('prediction_results', views.predict_demand_supply_dtree, name='predictResult'),
    path('pcw', views.pcw, name='pcw'),
    path('prediction_results2', views.pcw, name='predictResult2'),
    path('pcacw', views.pcacw, name='pcacw'),
    path('prediction_results3', views.pcacw, name='predictResult3')
]
| Atharv4507/SP | home/urls.py | urls.py | py | 768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "home.views.sign",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "home.views",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
... |
42243134200 | import sys, time, itertools
import dill as pickle
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.stats as stats
import scipy.optimize as opti
import bead_util as bu
import calib_util as cal
import transfer_func_util as tf
import configuration as config
import warnings
warnings.filterwarnings("ignore")
##################################################################
######################## Script Params ###########################
only_closest = False #True
minsep = 15 # um
maxthrow = 80 # um
beadheight = 10 # um
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_extdrive_nofield_long'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_nofield_shieldin'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_1V-1300Hz_shieldin_0mV-cant'
#data_dir = '/data/20180314/bead1/grav_data/ydrive_1sep_1height_2V-2200Hz_shield_0mV-cant'
data_dir = '/data/20180314/bead1/grav_data/ydrive_6sep_1height_shield-2Vac-2200Hz_cant-0mV'
#savepath = '/sensitivities/20180314_grav_shield-2200Hz_cant-m100mV_allharm.npy'
savepath = '/sensitivities/20180314_grav_shieldin-2V-2200Hz_cant-0V_allharm.npy'
save = False
load = False
file_inds = (0, 10)
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
tfdate = '' #'20180215'
diag = False
confidence_level = 0.95
lamb_range = (1.7e-6, 1e-4)
#user_lims = [(65e-6, 80e-6), (-240e-6, 240e-6), (-5e-6, 5e-6)]
user_lims = [(5e-6, 80e-6), (-240e-6, 240e-6), (-5e-6, 5e-6)]
#user_lims = []
tophatf = 300 # Hz, doesn't reconstruct data above this frequency
nharmonics = 10
harms = [1,3,5,7]
plot_just_current = False
figtitle = ''
ignoreX = False
ignoreY = False
ignoreZ = False
compute_min_alpha = False
##################################################################
################# Constraints to plot against ####################
alpha_plot_lims = (1000, 10**13)
lambda_plot_lims = (10**(-7), 10**(-4))
#limitdata_path = '/home/charles/opt_lev_analysis/gravity_sim/gravity_sim_v1/data/' + \
# 'decca2_limit.txt'
limitdata_path = '/sensitivities/decca1_limits.txt'
limitdata = np.loadtxt(limitdata_path, delimiter=',')
limitlab = 'No Decca 2'
#limitdata_path2 = '/home/charles/opt_lev_analysis/gravity_sim/gravity_sim_v1/data/' + \
# 'no_decca2_limit.txt'
limitdata_path2 = '/sensitivities/decca2_limits.txt'
limitdata2 = np.loadtxt(limitdata_path2, delimiter=',')
limitlab2 = 'With Decca 2'
##################################################################
##################################################################
##################################################################
# Various fitting functions
def parabola(x, a, b, c):
    """Quadratic fit model: a*x^2 + b*x + c."""
    return a * x**2 + b * x + c
def line(x, a, b):
    """Linear fit model: a*x + b."""
    return a * x + b
def const(x, a):
    """Constant fit model (x is ignored)."""
    return a
def flicker(x, a):
    """1/f ("flicker") noise model: a / x."""
    return a * (1. / x)
def build_mod_grav_funcs(theory_data_dir):
'''Loads data from the output of /data/grav_sim_data/process_data.py
which processes the raw simulation output from the farmshare code
INPUTS: theory_data_dir, path to the directory containing the data
OUTPUTS: gfuncs, 3 element list with 3D interpolating functions
for regular gravity [fx, fy, fz]
yukfuncs, 3 x Nlambda array with 3D interpolating function
for modified gravity with indexing:
[[y0_fx, y1_fx, ...], [y0_fy, ...], [y0_fz, ...]]
lambdas, np.array with all lambdas from the simulation
lims, 3 element with tuples for (min, max) of coordinate
limits in interpolation
'''
# Load modified gravity curves from simulation output
Gdata = np.load(theory_data_dir + 'Gravdata.npy')
yukdata = np.load(theory_data_dir + 'yukdata.npy')
lambdas = np.load(theory_data_dir + 'lambdas.npy')
xpos = np.load(theory_data_dir + 'xpos.npy')
ypos = np.load(theory_data_dir + 'ypos.npy')
zpos = np.load(theory_data_dir + 'zpos.npy')
if lambdas[-1] > lambdas[0]:
lambdas = lambdas[::-1]
yukdata = np.flip(yukdata, 0)
# Find limits to avoid out of range erros in interpolation
xlim = (np.min(xpos), np.max(xpos))
ylim = (np.min(ypos), np.max(ypos))
zlim = (np.min(zpos), np.max(zpos))
# Build interpolating functions for regular gravity
gfuncs = [0,0,0]
for resp in [0,1,2]:
gfuncs[resp] = interp.RegularGridInterpolator((xpos, ypos, zpos), Gdata[:,:,:,resp])
# Build interpolating functions for yukawa-modified gravity
yukfuncs = [[],[],[]]
for resp in [0,1,2]:
for lambind, yuklambda in enumerate(lambdas):
lamb_func = interp.RegularGridInterpolator((xpos, ypos, zpos), yukdata[lambind,:,:,:,resp])
yukfuncs[resp].append(lamb_func)
lims = [xlim, ylim, zlim]
return gfuncs, yukfuncs, lambdas, lims
def get_data_at_harms(files, gfuncs, yukfuncs, lambdas, lims, \
minsep=20, maxthrow=80, beadheight=5,\
cantind=0, ax1='x', ax2='z', diag=True, plottf=False, \
width=0, nharmonics=10, harms=[], \
ext_cant_drive=False, ext_cant_ind=1, \
ignoreX=False, ignoreY=False, ignoreZ=False, noiseband=10):
'''Loops over a list of file names, loads each file, diagonalizes,
then performs an optimal filter using the cantilever drive and
a theoretical force vs position to generate the filter/template.
The result of the optimal filtering is stored, and the data
released from memory
INPUTS: files, list of files names to extract data
cantind, cantilever electrode index
ax1, axis with different DC positions
ax2, 2nd axis with different DC positions
OUTPUTS:
'''
#parts = data_dir.split('/')
#prefix = parts[-1]
#savepath = '/processed_data/grav_data/' + prefix + '_fildat.p'
#try:
# fildat = pickle.load(open(savepath, 'rb'))
# return fildat
#except:
# print 'Loading data from: ', data_dir
fildat = {}
temp_gdat = {}
for fil_ind, fil in enumerate(files):
bu.progress_bar(fil_ind, len(files), suffix=' Sorting Files, Extracting Data')
### Load data
df = bu.DataFile()
df.load(fil)
df.calibrate_stage_position()
cantbias = df.electrode_settings['dc_settings'][0]
ax1pos = df.stage_settings[ax1 + ' DC']
ax2pos = df.stage_settings[ax2 + ' DC']
if cantbias not in list(fildat.keys()):
fildat[cantbias] = {}
if ax1pos not in list(fildat[cantbias].keys()):
fildat[cantbias][ax1pos] = {}
if ax2pos not in list(fildat[cantbias][ax1pos].keys()):
fildat[cantbias][ax1pos][ax2pos] = []
if ax1pos not in list(temp_gdat.keys()):
temp_gdat[ax1pos] = {}
if ax2pos not in list(temp_gdat[ax1pos].keys()):
temp_gdat[ax1pos][ax2pos] = [[], []]
temp_gdat[ax1pos][ax2pos][1] = [[]] * len(lambdas)
cfind = len(fildat[cantbias][ax1pos][ax2pos])
fildat[cantbias][ax1pos][ax2pos].append([])
if fil_ind == 0 and plottf:
df.diagonalize(date=tfdate, maxfreq=tophatf, plot=True)
else:
df.diagonalize(date=tfdate, maxfreq=tophatf)
if fil_ind == 0:
ginds, fund_ind, drive_freq, drive_ind = \
df.get_boolean_cantfilt(ext_cant_drive=ext_cant_drive, ext_cant_ind=ext_cant_ind, \
nharmonics=nharmonics, harms=harms, width=width)
datffts, diagdatffts, daterrs, diagdaterrs = \
df.get_datffts_and_errs(ginds, drive_freq, noiseband=noiseband, plot=False, \
diag=diag)
drivevec = df.cant_data[drive_ind]
mindrive = np.min(drivevec)
maxdrive = np.max(drivevec)
posvec = np.linspace(mindrive, maxdrive, 500)
ones = np.ones_like(posvec)
start = time.time()
for lambind, yuklambda in enumerate(lambdas):
if ax1 == 'x' and ax2 == 'z':
newxpos = minsep + (maxthrow - ax1pos)
newheight = ax2pos - beadheight
elif ax1 =='z' and ax2 == 'x':
newxpos = minsep + (maxthrow - ax2pos)
newheight = ax1pos - beadheight
else:
print("Coordinate axes don't make sense for gravity data...")
print("Proceeding anyway, but results might be hard to interpret")
newxpos = ax1pos
newheight = ax2pos
if (newxpos < lims[0][0]*1e6) or (newxpos > lims[0][1]*1e6):
#print 'skipped x'
continue
if (newheight < lims[2][0]*1e6) or (newheight > lims[2][1]*1e6):
#print 'skipped z'
continue
pts = np.stack((newxpos*ones, posvec, newheight*ones), axis=-1)
gfft = [[], [], []]
yukfft = [[], [], []]
for resp in [0,1,2]:
if (ignoreX and resp == 0) or (ignoreY and resp == 1) or (ignoreZ and resp == 2):
gfft[resp] = np.zeros(np.sum(ginds))
yukfft[resp] = np.zeros(np.sum(ginds))
continue
if len(temp_gdat[ax1pos][ax2pos][0]):
gfft[resp] = temp_gdat[ax1pos][ax2pos][0][resp]
else:
gforcevec = gfuncs[resp](pts*1e-6)
gforcefunc = interp.interp1d(posvec, gforcevec)
gforcet = gforcefunc(drivevec)
gfft[resp] = np.fft.rfft(gforcet)[ginds]
if len(temp_gdat[ax1pos][ax2pos][1][lambind]):
yukfft[resp] = temp_gdat[ax1pos][ax2pos][1][lambind][resp]
else:
yukforcevec = yukfuncs[resp][lambind](pts*1e-6)
yukforcefunc = interp.interp1d(posvec, yukforcevec)
yukforcet = yukforcefunc(drivevec)
yukfft[resp] = np.fft.rfft(yukforcet)[ginds]
gfft = np.array(gfft)
yukfft = np.array(yukfft)
temp_gdat[ax1pos][ax2pos][0] = gfft
temp_gdat[ax1pos][ax2pos][1][lambind] = yukfft
outdat = (yuklambda, datffts, diagdatffts, daterrs, diagdaterrs, gfft, yukfft)
fildat[cantbias][ax1pos][ax2pos][cfind].append(outdat)
stop = time.time()
#print 'func eval time: ', stop-start
return fildat
def get_alpha_lambda(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
plot=True, save=False, savepath='', confidence_level=0.95, \
only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
'''Loops over a list of file names, loads each file, diagonalizes,
then performs an optimal filter using the cantilever drive and
a theoretical force vs position to generate the filter/template.
The result of the optimal filtering is stored, and the data
released from memory
INPUTS: fildat
OUTPUTS:
'''
# For the confidence interval, compute the inverse CDF of a
# chi^2 distribution at given confidence level and compare to
# liklihood ratio via a goodness of fit parameter.
# Refer to scipy.stats documentation to understand chi2
chi2dist = stats.chi2(1)
# factor of 0.5 from Wilks's theorem: -2 log (Liklihood) ~ chi^2(1)
con_val = 0.5 * chi2dist.ppf(confidence_level)
colors = bu.get_color_map(len(lambdas))
alphas = np.zeros_like(lambdas)
diagalphas = np.zeros_like(lambdas)
testalphas = np.linspace(-10**10, 10**10, 11)
minalphas = [[]] * len(lambdas)
biasvec = list(fildat.keys())
biasvec.sort()
ax1posvec = list(fildat[biasvec[0]].keys())
ax1posvec.sort()
ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
ax2posvec.sort()
if only_closest:
if ax1 == 'x' and ax2 == 'z':
seps = minsep + (maxthrow - np.array(ax1posvec))
heights = np.array(ax2posvec) - beadheight
sind = np.argmin(seps)
hind = np.argmin(np.abs(heights - beadheight))
ax1posvec = [ax1posvec[sind]]
ax2posvec = [ax2posvec[hind]]
elif ax1 =='z' and ax2 == 'x':
seps = minsep + (maxthrow - np.array(ax2posvec))
heights = np.array(ax1pos) - beadheight
sind = np.argmin(seps)
hind = np.argmin(np.abs(heights - beadheight))
ax1posvec = [ax1posvec[hind]]
ax2posvec = [ax2posvec[sind]]
newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * \
len(newlamb) * len(testalphas) + 1
i = -1
# To test chi2 fit against "fake" data, uncomment these lines
rands = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
rands2 = np.random.randn(*fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][0][1].shape)
for lambind, yuklambda in enumerate(lambdas):
#if lambind != 48:
# continue
if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
continue
test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
test_yukdat = test[-1]
test_dat = test[1]
newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
testalphas = np.linspace(-1.0*newalpha, newalpha, 21)
chi_sqs = np.zeros(len(testalphas))
diagchi_sqs = np.zeros(len(testalphas))
for alphaind, testalpha in enumerate(testalphas):
N = 0
chi_sq = 0
diagchi_sq = 0
for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
i += 1
bu.progress_bar(i, tot_iterations, suffix=' Fitting the Data for Chi^2')
for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
assert dat[0] == yuklambda
_, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat
# To test chi2 fit against "fake" data, uncomment these lines
#datfft = yukfft * -0.5e9
#datfft += (1.0 / np.sqrt(2)) * daterr * rands + \
# (1.0 / np.sqrt(2)) * daterr * rands2 * 1.0j
#gfft = np.zeros_like(datfft)
for resp in [0,1,2]:
if (ignoreX and resp == 0) or \
(ignoreY and resp == 1) or \
(ignoreZ and resp == 2):
print(ignoreX, ignoreY, ignoreZ, resp)
continue
re_diff = datfft[resp].real - \
(gfft[resp].real + testalpha * yukfft[resp].real )
im_diff = datfft[resp].imag - \
(gfft[resp].imag + testalpha * yukfft[resp].imag )
if diag:
diag_re_diff = diagdatfft[resp].real - \
(gfft[resp].real + testalpha * yukfft[resp].real )
diag_im_diff = diagdatfft[resp].imag - \
(gfft[resp].imag + testalpha * yukfft[resp].imag )
#plt.plot(np.abs(re_diff))
#plt.plot(daterr[resp])
#plt.show()
chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*daterr[resp]**2) ) + \
np.sum( np.abs(im_diff)**2 / (0.5*daterr[resp]**2) ) )
if diag:
diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
(0.5*diagdaterr[resp]**2) ) + \
np.sum( np.abs(diag_im_diff)**2 / \
(0.5*diagdaterr[resp]**2) ) )
N += len(re_diff) + len(im_diff)
chi_sqs[alphaind] = chi_sq / (N - 1)
if diag:
diagchi_sqs[alphaind] = diagchi_sq / (N - 1)
max_chi = np.max(chi_sqs)
if diag:
max_diagchi = np.max(diagchi_sqs)
max_alpha = np.max(testalphas)
p0 = [max_chi/max_alpha**2, 0, 1]
if diag:
diag_p0 = [max_diagchi/max_alpha**2, 0, 1]
#if lambind == 0:
# p0 = [0.15e9, 0, 5]
#else:
# p0 = p0_old
if plot:
plt.figure(1)
plt.plot(testalphas, chi_sqs, color = colors[lambind])
if diag:
plt.figure(2)
plt.plot(testalphas, diagchi_sqs, color = colors[lambind])
try:
popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
p0=p0, maxfev=100000)
if diag:
diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
p0=diag_p0, maxfev=1000000)
except:
print("Couldn't fit")
popt = [0,0,0]
popt[2] = np.mean(chi_sqs)
regular_con_val = con_val + np.min(chi_sqs)
if diag:
diag_con_val = con_val + np.min(diagchi_sqs)
# Select the positive root for the non-diagonalized data
soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
if diag:
diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
if soln1 > soln2:
alpha_con = soln1
else:
alpha_con = soln2
if diag:
if diagsoln1 > diagsoln2:
diagalpha_con = diagsoln1
else:
diagalpha_con = diagsoln2
alphas[lambind] = alpha_con
if diag:
diagalphas[lambind] = alpha_con
if plot:
plt.figure(1)
plt.title('Goodness of Fit for Various Lambda', fontsize=16)
plt.xlabel('Alpha Parameter [arb]', fontsize=14)
plt.ylabel('$\chi^2$', fontsize=18)
if diag:
plt.figure(2)
plt.title('Goodness of Fit for Various Lambda - DIAG', fontsize=16)
plt.xlabel('Alpha Parameter [arb]', fontsize=14)
plt.ylabel('$\chi^2$', fontsize=18)
plt.show()
if not diag:
diagalphas = np.zeros_like(alphas)
if save:
if savepath == '':
print('No save path given, type full path here')
savepath = input('path: ')
np.save(savepath, [lambdas, alphas, diagalphas])
return lambdas, alphas, diagalphas
def get_alpha_vs_file(fildat, diag=True, ignoreX=False, ignoreY=False, ignoreZ=False, \
plot=True, save=False, savepath='', confidence_level=0.95, \
only_closest=False, ax1='x', ax2='z', lamb_range=(1e-9, 1e-2)):
'''Loops over a list of file names, loads each file, diagonalizes,
then performs an optimal filter using the cantilever drive and
a theoretical force vs position to generate the filter/template.
The result of the optimal filtering is stored, and the data
released from memory
INPUTS: fildat
OUTPUTS:
'''
# For the confidence interval, compute the inverse CDF of a
# chi^2 distribution at given confidence level and compare to
# liklihood ratio via a goodness of fit parameter.
# Refer to scipy.stats documentation to understand chi2
chi2dist = stats.chi2(1)
# factor of 0.5 from Wilks's theorem: -2 log (Liklihood) ~ chi^2(1)
con_val = 0.5 * chi2dist.ppf(confidence_level)
colors = bu.get_color_map(len(lambdas))
alphas = np.zeros_like(lambdas)
diagalphas = np.zeros_like(lambdas)
testalphas = np.linspace(-10**10, 10**10, 11)
biasvec = list(fildat.keys())
biasvec.sort()
ax1posvec = list(fildat[biasvec[0]].keys())
ax1posvec.sort()
ax2posvec = list(fildat[biasvec[0]][ax1posvec[0]].keys())
ax2posvec.sort()
if only_closest:
if ax1 == 'x' and ax2 == 'z':
seps = minsep + (maxthrow - np.array(ax1posvec))
heights = np.array(ax2posvec) - beadheight
sind = np.argmin(seps)
hind = np.argmin(np.abs(heights - beadheight))
ax1posvec = [ax1posvec[sind]]
ax2posvec = [ax2posvec[hind]]
elif ax1 =='z' and ax2 == 'x':
seps = minsep + (maxthrow - np.array(ax2posvec))
heights = np.array(ax1pos) - beadheight
sind = np.argmin(seps)
hind = np.argmin(np.abs(heights - beadheight))
ax1posvec = [ax1posvec[hind]]
ax2posvec = [ax2posvec[sind]]
newlamb = lambdas[(lambdas > lamb_range[0]) * (lambdas < lamb_range[-1])]
tot_iterations = len(biasvec) * len(ax1posvec) * len(ax2posvec) * len(newlamb) * len(testalphas)
i = -1
for lambind, yuklambda in enumerate(lambdas):
if lambind != 48:
continue
if (yuklambda < lamb_range[0]) or (yuklambda > lamb_range[1]):
continue
test = fildat[biasvec[0]][ax1posvec[0]][ax2posvec[0]][0][lambind]
test_yukdat = test[-1]
test_dat = test[1]
newalpha = 1e-4 * np.sqrt(np.mean(np.abs(test_dat) / np.abs(test_yukdat)))
testalphas = np.linspace(-1.0*newalpha, newalpha, 11)
for bias, ax1pos, ax2pos in itertools.product(biasvec, ax1posvec, ax2posvec):
i += 1
bu.progress_bar(i, tot_iterations)
minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
diag_minalphas = [0] * len(fildat[bias][ax1pos][ax2pos])
for fil_ind in range(len(fildat[bias][ax1pos][ax2pos])):
dat = fildat[bias][ax1pos][ax2pos][fil_ind][lambind]
assert dat[0] == yuklambda
_, datfft, diagdatfft, daterr, diagdaterr, gfft, yukfft = dat
chi_sqs = np.zeros(len(testalphas))
diagchi_sqs = np.zeros(len(testalphas))
for alphaind, testalpha in enumerate(testalphas):
chi_sq = 0
diagchi_sq = 0
N = 0
for resp in [0,1,2]:
if (ignoreX and resp == 0) or \
(ignoreY and resp == 1) or \
(ignoreZ and resp == 2):
continue
re_diff = datfft[resp].real - \
(gfft[resp].real + testalpha * yukfft[resp].real )
im_diff = datfft[resp].imag - \
(gfft[resp].imag + testalpha * yukfft[resp].imag )
if diag:
diag_re_diff = diagdatfft[resp].real - \
(gfft[resp].real + testalpha * yukfft[resp].real )
diag_im_diff = diagdatfft[resp].imag - \
(gfft[resp].imag + testalpha * yukfft[resp].imag )
#plt.plot(np.abs(re_diff))
#plt.plot(daterr[resp])
#plt.show()
chi_sq += ( np.sum( np.abs(re_diff)**2 / (0.5*(daterr[resp]**2)) ) + \
np.sum( np.abs(im_diff)**2 / (0.5*(daterr[resp]**2)) ) )
if diag:
diagchi_sq += ( np.sum( np.abs(diag_re_diff)**2 / \
(0.5*(diagdaterr[resp]**2)) ) + \
np.sum( np.abs(diag_im_diff)**2 / \
(0.5*(diagdaterr[resp]**2)) ) )
N += len(re_diff) + len(im_diff)
chi_sqs[alphaind] = chi_sq / (N - 1)
if diag:
diagchi_sqs[alphaind] = diagchi_sq / (N - 1)
max_chi = np.max(chi_sqs)
if diag:
max_diagchi = np.max(diagchi_sqs)
max_alpha = np.max(testalphas)
p0 = [max_chi/max_alpha**2, 0, 1]
if diag:
diag_p0 = [max_diagchi/max_alpha**2, 0, 1]
try:
popt, pcov = opti.curve_fit(parabola, testalphas, chi_sqs, \
p0=p0, maxfev=100000)
if diag:
diagpopt, diagpcov = opti.curve_fit(parabola, testalphas, diagchi_sqs, \
p0=diag_p0, maxfev=1000000)
except:
print("Couldn't fit")
popt = [0,0,0]
popt[2] = np.mean(chi_sqs)
regular_con_val = con_val + np.min(chi_sqs)
if diag:
diag_con_val = con_val + np.min(diagchi_sqs)
# Select the positive root for the non-diagonalized data
soln1 = ( -1.0 * popt[1] + np.sqrt( popt[1]**2 - \
4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
soln2 = ( -1.0 * popt[1] - np.sqrt( popt[1]**2 - \
4 * popt[0] * (popt[2] - regular_con_val)) ) / (2 * popt[0])
if diag:
diagsoln1 = ( -1.0 * diagpopt[1] + np.sqrt( diagpopt[1]**2 - \
4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
diagsoln2 = ( -1.0 * diagpopt[1] - np.sqrt( diagpopt[1]**2 - \
4 * diagpopt[0] * (diagpopt[2] - diag_con_val)) ) / (2 * diagpopt[0])
if soln1 > soln2:
alpha_con = soln1
else:
alpha_con = soln2
if diag:
if diagsoln1 > diagsoln2:
diagalpha_con = diagsoln1
else:
diagalpha_con = diagsoln2
minalphas[fil_ind] = alpha_con
if diag:
diag_minalphas[fil_ind] = diagalpha_con
if plot:
minfig, minaxarr = plt.subplots(1,2,figsize=(10,5),dpi=150)
minaxarr[0].plot(minalphas)
minaxarr[0].set_title('Min $\\alpha$ vs. Time', fontsize=18)
minaxarr[0].set_xlabel('File Num', fontsize=16)
minaxarr[0].set_ylabel('$\\alpha$ [arb]', fontsize=16)
minaxarr[1].hist(minalphas, bins=20)
minaxarr[1].set_xlabel('$\\alpha$ [arb]', fontsize=16)
plt.tight_layout()
plt.show()
return minalphas
if not plot_just_current:
gfuncs, yukfuncs, lambdas, lims = build_mod_grav_funcs(theory_data_dir)
datafiles = bu.find_all_fnames(data_dir, ext=config.extensions['data'])
datafiles = datafiles[file_inds[0]:file_inds[1]]
if len(datafiles) == 0:
print("Found no files in: ", data_dir)
quit()
fildat = get_data_at_harms(datafiles, gfuncs, yukfuncs, lambdas, lims, \
minsep=minsep, maxthrow=maxthrow, beadheight=beadheight, \
cantind=0, ax1='x', ax2='z', diag=diag, plottf=False, \
nharmonics=nharmonics, harms=harms, \
ext_cant_drive=True, ext_cant_ind=1, \
ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ)
if compute_min_alpha:
_ = get_alpha_vs_file(fildat, only_closest=only_closest, \
ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ, \
lamb_range=lamb_range, diag=diag, plot=True)
newlambdas, alphas, diagalphas = \
get_alpha_lambda(fildat, only_closest=only_closest, \
ignoreX=ignoreX, ignoreY=ignoreY, ignoreZ=ignoreZ, \
lamb_range=lamb_range, diag=diag)
outdat = [newlambdas, alphas, diagalphas]
if save:
np.save(savepath, outdat)
if load:
dat = np.load(savepath)
newlambdas = dat[0]
alphas = dat[1]
diagalphas = dat[2]
fig, ax = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)
if diag:
fig2, ax2 = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)
if not plot_just_current:
ax.loglog(newlambdas, alphas, linewidth=2, label='95% CL')
if diag:
ax2.loglog(newlambdas, diagalphas, linewidth=2, label='95% CL')
ax.loglog(limitdata[:,0], limitdata[:,1], '--', label=limitlab, linewidth=3, color='r')
ax.loglog(limitdata2[:,0], limitdata2[:,1], '--', label=limitlab2, linewidth=3, color='k')
ax.grid()
ax.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
ax.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
ax.set_xlabel('$\lambda$ [m]')
ax.set_ylabel('$\\alpha$')
ax.legend(numpoints=1, fontsize=9)
ax.set_title(figtitle)
plt.tight_layout()
if diag:
ax2.loglog(limitdata[:,0], limitdata[:,1], '--', label=limitlab, linewidth=3, color='r')
ax2.loglog(limitdata2[:,0], limitdata2[:,1], '--', label=limitlab2, linewidth=3, color='k')
ax2.grid()
ax2.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
ax2.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
ax2.set_xlabel('$\lambda$ [m]')
ax2.set_ylabel('$\\alpha$')
ax2.legend(numpoints=1, fontsize=9)
ax2.set_title(figtitle)
plt.tight_layout()
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/mod_grav/old/alpha_lambda_from_timedomain_fit.py | alpha_lambda_from_timedomain_fit.py | py | 30,732 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.load",
... |
42130961103 | from beautifultable import BeautifulTable
from Contact_new import Contact
class InMemoryImpl:
contact_list = []
@classmethod
def addContact(cls):
name = input("enter name: ")
email = input("enter email: ")
mobile = input("enter mobile: ")
address = input("enter address: ")
cls.contact_list.append(Contact(name, email, mobile, address))
print(f"Contact is added succesfully!!! with name: {name} ")
@classmethod
def deleteContact(cls):
name = input("enetr name to delete: ")
contact = cls.get_contact_by_name(name)
if contact:
cls.contact_list.remove(contact)
print(f"contact: {name} deleted successfully!!!!")
else:
print(f"contact with name : {name} not found")
@classmethod
def viewContact(cls):
InMemoryImpl._paint(cls.contact_list)
@classmethod
def search(cls):
if len(cls.contact_list) > 0 :
name = input("enetr name to search: ")
s_list = list(filter(lambda x:name.lower() in x.get_name().lower(),cls.contact_list))
if len(s_list) > 0:
InMemoryImpl._paint(s_list)
else:
print("there is no data found with searched name: {name}")
else:
print("Contact book is empty!!..... You cant search!!!")
@classmethod
def get_contact_by_name(cls, name):
if len(cls.contact_list) > 0:
contact = list(filter(lambda x:x.get_name().lower() == name.lower(), cls.contact_list))
return contact[0] if contact else None
@classmethod
def updateContact(cls):
name = input("enetr name to update: ")
contact = cls.get_contact_by_name(name)
if contact:
print("1.Name 2.Email 3.Mobile 4.Address")
ch = int(input("enter your choice: "))
if ch == 1:
print(f"Old name: {contact.get_name()}")
name= input("entyer the new name: ")
if name:
contact.set_name(name)
elif ch == 2:
print(f"Old email: {contact.get_email()}")
email= input("entyer the new email: ")
if email:
contact.set_email(email)
elif ch == 3:
print(f"Old mobile: {contact.get_mobile()}")
mobile= input("entyer the new mobile: ")
if mobile:
contact.set_mobile(mobile)
elif ch == 4:
print(f"Old address: {contact.get_address()}")
address= input("entyer the new address: ")
if address:
contact.set_address(address)
else:
print(f"contact not found with name: {name}")
@staticmethod
def _paint(lst):
if len(lst) != 0:
table=BeautifulTable()
table.column_headers = ["Name", "Email", "Mobile", "Address"]
for c in lst:
table.append_row([c.get_name(),c.get_email(), c.get_mobile(), c.get_address()])
print(table)
else:
print(f"Contact Book is empty!.....") | adityaKoteCoder/codex | Contactbook/inmemory.py | inmemory.py | py | 3,251 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Contact_new.Contact",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "beautifultable.BeautifulTable",
"line_number": 91,
"usage_type": "call"
}
] |
34222277716 | # -*- coding: utf-8 -*-
import logging
import xml.sax
import slpyser.xmlparser.handlers as handlers
from slpyser.model.abap_objects.AbapDictionary import AbapDictionary
from slpyser.model.abap_objects.AbapMessageClass import AbapMessageClass
from slpyser.model.abap_objects.AbapTextPool import AbapTextElement
class SAPLinkContentHandle(xml.sax.ContentHandler):
"""
Implementation for SAX XML parser handle SAPLink file syntax.
"""
def __init__(self):
"""
Constructor
"""
self.__logger = logging.getLogger(__name__)
xml.sax.ContentHandler.__init__(self)
self._matrix_element_case_handler = {
# TextPool elements
'TEXTPOOL': [
self._startTextPool,
self._charactersTextPool,
self._endTextPool
],
'TEXTELEMENT': [
self._startTextPoolTextElement,
self._charactersTextPoolTextElement,
self._endTextPoolTextElement
],
# Message Class elements
'MSAG': [
self._startMessageClass,
None,
self._endMessageClass
],
'T100': [
self._startMessageClassMessage,
None,
None,
],
# General elements
'SOURCE': [
self._startSourceCode,
self._charactersSourceCode,
self._endSourceCode
],
'LANGUAGE': [
self._startTextLanguage,
self._charactersTextLanguage,
self._endTextLanguage
],
}
"""
Each element have three handlers, declared in that order:
1st: handle start of an element (retrieve element attributes);
2nd: handle contents of an element (retrieve data inside element);
3rd: handle end of an element.
"""
self.__unhandled_element = [
self._startUnhandled,
self._charactersUnhandled,
self._endUnhandled
]
# Attributes to be returned after parsing
self._abap_message_classes = {}
# Internal attributes, store references of current processed abap objects
self.__current_source_code_reference = None
self.__current_text_pool_reference = None
self.__current_class_documentation_reference = None
self.__current_text_language = None
self.__current_message_class = None
# Helper attributes
self.__current_tag = None
self.__current_tag_stack = []
# Decoupled parsers
self.__programs_parser = handlers.Program(owner=self)
self._matrix_element_case_handler.update(self.__programs_parser.map_parse())
self.__ddic_parser = handlers.DDIC(owner=self)
self._matrix_element_case_handler.update(self.__ddic_parser.map_parse())
self.__class_library_parser = handlers.ClassLibrary(owner=self)
self._matrix_element_case_handler.update(self.__class_library_parser.map_parse())
self.__function_group_parser = handlers.FunctionGroup(owner=self)
self._matrix_element_case_handler.update(self.__function_group_parser.map_parse())
@property
def abapClasses(self):
return self.__class_library_parser.parsed_classes
@property
def abapFunctionGroups(self):
return self.__function_group_parser.parsed_function_groups
@property
def abapMessageClasses(self):
return self._abap_message_classes
@property
def abapDictionary(self):
return AbapDictionary.from_ddic_handler(self.__ddic_parser)
@property
def abapPrograms(self):
return self.__programs_parser.parsed_programs
def startElement(self, name, attrs):
"""Parses start element"""
# Upper case on name because SAPLINK haven't used same case on all elements.
self.__current_tag = name.upper()
self.__current_tag_stack.append(self.__current_tag)
start_element_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[0]
if start_element_handler is not None:
start_element_handler(name.upper(), attrs)
def characters(self, content):
"""
Parses inner contents of current element.
This method is called for each new line inside that element.
"""
characters_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[1]
if characters_handler is not None:
characters_handler(content)
def endElement(self, name):
"""Parses end of element."""
if self.__current_tag != name.upper():
self.__logger.error('ERROR parsing file, current element was %s but closing element was %s' , self.__current_tag, name.upper())
end_element_handler = self._matrix_element_case_handler.get(self.__current_tag, self.__unhandled_element)[2]
if end_element_handler is not None:
end_element_handler(name.upper())
self.__current_tag_stack.pop()
# FIXME: Append None to currentTagStack to avoid little hack?
self.__current_tag = self.__current_tag_stack[-1] if len(self.__current_tag_stack) > 0 else None
# Below are declared method to properly handle elements and its contents
def _startMessageClass(self, name, attrs):
self.__logger.debug('Start message class')
name = attrs.get('ARBGB')
original_language = attrs.get('MASTERLANG')
responsible = attrs.get('RESPUSER', '')
short_text = attrs.get('STEXT', '')
message_class = AbapMessageClass(Name=name,
OriginalLanguage=original_language,
Responsible=responsible,
ShortText=short_text)
self.__current_message_class = message_class
def _endMessageClass(self, name):
msg_class = self.__current_message_class
self._abap_message_classes[msg_class.name] = msg_class
self.__current_message_class = None
def _startMessageClassMessage(self, name, attrs):
self.__logger.debug('Start Message Class Message')
language = attrs.get('SPRSL')
number = attrs.get('MSGNR')
text = attrs.get('TEXT')
message = AbapMessageClass.Message(Language=language,
Number=number,
Text=text)
if self.__current_message_class.language_mapping.get(language) == None:
self.__current_message_class.language_mapping[language] = {}
self.__current_message_class.language_mapping[language][number] = message
def _startSourceCode(self, name, attrs):
self.__logger.debug('Start Source Code')
def _charactersSourceCode(self, content):
self.__current_source_code_reference.source_code.append(content)
def charactersSourceCode(self, content):
self._charactersSourceCode(content)
def _endSourceCode(self, name):
self.__logger.debug('End Source Code')
def _startTextLanguage(self, name, attrs):
self.__logger.debug('Start Text Language')
self.__current_text_language = attrs.get('SPRAS')
# Initializing language dict
if self.__current_text_pool_reference is not None:
self.__current_text_pool_reference.language_mapping[self.__current_text_language] = {}
elif self.__current_class_documentation_reference is not None:
self.__current_class_documentation_reference.languageMappint[self.__current_text_language] = []
def _charactersTextLanguage(self, content):
pass
def _endTextLanguage(self, name):
self.__logger.debug('End Text Language')
self.__current_text_language = None
def _startTextPool(self, name, attrs):
self.__logger.debug('Start Text Pool')
def _charactersTextPool(self, content):
pass
def _endTextPool(self, name):
self.__logger.debug('End Text Pool')
def _startTextPoolTextElement(self, name, attrs):
self.__logger.debug('Start Text Pool Text Element')
text_id = attrs.get('ID')
key = attrs.get('KEY')
entry = attrs.get('ENTRY')
length = attrs.get('LENGTH')
text_element = AbapTextElement(TextId=text_id,
TextKey=key,
TextEntry=entry,
Length=length)
if self.__current_text_pool_reference is not None:
self.__current_text_pool_reference.addTextElement(Language=self.__current_text_language,
TextElement=text_element)
else:
self.__logger.warning('[FIXME] A text pool''s entry "%s" was found but the current abap object wasn''t expecting a text pool.', entry)
def _charactersTextPoolTextElement(self, content):
pass
def _endTextPoolTextElement(self, name):
self.__logger.debug('End Text Pool Text Element')
def _startUnhandled(self, name, attrs):
self.__logger.warning('Start of an unhandled element: %s', name)
def _charactersUnhandled(self, content):
self.__logger.warning('Content of unhandled tag: %s', content)
def _endUnhandled(self, name):
self.__logger.warning('End of an unhandled element: %s', name)
def set_current_source_code_reference(self, source_reference):
self.__current_source_code_reference = source_reference
source_reference.source_code = []
def finalize_source_code(self):
"""
Join the source code's array into a string, and clean it's reference from parser.
"""
self.__current_source_code_reference.source_code = ''.join(self.__current_source_code_reference.source_code)
self.__current_source_code_reference = None
def set_current_textpool_reference(self, textpool_reference):
self.__current_text_pool_reference = textpool_reference
def finalize_textpool(self):
self.__current_text_pool_reference = None
| thalesvb/slpyser | slpyser/xmlparser/SAPLinkContentHandle.py | SAPLinkContentHandle.py | py | 10,301 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "xml.sax.sax",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "xml.sax",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "xml.sax.sax.ContentHandler... |
17116817824 | import sys
import argparse
import os
import math
from ROOT import TCanvas, TColor, TGaxis, TH1F, TPad, TString, TFile, TH1, THStack, gROOT, TStyle, TAttFill, TLegend, TGraphAsymmErrors, TLine
from ROOT import kBlack, kBlue, kRed, kCyan, kViolet, kGreen, kOrange, kGray, kPink, kTRUE
from ROOT import Double
from ROOT import gROOT, gStyle
from functools import reduce
# Run ROOT in batch mode (no graphics windows) and reset prior state.
gROOT.SetBatch(1)
gROOT.Reset()
# Global plot style: white canvas, no frame border, no statistics box.
gStyle.SetCanvasColor(0)
gStyle.SetFrameBorderMode(0)
gStyle.SetOptStat(0)
gStyle.SetTitleX(0.5) # title X location
gStyle.SetTitleY(0.96) # title Y location
# Format used when bin contents are painted as text.
gStyle.SetPaintTextFormat(".2f")
# Command-line interface.
# FIX: argparse uses the "%(prog)s" placeholder ("%prog" is optparse syntax),
# and ArgumentParser's first positional parameter is `prog`, not `usage`, so
# the usage string must be passed by keyword or it silently becomes the
# program name.
usage = 'usage: %(prog)s [options]'
parser = argparse.ArgumentParser(usage=usage)
# Flat (log-normal) rate uncertainties: nuisance name -> relative uncertainty.
Nuisances_lnN={
    "pdf_Higgs_ttH":0.036,
    "QCDscale_ttH":0.093,
    "pdf_tHq":0.010,
    "QCDscale_tHq":0.067,
    "pdf_tHW":0.027,
    "QCDscale_tHW":0.061,
    "pdf_TTW":0.04,"QCDscale_TTW":0.129,
    "pdf_TTWW":0.03,"QCDscale_TTWW":0.109,
    "pdf_TTZ":0.035, "QCDscale_TTZ":0.112,
    "CMS_ttHl_WZ_theo":0.07,
    "pdf_WH":0.019,"QCDscale_WH":0.07,
    "pdf_ZH":0.016,"QCDscale_ZH":0.038,
    "pdf_qqH":0.021,"QCDscale_qqH":0.04,
    "pdf_ggH":0.031,"QCDscale_ggH":0.081,
    "BR_htt":0.016,"BR_hww":0.015,"BR_hzz":0.015,"BR_hzg":0.010,"BR_hmm":0.010,
    "lumi":0.03,"CMS_ttHl_QF":0.300,"CMS_ttHl_EWK_4j":0.300,"CMS_ttHl_Convs":0.500,"CMS_ttHl_Rares":0.500,"CMS_ttHl_EWK":0.500,
}
# Which lnN nuisances (keys of Nuisances_lnN) apply to each sample/process.
lnN_per_sample={
    "data_flips":["CMS_ttHl_QF"],
    "TTZ":["pdf_TTZ","QCDscale_TTZ","lumi"],
    "TTW":["pdf_TTW","QCDscale_TTW","lumi"],
    "TTWW":["pdf_TTWW","QCDscale_TTWW","lumi"],
    "WZ":["CMS_ttHl_EWK_4j","CMS_ttHl_EWK","lumi"],
    "ZZ":["CMS_ttHl_EWK_4j","CMS_ttHl_EWK","lumi"],
    "Convs":["CMS_ttHl_Convs","lumi"],
    "Rares":["CMS_ttHl_Rares","lumi"],
    "ttH_hww":["pdf_Higgs_ttH","QCDscale_ttH","BR_hww","lumi"],
    "ttH_hzz":["pdf_Higgs_ttH","QCDscale_ttH","BR_hzz","lumi"],
    "ttH_hmm":["pdf_Higgs_ttH","QCDscale_ttH","BR_hmm","lumi"],
    "ttH_htt":["pdf_Higgs_ttH","QCDscale_ttH","BR_htt","lumi"],
    "ttH_hzg":["pdf_Higgs_ttH","QCDscale_ttH","BR_hzg","lumi"],
    "tHW_hww":["pdf_tHW","QCDscale_tHW","BR_hww","lumi"],
    "tHW_hzz":["pdf_tHW","QCDscale_tHW","BR_hzz","lumi"],
    "tHW_hmm":["pdf_tHW","QCDscale_tHW","BR_hmm","lumi"],
    "tHW_htt":["pdf_tHW","QCDscale_tHW","BR_htt","lumi"],
    "tHW_hzg":["pdf_tHW","QCDscale_tHW","BR_hzg","lumi"],
    "tHq_hww":["pdf_tHq","QCDscale_tHq","BR_hww","lumi"],
    "tHq_hzz":["pdf_tHq","QCDscale_tHq","BR_hzz","lumi"],
    "tHq_hmm":["pdf_tHq","QCDscale_tHq","BR_hmm","lumi"],
    "tHq_htt":["pdf_tHq","QCDscale_tHq","BR_htt","lumi"],
    "tHq_hzg":["pdf_tHq","QCDscale_tHq","BR_hzg","lumi"],
    "qqH_hww":["pdf_qqH","QCDscale_qqH","BR_hww","lumi"],
    "qqH_hzz":["pdf_qqH","QCDscale_qqH","BR_hzz","lumi"],
    "qqH_htt":["pdf_qqH","QCDscale_qqH","BR_htt","lumi"],
    "ggH_hww":["pdf_ggH","QCDscale_ggH","BR_hww","lumi"],
    "ggH_hzz":["pdf_ggH","QCDscale_ggH","BR_hzz","lumi"],
    "ggH_htt":["pdf_ggH","QCDscale_ggH","BR_htt","lumi"],
    "WH_hww":["pdf_WH","QCDscale_WH","BR_hww","lumi"],
    "WH_hzz":["pdf_WH","QCDscale_WH","BR_hzz","lumi"],
    "WH_htt":["pdf_WH","QCDscale_WH","BR_htt","lumi"],
    "ZH_hww":["pdf_ZH","QCDscale_ZH","BR_hww","lumi"],
    "ZH_hzz":["pdf_ZH","QCDscale_ZH","BR_hzz","lumi"],
    "ZH_htt":["pdf_ZH","QCDscale_ZH","BR_htt","lumi"],
    "TTWH_hww":["BR_hww","lumi"],
    "TTWH_hzz":["BR_hzz","lumi"],
    "TTWH_htt":["BR_htt","lumi"],
    "TTZH_hww":["BR_hww","lumi"],
    "TTZH_hzz":["BR_hzz","lumi"],
    "TTZH_htt":["BR_htt","lumi"],
}
# Shape nuisances shared by all eras and all MC samples.
common_shape = ["CMS_ttHl_lepEff_muloose","CMS_ttHl_lepEff_elloose", "CMS_ttHl_lepEff_mutight","CMS_ttHl_lepEff_eltight", "CMS_ttHl_JER","CMS_ttHl_UnclusteredEn","CMS_scale_j_jesFlavorQCD", "CMS_scale_j_jesRelativeBal","CMS_scale_j_jesHF","CMS_scale_j_jesBBEC1","CMS_scale_j_jesEC2","CMS_scale_j_jesAbsolute"]
# Samples that additionally carry the theory (thu) shape variations below.
thuShape_samples = ["ttH_htt","ttH_hzz","ttH_hww","ttH_hmm","ttH_hzg","tHq_htt","tHq_hww","tHq_hzz","tHW_htt","tHW_hww","tHW_hzz","TTW","TTZ"]
thuShape = ["CMS_ttHl_thu_shape_ttH_x1","CMS_ttHl_thu_shape_ttH_y1"]
# Shape nuisances used for the fake-lepton background estimate.
fakeShape = ["CMS_ttHl_Clos_e_shape","CMS_ttHl_Clos_m_shape","CMS_ttHl_FRm_norm","CMS_ttHl_FRm_pt","CMS_ttHl_FRm_be","CMS_ttHl_FRe_norm","CMS_ttHl_FRe_pt","CMS_ttHl_FRe_be"]
# Era-specific shape nuisances (b-tagging, pile-up, JES splits, prefiring).
shape_2016=[
    "CMS_ttHl16_L1PreFiring", "CMS_ttHl16_btag_HFStats1","CMS_ttHl16_btag_HFStats2","CMS_ttHl16_btag_LFStats1","CMS_ttHl16_btag_LFStats2","PU_16",
    "CMS_scale_j_jesRelativeSample_2016","CMS_scale_j_jesBBEC1_2016","CMS_scale_j_jesEC2_2016","CMS_scale_j_jesAbsolute_2016","CMS_scale_j_jesHF_2016",
]
shape_2017=[
    "CMS_ttHl17_L1PreFiring", "CMS_ttHl17_btag_HFStats1","CMS_ttHl17_btag_HFStats2","CMS_ttHl17_btag_LFStats1","CMS_ttHl17_btag_LFStats2","PU_17",
    "CMS_scale_j_jesRelativeSample_2017","CMS_scale_j_jesBBEC1_2017","CMS_scale_j_jesEC2_2017","CMS_scale_j_jesAbsolute_2017","CMS_scale_j_jesHF_2017",
]
# NOTE: 2018 has no L1 prefiring nuisance (the prefiring issue ended in 2017).
shape_2018=[
    "CMS_ttHl18_btag_HFStats1","CMS_ttHl18_btag_HFStats2","CMS_ttHl18_btag_LFStats1","CMS_ttHl18_btag_LFStats2","PU_18",
    "CMS_scale_j_jesRelativeSample_2018","CMS_scale_j_jesBBEC1_2018","CMS_scale_j_jesEC2_2018","CMS_scale_j_jesAbsolute_2018","CMS_scale_j_jesHF_2018",
]
def draw_underflow_overflow(h1):
    """Draw h1 with the x-axis range widened to include the underflow
    (bin 0) and overflow (bin nbins+1) bins, and return it."""
    xaxis = h1.GetXaxis()
    xaxis.SetRange(0, h1.GetNbinsX() + 1)
    h1.Draw()
    return h1
def fill_underflow_overflow(h1):
    """Fold the underflow into the first bin and the overflow into the last
    bin of h1 (via weighted Fill calls), draw it, and return it.

    Note: the under/overflow bins themselves are not cleared afterwards.
    """
    last_bin = h1.GetNbinsX()
    # Fill(x, w) adds weight w at position x, pushing out-of-range contents
    # into the corresponding edge bins.
    h1.Fill(h1.GetBinCenter(1), h1.GetBinContent(0))
    h1.Fill(h1.GetBinCenter(last_bin), h1.GetBinContent(last_bin + 1))
    h1.Draw()
    return h1
def fill_lnN_error(hist_nom, lnNs):
    """Add log-normal (rate) uncertainties, in quadrature, to the existing
    bin errors of hist_nom and return it.

    lnNs is a list of relative uncertainties; they are first combined in
    quadrature, scaled by each bin's content, and then added in quadrature
    to that bin's current error.  An empty list leaves the histogram
    untouched.
    """
    if not lnNs:
        return hist_nom
    # Combine all relative lnN uncertainties in quadrature.
    rel_unc = reduce(lambda a, b: math.sqrt(a ** 2 + b ** 2), lnNs)
    for ibin in range(1, hist_nom.GetNbinsX() + 1):
        lnN_abs = hist_nom.GetBinContent(ibin) * rel_unc
        stat_err = hist_nom.GetBinError(ibin)
        hist_nom.SetBinError(ibin, math.sqrt(stat_err ** 2 + lnN_abs ** 2))
    return hist_nom
def set_lnN_error(hist_nom, lnNs):
    """Overwrite the bin errors of hist_nom with the pure lnN (rate)
    uncertainty and return it.

    The relative uncertainties in lnNs are combined in quadrature and
    multiplied by each bin's content; existing errors are replaced, not
    added to.  With an empty list every bin error is set to zero.
    """
    nbins = hist_nom.GetNbinsX()
    if not lnNs:
        # No lnN terms: the systematic component is zero everywhere.
        for ibin in range(1, nbins + 1):
            hist_nom.SetBinError(ibin, 0)
        return hist_nom
    rel_unc = reduce(lambda a, b: math.sqrt(a ** 2 + b ** 2), lnNs)
    for ibin in range(1, nbins + 1):
        hist_nom.SetBinError(ibin, hist_nom.GetBinContent(ibin) * rel_unc)
    return hist_nom
def fill_shape_error(hist_nom, hist_up, hist_down):
    """Add the envelope of an up/down shape variation, in quadrature, to the
    bin errors of hist_nom and return it.

    For each bin the systematic error is max(|nom-up|, |nom-down|), added in
    quadrature to the bin's current error.
    """
    nbin = hist_nom.GetNbinsX()
    for i in range(1, nbin + 1):
        central_val = hist_nom.GetBinContent(i)
        error_nom = hist_nom.GetBinError(i)
        error_up = abs(central_val - hist_up.GetBinContent(i))
        # BUG FIX: this previously read hist_up as well, so the down
        # variation never contributed to the envelope.
        error_down = abs(central_val - hist_down.GetBinContent(i))
        error_syst = max(error_up, error_down)
        error = math.sqrt(error_nom ** 2 + error_syst ** 2)
        hist_nom.SetBinError(i, error)
    return hist_nom
def find_lnN(keyname):
    """Return the list of relative lnN uncertainty values that apply to the
    sample `keyname`, resolved via the module-level lnN_per_sample and
    Nuisances_lnN tables.  Unknown samples or nuisances are reported with a
    warning and skipped.
    """
    try:
        nuisance_names = lnN_per_sample[keyname]
    except KeyError:
        print("########## WARNING ######### {} is not found in lnN_per_sample, set it to empty list ".format(keyname))
        nuisance_names = []
    values = []
    for nuisance in nuisance_names:
        try:
            values.append(Nuisances_lnN[nuisance])
        except KeyError:
            print("########## WARNING ######### {} is not found in Nuisances_lnN, skip this nuisance ".format(nuisance))
    return values
def find_shapes(keyname, era):
    """Return the shape-nuisance names applying to sample `keyname` in
    data-taking year `era` ("2016"/"2017"/"2018").

    Fake-lepton samples get only the fake-rate shapes, data gets none, and
    samples listed in thuShape_samples additionally get the theory-shape
    variations on top of the common + era-specific MC shapes.  Exits the
    program on an unknown era.
    """
    era_to_shapes = {"2016": shape_2016, "2017": shape_2017, "2018": shape_2018}
    if era not in era_to_shapes:
        print("ERROR year must be 2016 2017 or 2018")
        sys.exit()
    mc_shapes = common_shape + era_to_shapes[era]
    if "fakes" in keyname or "Fakes" in keyname:
        return fakeShape
    if "data" in keyname:
        return []
    if keyname in thuShape_samples:
        return mc_shapes + thuShape
    return mc_shapes
def getvarhists(rfile, keyname, systname):
    """Fetch the Up and Down varied histograms for one systematic from an
    open ROOT file and detach them from the file (SetDirectory(0)) so they
    survive the file being closed.  Returns (h_up, h_down)."""
    varied = []
    for direction in ("Up", "Down"):
        hvar = rfile.Get("{}_{}{}".format(keyname, systname, direction))
        hvar.SetDirectory(0)
        varied.append(hvar)
    return varied[0], varied[1]
# Output: one ROOT file holding, per feature and per sample, the nominal
# histogram with full (stat+syst) errors plus stat-only and syst-only
# copies, for each era and summed over the three eras.
# NOTE(review): `outputdir`, `region`, `cutname`, `features`, `sampleName`,
# `inputDir` and `catflag` are not defined in this part of the script —
# presumably they come from the argument parser above; confirm.
# (The literal "uncertanty" typo is kept: it is part of the output filename.)
outfilename = "{}/ttH_{}_{}_full_uncertanty_runII.root".format(outputdir,region , cutname)
f_out = TFile(outfilename,"recreate")
print(" recreate file {}".format(outfilename))
for feature, values in features.items():
    for sample in sampleName:
        # Names of the run-II (2016+2017+2018) summed histograms.
        outhist_sum = sample+"_"+feature+"_runII"
        outhist_sum_stat = sample+"_"+feature+"_runII_stat"
        outhist_sum_syst = sample+"_"+feature+"_runII_syst"
        ycount = 0
        for y in ["2016","2017","2018"]:
            file0 = TFile("{}/{}/{}/ttH_{}_{}_{}.root".format(inputDir, catflag, feature, region, cutname, y),"read")
            # Uncertainties applying to this sample in this era.
            errorlnNs = find_lnN(sample)
            errShapes = find_shapes(sample, y)
            file0.cd()
            h_nom = file0.Get(sample)
            h_nom.SetDirectory(0)
            # stat-only clone keeps the raw bin errors; the syst-only clone
            # gets its errors rebuilt from the nuisances below.
            h_stat = h_nom.Clone(sample+"_stat")
            h_stat.SetDirectory(0)
            h_syst = h_nom.Clone(sample+"_syst")
            h_syst.SetDirectory(0)
            # Add lnN (rate) uncertainties in quadrature to the nominal errors.
            hist_all = fill_lnN_error(h_nom, errorlnNs)
            h_syst = set_lnN_error(h_syst, errorlnNs)
            # Fold in each shape systematic's up/down envelope.
            for shapeName in errShapes:
                hist_up, hist_down = getvarhists(file0, sample, shapeName)
                hist_all = fill_shape_error(hist_all, hist_up, hist_down)
                h_syst = fill_shape_error(h_syst, hist_up, hist_down)
            outhist_name = sample+"_"+feature+"_"+y
            h_out = hist_all.Clone(outhist_name)
            h_out.SetTitle(outhist_name)
            h_out.SetName(outhist_name)
            outhist_name_stat = sample+"_"+feature+"_"+y + "_stat"
            h_out_stat = h_stat.Clone(outhist_name_stat)
            h_out_stat.SetTitle(outhist_name_stat)
            h_out_stat.SetName(outhist_name_stat)
            outhist_name_syst = sample+"_"+feature+"_"+y + "_syst"
            h_out_syst = h_syst.Clone(outhist_name_syst)
            h_out_syst.SetTitle(outhist_name_syst)
            h_out_syst.SetName(outhist_name_syst)
            f_out.cd()
            h_out.Write()
            h_out_stat.Write()
            h_out_syst.Write()
            # Accumulate the run-II sum over the three eras.
            if ycount ==0:
                h_outsum = hist_all.Clone(outhist_sum)
                h_outsum.SetTitle(outhist_sum)
                h_outsum.SetName(outhist_sum)
                h_outsum_stat = h_out_stat.Clone(outhist_sum_stat)
                h_outsum_stat.SetTitle(outhist_sum_stat)
                h_outsum_stat.SetName(outhist_sum_stat)
                h_outsum_syst = h_out_syst.Clone(outhist_sum_syst)
                h_outsum_syst.SetTitle(outhist_sum_syst)
                h_outsum_syst.SetName(outhist_sum_syst)
            else:
                h_outsum.Add(hist_all)
                h_outsum_stat.Add(h_out_stat)
                # NOTE(review): h_syst_add is not defined anywhere in this
                # file's visible scope — confirm it is provided elsewhere.
                h_outsum_syst = h_syst_add(h_outsum_syst, h_out_syst)
            ycount +=1
        f_out.cd()
        h_outsum.Write()
        h_outsum_stat.Write()
        h_outsum_syst.Write()
f_out.Close()
| BinghuanLi/post_tWIHEP | plotters/make_systHists.py | make_systHists.py | py | 11,190 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ROOT.gROOT.SetBatch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ROOT.gROOT",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "ROOT.gROOT.Reset",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ROOT.gROOT",
"lin... |
13653886738 | import matplotlib.pyplot as plt
def main():
    """Plot hard-coded accuracy-vs-polynomial-degree results."""
    # NOTE(review): `filename` is prompted for but never used below —
    # confirm whether loading results from the file was meant to replace
    # the hard-coded X/Y data.
    filename = input('Enter a file name: ')
    X = [0,1,2,3,4,5]
    Y=[0.78,0.92,0.91,0.88,0.88,0.89]
    #plt.ylabel('Generation with best result')
    plt.ylabel('Accuracy of result')
    plt.plot(X,Y)
    plt.xlabel('Degree of polynomial')
    plt.show()
# NOTE(review): these two statements run at import time, before (and
# regardless of) the __main__ guard below — they look like leftover
# debugging output.
x = [[9, 5, 9], [7, 8, 9]]
print(x)
if __name__ == '__main__':
    main()
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mat... |
29725189606 | import pandas as pd
import numpy as np
def iat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted):
    '''
    Compute an IAT D score separately for each stimulus (i.e. word).

    df holds trial-level data; subject/rt/block/condition/stimulus are column
    names; cond1 and cond2 are the two condition labels; blocks lists the four
    relevant block numbers, paired (after sorting) as (0th,2nd) and (1st,3rd).
    With weighted=True a D score is computed per block pair and the two pair
    scores are averaged; with weighted=False a single pooled score is used.
    Returns a Series indexed by (subject, stimulus); positive values indicate
    slower mean RT under cond1.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only trials from the two conditions of interest.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,stimulus,condition,block])[rt].mean()
        #Get mean RT for each block pair of each condition:
        #(blocks[0],blocks[2]) form the first pair, (blocks[1],blocks[3]) the second
        cond1rt_bl1=blcnd_rt.loc[idx[:,:,cond1,[blocks[0],blocks[2]]]]
        cond1rt_bl2=blcnd_rt.loc[idx[:,:,cond1,[blocks[1],blocks[3]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,:,cond2,[blocks[0],blocks[2]]]]
        cond2rt_bl2=blcnd_rt.loc[idx[:,:,cond2,[blocks[1],blocks[3]]]]
        #Drop block and condition levels so the series align on (subject, stimulus)
        cond1rt_bl1.index=cond1rt_bl1.index.droplevel([2,3])
        cond1rt_bl2.index=cond1rt_bl2.index.droplevel([2,3])
        cond2rt_bl1.index=cond2rt_bl1.index.droplevel([2,3])
        cond2rt_bl2.index=cond2rt_bl2.index.droplevel([2,3])
        #Get RT standard deviation separately for first and second block pairs
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[2])].groupby([subject,stimulus])[rt].std()
        b2rt_std=df[(df[block]==blocks[1])|(df[block]==blocks[3])].groupby([subject,stimulus])[rt].std()
        #Get D score: average the two standardized mean differences
        d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
        d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
        d=(d1+d2)/2
    elif weighted==False:
        # Pooled: mean-RT difference divided by the overall per-stimulus SD.
        cnds = df.groupby([subject,stimulus,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby([subject,stimulus])[rt].std()
    return(d)
def iat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted):
    '''
    Compute an IAT D score across all stimuli (i.e. words), which is the
    standard scoring.

    df holds trial-level data; subject/rt/block/condition are column names;
    cond1 and cond2 are the two condition labels; blocks lists the four
    relevant block numbers, paired (after sorting) as (0th,2nd) and (1st,3rd).
    With weighted=True returns a DataFrame indexed by subject with the two
    block-pair scores ('dscore1','dscore2') and their average ('dscore');
    with weighted=False returns a Series named 'dscore'.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only trials from the two conditions of interest.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,condition,block])[rt].mean()
        #Get mean RT for each block pair of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,cond1,[blocks[0],blocks[2]]]]
        cond1rt_bl2=blcnd_rt.loc[idx[:,cond1,[blocks[1],blocks[3]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,cond2,[blocks[0],blocks[2]]]]
        cond2rt_bl2=blcnd_rt.loc[idx[:,cond2,[blocks[1],blocks[3]]]]
        #Drop block and condition levels so the series align on subject
        for df_tmp in [cond1rt_bl1,cond1rt_bl2,cond2rt_bl1,cond2rt_bl2]:
            df_tmp.index=df_tmp.index.droplevel([1,2])
        #Get RT standard deviation separately for first and second block pairs
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[2])].groupby(subject)[rt].std()
        b2rt_std=df[(df[block]==blocks[1])|(df[block]==blocks[3])].groupby(subject)[rt].std()
        #Get D score: average the two standardized mean differences
        d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
        d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
        d=(d1+d2)/2
        d=pd.concat([d1,d2,d],axis=1)
        d.columns=['dscore1','dscore2','dscore']
        return(d)
    elif weighted==False:
        # Pooled: mean-RT difference divided by the overall per-subject SD.
        cnds = df.groupby([subject,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby(subject)[rt].std()
        d.name='dscore'
        return(d)
def biat_get_dscore_each_stim(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted):
    '''
    Compute a Brief IAT (BIAT) D score separately for each stimulus (i.e. word).

    df holds trial-level data; subject/rt/block/condition/stimulus are column
    names; cond1 and cond2 are the two condition labels; blocks lists the 2, 4
    or 6 relevant block numbers — consecutive pairs (after sorting) are scored
    together.  With weighted=True a D score is computed per block pair and the
    pair scores are averaged; with weighted=False a single pooled score is
    used.  Returns a Series indexed by (subject, stimulus).

    Bug fix: with six blocks the three pair scores are now divided by 3
    (previously divided by 2, inconsistent with both the two-pair branch and
    biat_get_dscore_across_stim).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only trials from the two conditions of interest.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,stimulus,condition,block])[rt].mean()
        #Get mean RT for the first pair of blocks of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,:,cond1,[blocks[0],blocks[1]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,:,cond2,[blocks[0],blocks[1]]]]
        #Drop block and condition levels so the series align on (subject, stimulus)
        cond1rt_bl1.index=cond1rt_bl1.index.droplevel([2,3])
        cond2rt_bl1.index=cond2rt_bl1.index.droplevel([2,3])
        #RT standard deviation within each pair of blocks
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[1])].groupby([subject,stimulus])[rt].std()
        if len(blocks)>=4:
            cond1rt_bl2=blcnd_rt.loc[idx[:,:,cond1,[blocks[2],blocks[3]]]]
            cond2rt_bl2=blcnd_rt.loc[idx[:,:,cond2,[blocks[2],blocks[3]]]]
            cond1rt_bl2.index=cond1rt_bl2.index.droplevel([2,3])
            cond2rt_bl2.index=cond2rt_bl2.index.droplevel([2,3])
            b2rt_std=df[(df[block]==blocks[2])|(df[block]==blocks[3])].groupby([subject,stimulus])[rt].std()
        if len(blocks)>=6:
            cond1rt_bl3=blcnd_rt.loc[idx[:,:,cond1,[blocks[4],blocks[5]]]]
            cond2rt_bl3=blcnd_rt.loc[idx[:,:,cond2,[blocks[4],blocks[5]]]]
            cond1rt_bl3.index=cond1rt_bl3.index.droplevel([2,3])
            cond2rt_bl3.index=cond2rt_bl3.index.droplevel([2,3])
            b3rt_std=df[(df[block]==blocks[4])|(df[block]==blocks[5])].groupby([subject,stimulus])[rt].std()
        if len(blocks)==2:
            d=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
        elif len(blocks)==4:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d=(d1+d2)/2
        elif len(blocks)==6:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d3=(cond1rt_bl3-cond2rt_bl3)/b3rt_std
            # Average the three pair scores (was erroneously /2).
            d=(d1+d2+d3)/3
    elif weighted==False:
        # Pooled: mean-RT difference divided by the overall per-stimulus SD.
        cnds = df.groupby([subject,stimulus,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby([subject,stimulus])[rt].std()
    return(d)
def biat_get_dscore_across_stim(df,subject,rt,block,condition,cond1,cond2,blocks,weighted):
    '''
    Compute a Brief IAT (BIAT) D score across all stimuli (i.e. words).

    df holds trial-level data; subject/rt/block/condition are column names;
    cond1 and cond2 are the two condition labels; blocks lists the 2, 4 or 6
    relevant block numbers — consecutive pairs (after sorting) are scored
    together.  With weighted=True returns a Series named 'dscore' (2 blocks)
    or a DataFrame with per-pair scores plus the averaged 'dscore' (4 or 6
    blocks); with weighted=False returns a Series named 'dscore'.

    Bug fix: the unweighted branch previously grouped on an undefined name
    `stimulus` (copied from the per-stimulus variant), raising NameError; it
    now pools across stimuli like iat_get_dscore_across_stim.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    # Keep only trials from the two conditions of interest.
    df=df[(df[condition]==cond1)|(df[condition]==cond2)]
    if weighted==True:
        blocks=sorted(blocks)
        blcnd_rt=df.groupby([subject,condition,block])[rt].mean()
        #Get mean RT for the first pair of blocks of each condition
        cond1rt_bl1=blcnd_rt.loc[idx[:,cond1,[blocks[0],blocks[1]]]]
        cond2rt_bl1=blcnd_rt.loc[idx[:,cond2,[blocks[0],blocks[1]]]]
        #Drop block and condition levels so the series align on subject
        cond1rt_bl1.index=cond1rt_bl1.index.droplevel([1,2])
        cond2rt_bl1.index=cond2rt_bl1.index.droplevel([1,2])
        #RT standard deviation within each pair of blocks
        b1rt_std=df[(df[block]==blocks[0])|(df[block]==blocks[1])].groupby([subject])[rt].std()
        if len(blocks)>=4:
            cond1rt_bl2=blcnd_rt.loc[idx[:,cond1,[blocks[2],blocks[3]]]]
            cond2rt_bl2=blcnd_rt.loc[idx[:,cond2,[blocks[2],blocks[3]]]]
            cond1rt_bl2.index=cond1rt_bl2.index.droplevel([1,2])
            cond2rt_bl2.index=cond2rt_bl2.index.droplevel([1,2])
            b2rt_std=df[(df[block]==blocks[2])|(df[block]==blocks[3])].groupby([subject])[rt].std()
        if len(blocks)>=6:
            cond1rt_bl3=blcnd_rt.loc[idx[:,cond1,[blocks[4],blocks[5]]]]
            cond2rt_bl3=blcnd_rt.loc[idx[:,cond2,[blocks[4],blocks[5]]]]
            cond1rt_bl3.index=cond1rt_bl3.index.droplevel([1,2])
            cond2rt_bl3.index=cond2rt_bl3.index.droplevel([1,2])
            b3rt_std=df[(df[block]==blocks[4])|(df[block]==blocks[5])].groupby([subject])[rt].std()
        if len(blocks)==2:
            d=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d.name='dscore'
        elif len(blocks)==4:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d=(d1+d2)/2
            d=pd.concat([d1,d2,d],axis=1)
            d.columns=['dscore1','dscore2','dscore']
        elif len(blocks)==6:
            d1=(cond1rt_bl1-cond2rt_bl1)/b1rt_std
            d2=(cond1rt_bl2-cond2rt_bl2)/b2rt_std
            d3=(cond1rt_bl3-cond2rt_bl3)/b3rt_std
            d=(d1+d2+d3)/3
            d=pd.concat([d1,d2,d3,d],axis=1)
            d.columns=['dscore1','dscore2','dscore3','dscore']
        return(d)
    elif weighted==False:
        # FIX: pool across stimuli; the original referenced an undefined
        # `stimulus` variable here.
        cnds = df.groupby([subject,condition])
        d = (cnds[rt].mean().unstack()[cond1]-cnds[rt].mean().unstack()[cond2])/df.groupby(subject)[rt].std()
        d.name='dscore'
        return(d)
def iat_get_dscore(df,subject,rt,block,condition,cond1,cond2,blocks,weighted,biat,each_stim,stimulus):
    '''
    Dispatch to the appropriate D-score routine (IAT vs. BIAT, per-stimulus
    vs. across stimuli) and normalize the result's shape: per-stimulus
    scores are unstacked to one column per stimulus, and unweighted
    across-stimulus Series are returned as one-column DataFrames.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    if each_stim==True:
        scorer = biat_get_dscore_each_stim if biat==True else iat_get_dscore_each_stim
        d = scorer(df,subject,rt,block,condition,stimulus,cond1,cond2,blocks,weighted)
        # One column per stimulus.
        d = d.unstack()
    else:
        scorer = biat_get_dscore_across_stim if biat==True else iat_get_dscore_across_stim
        d = scorer(df,subject,rt,block,condition,cond1,cond2,blocks,weighted)
        if weighted == False:
            # Unweighted scoring yields a Series; return a one-column frame.
            d = d.to_frame()
    return(d)
def overall_fast_slow_stats(df,rt,fast_rt,slow_rt,subject,flags):
    '''
    Return the total number and percentage of too-fast and too-slow trials
    across all subjects, and across only subjects without flags for poor
    performance.

    `flags` is expected to be indexed by subject and to contain an
    `iat_flag` column where 0 means "keep the subject" — TODO confirm
    against the caller.  Returns a one-column DataFrame ('fast_slow_rt').

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    #Count all fast and slow trials across all subjects
    all_fast_rt_count_all_subs=df[df[rt]<fast_rt][rt].count()
    all_slow_rt_count_all_subs=df[df[rt]>=slow_rt][rt].count()
    all_fast_rt_pct_all_subs=df[df[rt]<fast_rt][rt].count()/df[rt].count().astype(float)
    all_slow_rt_pct_all_subs=df[df[rt]>=slow_rt][rt].count()/df[rt].count().astype(float)
    #Now remove subjects with flags and recount
    df_no_flag=df[df[subject].isin(flags[flags.iat_flag==0].index)].copy(deep=True)
    all_fast_rt_count_incl_subs=df_no_flag[(df_no_flag[rt]<fast_rt)][rt].count()
    all_slow_rt_count_incl_subs=df_no_flag[(df_no_flag[rt]>=slow_rt)][rt].count()
    all_fast_rt_pct_incl_subs=df_no_flag[(df_no_flag[rt]<fast_rt)][rt].count()/df_no_flag[rt].count().astype(float)
    all_slow_rt_pct_incl_subs=df_no_flag[(df_no_flag[rt]>=slow_rt)][rt].count()/df_no_flag[rt].count().astype(float)
    #Assemble everything into a single labeled column
    all_fast_slow_rt=pd.DataFrame([all_fast_rt_count_all_subs,all_fast_rt_pct_all_subs,\
                                   all_slow_rt_count_all_subs,all_slow_rt_pct_all_subs,\
                                   all_fast_rt_count_incl_subs,all_fast_rt_pct_incl_subs,\
                                   all_slow_rt_count_incl_subs,all_slow_rt_pct_incl_subs],
                             index=['fast_rt_count_all_subs','fast_rt_pct_all_subs',\
                                    'slow_rt_count_all_subs','slow_rt_pct_all_subs',\
                                    'fast_rt_count_included_subs','fast_rt_pct_included_subs',\
                                    'slow_rt_count_included_subs','slow_rt_pct_included_subs']\
                             ,columns=['fast_slow_rt'])
    return(all_fast_slow_rt)
def blcnd_extract(df,var,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='pct',include_blocks=True):
    '''
    Generic groupby helper: aggregate column `var` by subject, by subject
    within each condition, and — when include_blocks is True — by subject
    within each condition/block pair, returning everything concatenated
    column-wise.  Used to extract error and too-fast/too-slow rates.

    flag_outformat selects the aggregation: 'pct' (mean), 'sum' or 'count'.
    For the standard IAT (biat=False) `blocks` must hold four block numbers
    paired as (0th,2nd) and (1st,3rd); for the BIAT (biat=True) consecutive
    pairs of 2, 4 or 6 blocks are used.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    idx=pd.IndexSlice
    if flag_outformat=='pct':
        all_df=df.groupby(subject)[var].mean()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].mean()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].mean()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].mean()
    elif flag_outformat=='sum':
        all_df=df.groupby(subject)[var].sum()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].sum()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].sum()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].sum()
    elif flag_outformat=='count':
        all_df=df.groupby(subject)[var].count()
        ##By condition
        cond1_df=df[(df[condition]==cond1)].groupby(subject)[var].count()
        cond2_df=df[(df[condition]==cond2)].groupby(subject)[var].count()
        ##By condition and block
        if include_blocks == True:
            blcnd=df.groupby([subject,condition,block])[var].count()
    if (include_blocks == True) and (biat==False):
        #IAT: slice each condition's two block pairs out of the MultiIndex
        cond1_bl1=blcnd.loc[idx[:,cond1,[blocks[0],blocks[2]]]]
        cond1_bl2=blcnd.loc[idx[:,cond1,[blocks[1],blocks[3]]]]
        cond2_bl1=blcnd.loc[idx[:,cond2,[blocks[0],blocks[2]]]]
        cond2_bl2=blcnd.loc[idx[:,cond2,[blocks[1],blocks[3]]]]
        #Drop block and condition levels so everything aligns on subject
        for df_tmp in [cond1_bl1,cond1_bl2,cond2_bl1,cond2_bl2]:
            df_tmp.index=df_tmp.index.droplevel([1,2])
        out=pd.concat([all_df,cond1_df,cond2_df,cond1_bl1,cond1_bl2,cond2_bl1,cond2_bl2],axis=1)
    elif (include_blocks == True) and (biat==True):
        #BIAT: consecutive block pairs; only as many pairs as `blocks` provides
        if len(blocks)>=2:
            cond1_bl1=blcnd.loc[idx[:,cond1,[blocks[0],blocks[1]]]]
            cond2_bl1=blcnd.loc[idx[:,cond2,[blocks[0],blocks[1]]]]
            for df_tmp in [cond1_bl1,cond2_bl1]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([all_df,cond1_df,cond2_df,cond1_bl1,cond2_bl1],axis=1)
        if len(blocks)>=4:
            cond1_bl2=blcnd.loc[idx[:,cond1,[blocks[2],blocks[3]]]]
            cond2_bl2=blcnd.loc[idx[:,cond2,[blocks[2],blocks[3]]]]
            for df_tmp in [cond1_bl2,cond2_bl2]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([out,cond1_bl2,cond2_bl2],axis=1)
        if len(blocks)==6:
            cond1_bl3=blcnd.loc[idx[:,cond1,[blocks[4],blocks[5]]]]
            cond2_bl3=blcnd.loc[idx[:,cond2,[blocks[4],blocks[5]]]]
            for df_tmp in [cond1_bl3,cond2_bl3]:
                df_tmp.index=df_tmp.index.droplevel([1,2])
            out=pd.concat([out,cond1_bl3,cond2_bl3],axis=1)
    elif include_blocks == False:
        out=pd.concat([all_df,cond1_df,cond2_df],axis=1)
    return(out)
def error_fastslow_column_names(cond1,cond2,fast_rt,slow_rt,blocks,weighted):
    '''
    Build the output column names for error, too-fast and too-slow rates,
    embedding the condition names and the ms cutoffs, plus matching
    '*_flag' names for the 1/0 exclusion-flag columns.

    Returns (col_names, flag_col_names).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    # Each section follows the same pattern: overall, per condition, and
    # (weighted only) per condition within each block pair.
    suffixes = ['error_rate',
                'fast_rt_rate_%dms' % (fast_rt),
                'slow_rt_rate_%dms' % (slow_rt)]
    col_names = []
    if weighted == True:
        n_pairs = int(len(blocks)/2)
        for sfx in suffixes:
            col_names.append('overall_' + sfx)
            col_names.append('%s_%s' % (cond1, sfx))
            col_names.append('%s_%s' % (cond2, sfx))
            for bl in range(1, n_pairs + 1):
                col_names.append('%s_bl%d_%s' % (cond1, bl, sfx))
                col_names.append('%s_bl%d_%s' % (cond2, bl, sfx))
        col_names.append('num_blocks')
    elif weighted == False:
        for sfx in suffixes:
            col_names.append('overall_' + sfx)
            col_names.append('%s_%s' % (cond1, sfx))
            col_names.append('%s_%s' % (cond2, sfx))
    # Column names for the 1/0 flags saying which criteria were exceeded.
    flag_col_names = ['%s_flag' % c for c in col_names]
    return(col_names, flag_col_names)
def num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,incl_excl_switch,weighted):
    '''
    Build column names for the number of trials overall, within each
    condition and (weighted only) within each condition/block pair.
    incl_excl_switch ('incl'/'excl') marks whether the counts are before
    or after excluding too-fast/too-slow trials.

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    names = ['overall_num_trls_%s_fastslow_rt' % (incl_excl_switch),
             '%s_num_trls_%s_fastslow_rt' % (cond1, incl_excl_switch),
             '%s_num_trls_%s_fastslow_rt' % (cond2, incl_excl_switch)]
    if weighted == True:
        for bl in range(1, int(len(blocks)/2) + 1):
            names.append('%s_bl%d_num_trls_%s_fastslow_rt' % (cond1, bl, incl_excl_switch))
            names.append('%s_bl%d_num_trls_%s_fastslow_rt' % (cond2, bl, incl_excl_switch))
    return(names)
def get_error_fastslow_rates(df,correct,subject,condition,block,cond1,cond2,blocks,flag_outformat,include_blocks,\
    rt,fast_rt,slow_rt,error_or_correct,weighted,errors_after_fastslow_rmvd,df_fastslow_rts_rmvd,biat):
    '''
    Use blcnd_extract to compute error rates and too-fast/too-slow trial
    rates (overall, per condition, and per block when include_blocks).

    Note: mutates `df` in place by adding 'fast_rt' and 'slow_rt' indicator
    columns.  When errors_after_fastslow_rmvd is True, error rates are
    computed on df_fastslow_rts_rmvd (fast/slow trials already removed)
    instead of df.  Returns a list of the aggregated frames (plus, when
    weighted, the number of blocks per subject).

    08-2017
    Alexander Millner <alexmillner@gmail.com
    '''
    ##Errors: choose which dataframe error rates are computed on
    if errors_after_fastslow_rmvd == False:
        df_err=df
    elif errors_after_fastslow_rmvd == True:
        df_err=df_fastslow_rts_rmvd
    ###The `correct` column may encode errors as 1 (error_or_correct='error')
    ###or correct responses as 1 ('correct'); invert in the latter case.
    if error_or_correct=='error':
        err_vars=blcnd_extract(df_err,correct,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    elif error_or_correct=='correct':
        err_vars=1-blcnd_extract(df_err,correct,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    #Fast RT indicator (1 if below the fast cutoff)
    df['fast_rt']=(df[rt]<fast_rt)*1
    fast_rt_vars=blcnd_extract(df,'fast_rt',subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    #Slow RT indicator (1 if at/above the slow cutoff)
    df['slow_rt']=(df[rt]>=slow_rt)*1
    slow_rt_vars=blcnd_extract(df,'slow_rt',subject,condition,block,cond1,cond2,blocks,biat,flag_outformat,include_blocks)
    if weighted == True:
        ## Number of distinct blocks completed by each subject
        num_blocks=df.groupby([subject])[block].unique().apply(lambda x: len(x))
        outcms=[err_vars,\
                fast_rt_vars,\
                slow_rt_vars,\
                num_blocks]
    elif weighted == False:
        outcms=[err_vars,\
                fast_rt_vars,\
                slow_rt_vars]
    return(outcms)
def analyze_iat(df,subject,rt,correct,condition,cond1,cond2,block='block',blocks=[2,3,5,6],weighted=True,\
fast_rt=400,slow_rt=10000,\
overall_err_cut=.3,cond_err_cut=.4,block_err_cut=.4,\
overall_fastslowRT_cut=.10,cond_fastslowRT_cut=.25,block_fastslowRT_cut=.25,\
num_blocks_cutoff=4,\
fastslow_stats=False,biat=False,biat_rmv_xtrls=4,biat_trl_num=False,\
error_or_correct='correct',errors_after_fastslow_rmvd=False,flag_outformat='pct',print_to_excel=False,\
each_stim=False,stimulus=False):
"""Takes a dataframe containing raw IAT (or BIAT) data (all trials, all subjects) and returns
the number of blocks, percentage of errors, reaction times that are too fast and too slow,
flags to remove subjects and D scores for each subject.
Parameters
----------
df : pandas dataframe
Trial x trial IAT data for each subject
subject : str
Column name containing subject number
rt : str
Column name containing reaction time (in ms) for each trial
correct : str
Column name containing whether trial was correct (where correct = 1, error = 0)
(can also use if columns specifies errors; see 'error_or_correct' parameter)
condition : str
Column name containing condition (e.g. Black-Good\White-Bad vs. Black-Bad\White-Good)
cond1 : str
Name of first condition (e.g. 'Black-Good\White-Bad'): bias for this condition will result in negative D score
cond2 : str
Name of second condition (e.g. 'Black-Bad\White-Good'): bias for this condition will result in positive D score
block : str
Column that contains block information
blocks : list
A list containing the numbers corresponding to the relevant blocks, default : [2,3,5,6]
weighted : Boolean
If True return weighted D scores; if False return unweighted D scores, default : True
fast_rt : int
Reaction time (in ms) considered too fast, default: 400
slow_rt : int
Reaction time (in ms) considered too slow, default: 10000
overall_err_cut : float
Cutoff for subject exclusion: overall error rate (decimal), default : .3
cond_err_cut : float
Cutoff for subject exclusion: error rate (decimal) within each condition, default : .4
block_err_cut : float
Cutoff for subject exclusion: error rate (decimal) within a single block, default : .4
overall_fastslowRT_cut=.10
Cutoff for subject exclusion: overall rate of trials with too fast or too slow RT (decimal), default : .1
cond_fastslowRT_cut : float
Cutoff for subject exclusion: rate of trials with too fast or too slow RT (decimal) within each condition, default : .25
block_fastslowRT_cut : float
Cutoff for subject exclusion: rate of trials with too fast or too slow RT (decimal) within each block, default : .25
num_blocks_cutoff : int
Cutoff for subject exclusion: Minimum number of blocks required, default : 4
error_or_correct : str
Enter 'error' to enter a column for 'correct' where error = 1, correct = 0, default: 'correct'
errors_after_fastslow_rmvd : Boolean
If True calculates error rates after removing all fast\slow trials (similar to R package iat); if False error rates calculated with all trials, default : False
fastslow_stats : Boolean
Return a second dataframe containing the number and percentage of fast\slow trials across all subjects
and across subjects with usable data, default : False
biat : Boolean
Enter True if analyzing a Brief Implicit Assoc Test (BIAT), False if regular IAT, default : False
*** One open issue with BIAT flags in pyiat is that currently flags for fast and slow trials use the same cutoff pct.
Recommended scoring procedures (Nosek et al. 2014) recommend a flag for fast trials but not slow.
This is not currently possible in pyiat. However, you can see the pct of slow and fast trials
and create your own flags from this.***
biat_rmv_xtrls : int
Number of trials to remove from beginning of each block. BIAT recommendad scoring procedures (Nosek et al. 2014) remove first 4 trials of each block b/c
they are practice trials but not all BIAT have practice trials, default : 4
biat_trl_num : str
The name of the column that contains trial number, default : False
flag_outformat : str
Can enter 'count' to return number of errors and too fast\slow trials (if fastslow_stats set to True), default : 'pct'
print_to_excel : Boolean
Print an excel workbook that contains output, default : False
each_stim : Boolean
Return D scores for each individual stimulus (i.e. word), default : False
stimulus : Boolean
If each stim = True, then give name of column containing each stimulus (i.e. word), default : False
Returns
-------
pandas DataFrame with
-error rates (overall, each condition, each block (error rates *include* fast\slow trials)),
-rates of fast\slow trials (overall, each condition, each block)
-exclusion flags (overall flag indicating subject should be excluded and for each category informing why subject was flagged)
-D scores (overall and block 1 and block 2 if weighted)
if fastslow_stats = True:
pandas DataFrame with rates of fast\slow trials across all subjects and across only subjects NOT flagged for exclusion
(to report the overall number\pct of trials excluded from a study)
Examples
--------
>>> weighted_d,fastslow_stats_df=iat(it,subject='session_id',rt='latency',
... condition='cond',correct='correct',
... cond1='nosh_me',cond2='sh_me',block='block',
... blocks=[2,3,5,6],fastslow_stats=True,each_stim=False,
... stimulus='trial_name')
Copyright (C) 2017 Alexander Millner <alexmillner@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
idx=pd.IndexSlice
df=df[(df[condition]==cond1)|(df[condition]==cond2)].copy(deep=True)
if df[df[correct]>1].shape[0]!=0 or df[df[correct]<0].shape[0]!=0:
raise ValueError('The \'correct\' column can only contain the values 0 and 1')
#For weighted d scores, we return all block-related stats whereas
#for unweighted we are just comparing conditions and care less about blocks
include_blocks=weighted
#Make column names
col_names,flag_col_names=error_fastslow_column_names(cond1,cond2,fast_rt,slow_rt,blocks,weighted)
block_num_col_names_incl=num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,'incl',weighted)
block_num_col_names_excl=num_trls_column_names(cond1,cond2,fast_rt,slow_rt,blocks,'excl',weighted)
if biat == True:
df_orig=df.copy()
#This finds all unique trials numbers, sorts them and must be greater than the 4th item
df=df[df[biat_trl_num]>=sorted(df[biat_trl_num].unique())[biat_rmv_xtrls]]
df.loc[(df[rt]>2000)&(df[rt]<10000),rt]=2000
df.loc[df[rt]<400,rt]=400
#Make dfs where trials that are too fast or too slow are removed
df_fastslow_rts_rmvd=df[-(df[rt]>=slow_rt)]
if biat == False:
df_fastslow_rts_rmvd=df_fastslow_rts_rmvd[-(df_fastslow_rts_rmvd[rt]<fast_rt)]
#Get error and fast\slow trials
outcms=get_error_fastslow_rates(df,correct,subject,condition,block,cond1,cond2,blocks,flag_outformat,include_blocks,\
rt,fast_rt,slow_rt,error_or_correct,weighted,errors_after_fastslow_rmvd,df_fastslow_rts_rmvd,biat)
#Figure out number of trials after removing fast\slow rt trials
#in each block and total number of fast and slow trials (and remove them)
pre_trl_count_vars=blcnd_extract(df,rt,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='count',include_blocks=include_blocks)
pre_trl_count_vars.columns=block_num_col_names_incl
post_trl_count_vars=blcnd_extract(df_fastslow_rts_rmvd,rt,subject,condition,block,cond1,cond2,blocks,biat,flag_outformat='count',include_blocks=include_blocks)
post_trl_count_vars.columns=block_num_col_names_excl
if weighted == True:
##Cutoffs for the pct of errors or fast or slow trials that's considered excessive
cutoffs=[overall_err_cut,cond_err_cut,cond_err_cut]
cutoffs.extend(list(np.repeat(block_err_cut,len(blocks))))
cutoffs.extend([overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut])
cutoffs.extend(list(np.repeat(block_fastslowRT_cut,len(blocks))))
cutoffs.extend([overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut])
cutoffs.extend(list(np.repeat(block_fastslowRT_cut,len(blocks))))
cutoffs.append(num_blocks_cutoff)
elif weighted == False:
##Cutoffs for the pct of errors or fast or slow trials that's considered excessive
cutoffs=[overall_err_cut,cond_err_cut,cond_err_cut,\
overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut,\
overall_fastslowRT_cut,cond_fastslowRT_cut,cond_fastslowRT_cut]
#Put together and put into rates - containing just the rates -
#and flags (i.e. whether the rate ) is over a threshold
flags=pd.DataFrame(columns=flag_col_names,index=(df.groupby([subject])[subject].apply(lambda x: x.unique()[0])).tolist())
rates=pd.concat(outcms,axis=1)
rates.columns=col_names
for col,fcol,cutoff in zip(col_names,flag_col_names,cutoffs):
if col!='num_blocks':
flags.loc[:,fcol]=((rates[col]>cutoff)*1)
elif col=='num_blocks':
flags.loc[:,fcol]=((rates[col]<cutoff)*1)
flags['iat_flag']=flags.sum(axis=1)
all_num_trl_per_block=pd.concat([pre_trl_count_vars,post_trl_count_vars],axis=1)
#Get D scores with df with removed fast\slow trials
d=iat_get_dscore(df_fastslow_rts_rmvd,subject,rt,block,condition,cond1,cond2,blocks,weighted,biat,each_stim,stimulus)
all_iat_out = pd.concat([all_num_trl_per_block,rates,flags,d],axis=1)
if each_stim==False:
all_iat_out.loc[all_iat_out.dscore.isnull(),'iat_flag']=all_iat_out.loc[all_iat_out.dscore.isnull(),'iat_flag']+1
#Print output to excel
if print_to_excel==True:
from datetime import datetime
dt=datetime.now()
dt=dt.strftime('%m_%d_%Y_%H_%M_%S')
iat_excel = pd.ExcelWriter('pyiat_output_%s.xlsx'%dt)
all_iat_out.to_excel(iat_excel,sheet_name='pyiat')
if fastslow_stats == True:
if biat == True:
df=df_orig
all_fast_slow_rt=overall_fast_slow_stats(df,rt,fast_rt,slow_rt,subject,flags)
if print_to_excel==True:
all_fast_slow_rt.to_excel(iat_excel,sheet_name='Num_Pct_Fast_Slow_RT_Trials')
iat_excel.save()
return(all_iat_out,all_fast_slow_rt)
elif fastslow_stats == False:
if print_to_excel==True:
iat_excel.save()
return(all_iat_out)
| amillner/pyiat | pyiat/pyiat.py | pyiat.py | py | 32,040 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.IndexSlice",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.IndexSlice",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pandas.concat",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.In... |
# Standard library
import hashlib

# Django
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.http import HttpResponse
from django.http import HttpResponse  # kept: duplicated in the original file
from django.http import Http404  # added: used by the plan-validation guards below
from django.shortcuts import render,redirect
from django.template import loader
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt

# Project
from bitasync_site.models import Data_Transfer_Plan
from coupons.models import Coupon
from models import Purchase,PendingPurchase
from payline_dotir.payment_gateway import send_url, get_result
from payline_dotir.settings import SEND_URL_FINAL, PAYLINE_DOTIR_API_FINAL
from user_profile.models import UserProfile
from utilities import utility_functions
from utilities.utility_functions import generate_md5_hash
@login_required
def pay_for_a_plan(request, plan_name):
    """Render the checkout page for a single data-transfer plan.

    Looks up the plan by its short code, builds a display price that
    reflects the user's existing coupons (if any), and renders the
    payment template.
    """
    # Reject anything outside the known plan codes. BUG fix: Http404 was
    # previously referenced without being imported, so an invalid plan
    # raised NameError instead of a 404.
    valid_plans = ("L1", "L2", "L5", "U1", "U3", "U6")
    if plan_name not in valid_plans:
        raise Http404("Data transfer selected is not valid.")
    # Resolve the plan object for the selected code.
    all_plans = Data_Transfer_Plan.objects.all()
    plan = utility_functions.get_plan_by_name(all_plans, plan_name)
    # Coupons owned by the requesting user drive the discounted price.
    user_profile = UserProfile.objects.get(user=request.user)
    user_existing_coupons = Coupon.objects.filter(user_profile=user_profile)
    selected_plan = utility_functions.create_temp_plan(plan, user_existing_coupons)
    context = {
        'selected_plan': selected_plan,
        'coupon_available': bool(user_existing_coupons),
    }
    if user_existing_coupons:
        context['existing_coupons'] = user_existing_coupons
        # NOTE: a dead `get_best_coupon` call was removed here -- its result
        # was never used; the best coupon is consumed in the success view.
    return render(request, 'payment/pay_for_a_plan.html', context)
@login_required
def initialise_payment_payline(request, plan_name):
    """Start a payline.ir payment for the selected plan.

    Records a PendingPurchase (so the gateway callback can finish the
    sale), asks the gateway for a payment URL, and redirects the user.
    """
    # Reject anything outside the known plan codes. BUG fix: Http404 was
    # previously referenced without being imported (NameError at runtime).
    valid_plans = ("L1", "L2", "L5", "U1", "U3", "U6")
    if plan_name not in valid_plans:
        raise Http404("Data transfer selected is not valid.")
    # Resolve the plan object for the selected code.
    all_plans = Data_Transfer_Plan.objects.all()
    plan = utility_functions.get_plan_by_name(all_plans, plan_name)
    # The user's coupons decide whether the discounted price applies.
    user_profile = UserProfile.objects.get(user=request.user)
    user_existing_coupons = Coupon.objects.filter(user_profile=user_profile)
    selected_plan = utility_functions.create_temp_plan(plan, user_existing_coupons)
    # Persist a pending purchase; its hashcode ties the gateway callback
    # back to this initiation.
    pending_purchase = PendingPurchase()
    pending_purchase.data_transfer_plan = plan
    pending_purchase.user = request.user
    pending_purchase.save()
    # Charge the discounted price only when a coupon exists.
    if user_existing_coupons:
        amount = selected_plan.discounted_price
    else:
        amount = selected_plan.original_price
    # The gateway redirects back here, carrying the pending purchase id.
    redirect_url = 'http://gooshibegooshi.com/payment/result_payline/' + pending_purchase.hashcode + '/'
    gateway_url = send_url(amount, redirect_url, SEND_URL_FINAL, PAYLINE_DOTIR_API_FINAL)
    return redirect(gateway_url)
@csrf_exempt
def result_payline(request, pending_purchase_hashcode):
    """Gateway callback: verify the payline.ir transaction and finish checkout.

    A verified result (``1``) completes the purchase; anything else --
    including a failed verification call -- renders the failure page.
    The pending purchase is discarded either way.
    """
    trans_id = request.POST['trans_id']
    id_get = request.POST['id_get']
    final_result = get_result(PAYLINE_DOTIR_API_FINAL, trans_id, id_get)
    # Recover the initiation state recorded before the redirect.
    pending_purchase = PendingPurchase.objects.get(hashcode=pending_purchase_hashcode)
    user_profile = UserProfile.objects.get(user=pending_purchase.user)
    user_existing_coupons = Coupon.objects.filter(user_profile=user_profile)
    selected_plan = utility_functions.create_temp_plan(
        pending_purchase.data_transfer_plan, user_existing_coupons)
    context = {'selected_plan': selected_plan}
    if final_result is not None and int(final_result) == 1:
        response = pay_for_a_plan_success(
            request, pending_purchase, context, user_existing_coupons, selected_plan)
    else:
        response = pay_for_a_plan_failure(request, context)
    # The pending record has served its purpose regardless of the outcome.
    pending_purchase.delete()
    return response
def pay_for_a_plan_success(request,pending_purchase,context,user_existing_coupons,selected_plan):
    """Finalize a verified payment: record the Purchase, consume the best
    coupon, e-mail a receipt, and render the success page.

    Called only from result_payline after the gateway confirmed payment.
    Mutates `context` by adding 'follow_up_number'.
    """
    # Record the completed purchase, copying the plan/user from the
    # pending record created at initiation time.
    new_purchase = Purchase()
    new_purchase.user = pending_purchase.user
    new_purchase.data_transfer_plan = pending_purchase.data_transfer_plan
    # Coupons entitle the user to the discounted price.
    if user_existing_coupons:
        new_purchase.amount_paid = selected_plan.discounted_price
    else:
        new_purchase.amount_paid = selected_plan.original_price
    new_purchase.remaining_allowance_frequency = pending_purchase.data_transfer_plan.freq
    new_purchase.save()
    # The follow-up number is derived from the DB id, which only exists
    # after the first save -- hence the save/update/save sequence.
    follow_up_number = generate_md5_hash(str(new_purchase.id))
    new_purchase.follow_up_number = follow_up_number
    new_purchase.save()
    context['follow_up_number'] = follow_up_number
    # A used coupon is single-shot: remove the best one that was applied.
    if user_existing_coupons:
        best_coupon = utility_functions.get_best_coupon(user_existing_coupons)
        best_coupon.delete()
    # Send the receipt e-mail (plain-text body with an HTML alternative).
    plaintext = loader.get_template('payment/pay_for_a_plan_complete_email.txt')
    htmly = loader.get_template('payment/pay_for_a_plan_complete_email.html')
    subject = loader.get_template('payment/pay_for_a_plan_complete_email_subject.html')
    # Header values must be single-line, so strip newlines from the subject.
    subject_content = subject.render(context).replace('\n',' ')
    text_content = plaintext.render(context)
    html_content = htmly.render(context)
    from_email = 'sales@gooshibegooshi.com'
    recipient_list = [new_purchase.user.email]
    msg = EmailMultiAlternatives(subject_content, text_content, from_email, recipient_list)
    msg.attach_alternative(html_content, "text/html")
    msg.send()
    # return response to the user.
    return render(request,'payment/successful_payment.html',context)
def pay_for_a_plan_failure(request, context):
    """Render the payment-failed page; no purchase is recorded."""
    template_name = 'payment/failed_payment.html'
    return render(request, template_name, context)
| bitapardaz/bitasync | payment/views.py | views.py | py | 6,561 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bitasync_site.models.Data_Transfer_Plan.objects.all",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "bitasync_site.models.Data_Transfer_Plan.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "bitasync_site.models.Data_Transfer_Plan"... |
73421338023 |
import sys
import json
import buildCNNModel as cnn
from loadutils import retrieve_model, loadProcessedData, saveDevPredictionsData
from evaluation_helper import convert_raw_y_pred, get_f1, get_precision, get_recall
import numpy as np
def printUsage():
    """Print command-line usage for the CNN training script."""
    usage_lines = (
        "USAGE:\n\ntrain a CNN model",
        "All training data must have already been saved with loadutils.saveProcessedData()",
        "<model name> <hyper parameters file (JSON)> ",
    )
    for usage_line in usage_lines:
        print(usage_line)
def main():
    """Train a CNN NER model and evaluate it on the dev set.

    command line arguments:
    <model name> <hyper parameters file (JSON)>

    Expects preprocessed arrays saved earlier via
    loadutils.saveProcessedData(); writes the trained architecture and
    weights under ./result/ and dev predictions under dev_Predictions/.
    """
    if len(sys.argv) < 3:
        printUsage()
        return -1
    modelName = sys.argv[1]
    with open(sys.argv[2]) as fp:
        hypers = json.load( fp)
    # Load every preprocessed array in one shot (train/dev inputs, labels,
    # pretrained embeddings, and decoder targets unused by the CNN).
    trainX, trainX_capitals_cat, trainX_pos_cat, devX, devX_capitals_cat, \
    devX_pos_cat, trainY_cat, devY_cat, embedding_matrix, train_decoderY, dev_decoderY = loadProcessedData()
    # construct the training dicts
    trainX_dict = {'x':trainX}
    devX_list_arrayS = [devX]
    trainY_dict = {'out_pred':trainY_cat}
    devY_list_arrayS = [devY_cat]
    # for final prediction
    devX_dict = {'x':devX} #for model_eval only
    # Optional auxiliary inputs are toggled by the hyper-parameter file.
    if hypers["use_pos_tags"]:
        trainX_dict["x_pos"] = trainX_pos_cat
        devX_list_arrayS += [devX_pos_cat]
        devX_dict["x_pos"] = devX_pos_cat #for model_eval only
    if hypers['use_capitalization_info']:
        trainX_dict["x_capital"] = trainX_capitals_cat
        devX_list_arrayS += [devX_capitals_cat]
        devX_dict["x_capital"] = devX_capitals_cat #for model_eval only
    model = cnn.draw_cnn_model( hyper_param=hypers, embedding_matrix=embedding_matrix, verbose=True)
    model = cnn.compile_cnn_model( hypers, model)
    print( "Training Model:", modelName)
    cnn.fit_model( hypers, model, modelName, trainX_dict, devX_list_arrayS, trainY_dict, devY_list_arrayS)
    # save the last model in each epoch and its weights
    with open('./result/'+ modelName + '_model_architecture.json', 'w') as f:
        f.write(model.to_json())
    model.save_weights('./result/' + modelName + '_weights_model.h5')
    # Evaluate on the dev set: convert one-hot rows to class indices, then
    # report precision/recall/F1.
    raw_y_pred = model.predict(devX_dict, verbose=1)
    y_true = convert_raw_y_pred(devY_cat)
    print ("prediction on dev set finished. raw 1-hot prediction has shape {}".format(raw_y_pred.shape))
    y_pred = convert_raw_y_pred(raw_y_pred)
    print ("prediction converted to class idx has shape {}".format(y_pred.shape))
    precision = get_precision(y_true, y_pred)
    recall = get_recall(y_true, y_pred)
    f1_score = get_f1(y_true, y_pred)
    print ("precision on dev = {}".format(precision))
    print ("recall on dev = {}".format(recall))
    print ("f1 score on dev = {}".format(f1_score))
    # write out dev predictions (decoder embeddings are empty for the CNN)
    modelsDir = 'dev_Predictions'
    print ("saving prediction data under directory: {}".format(modelsDir))
    saveDevPredictionsData(modelName=modelName, raw_y_pred=raw_y_pred, raw_y_pred_decoder_embeddings=np.empty(0), y_pred=y_pred, modelsDir=modelsDir)
    print ("please use loadutils.loadDevPredictionsData(modelName, modelsDir='dev_Predictions') to load :\n raw_y_pred, raw_y_pred_decoder_embeddings(empty array for CNN), y_pred")
if __name__ == '__main__':
main()
| Chucooleg/CapsNet_for_NER | code/trainCNNModel.py | trainCNNModel.py | py | 3,296 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_numbe... |
41644824235 | from pathlib import Path
import string
import unicodedata
import time
import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam
def find_files(path, pattern):
    """Yield every path under *path* matching the glob *pattern*."""
    yield from Path(path).glob(pattern)
names_dir = './datasets/data/names'
pat = '*.txt'
print(list(find_files(names_dir, pat)))
letters = string.ascii_letters + " .,;'"
n_letters = len(letters)
def unicode_to_ascii(s):
    """Strip combining marks and drop characters outside the letter alphabet."""
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        # 'Mn' = nonspacing combining mark (e.g. the accent split off by NFD).
        if unicodedata.category(ch) != 'Mn' and ch in letters:
            kept.append(ch)
    return ''.join(kept)
print(unicode_to_ascii('Ślusàrski'))
def read_lines(path):
    """Read a text file and ASCII-fold every line (newlines are dropped
    by unicode_to_ascii since they are not in the letter alphabet)."""
    with open(path, encoding='utf-8') as handle:
        return [unicode_to_ascii(raw) for raw in handle]
# Build the category -> example-names index from the name files.
categories = []
category_lines = {}
for f in find_files(names_dir, pat):
    # Each file is named "<Language>.txt"; the stem is the category label.
    category = f.name.split('.')[0]
    categories.append(category)
    lines = read_lines(f)
    category_lines[category] = lines
n_categories = len(categories)
# Sanity check (raises KeyError if the data files were not found).
print(category_lines['Italian'][:5])
def letter_to_tensor(letter):
    """One-hot encode a single character as a (1, n_letters) tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letters.index(letter)] = 1
    return one_hot
def line_to_tensor(line):
    """Stack per-letter one-hot rows into a (len(line), 1, n_letters) tensor."""
    rows = [letter_to_tensor(ch) for ch in line]
    return torch.cat(rows).view(len(line), 1, -1)
class RNN(nn.Module):
    """Single-layer GRU classifier over one-hot letter sequences.

    The whole sequence is consumed in one GRU call; the final hidden
    state is projected to per-category log-probabilities.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Submodule creation order matters only for parameter init RNG.
        self.gru = nn.GRU(input_size, hidden_size)
        self.h2o = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def init_hidden(self, batch_size):
        """Zero initial hidden state shaped (num_layers=1, batch, hidden)."""
        return torch.zeros(1, batch_size, self.hidden_size)

    def forward(self, input):
        n_batch = input.size(1)
        _, final_state = self.gru(input, self.init_hidden(n_batch))
        logits = self.h2o(final_state).view(n_batch, -1)
        return self.softmax(logits)
def random_choice(options):
    """Pick one element of *options* uniformly (NumPy global RNG)."""
    return np.random.choice(options)
def random_training_example():
    """Draw a random (category, name) pair plus tensor encodings for training.

    Returns (category name, raw line, label tensor, one-hot line tensor).
    """
    cat_idx = np.random.randint(n_categories)
    category = categories[cat_idx]
    line = random_choice(category_lines[category])
    category_tensor = torch.tensor([cat_idx], dtype=torch.long)
    return category, line, category_tensor, line_to_tensor(line)
def category_from_output(output):
    """Translate a network output row into (category name, index)."""
    best = output.argmax().item()
    return categories[best], best
def time_since(since):
    """Format the wall-clock seconds elapsed since *since* as 'Xm Ys'.

    Replaces the original np.floor arithmetic with stdlib divmod -- same
    output, no NumPy needed for scalar math.
    """
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
# Model and optimiser setup. NLLLoss pairs with the LogSoftmax output.
hidden_size = 128
rnn = RNN(n_letters, hidden_size, n_categories)
criterion = nn.NLLLoss()
lr = 0.005
optimizer = Adam(rnn.parameters(), lr)
n_iters = 100000
print_every = 5000
plot_every = 1000
current_loss = 0
all_losses = []
start = time.time()
# One-sample-at-a-time training: each iteration draws a random name and
# takes a single Adam step on it.
for it in range(1, n_iters + 1):
    category, line, category_tensor, line_tensor = random_training_example()
    optimizer.zero_grad()
    output = rnn(line_tensor)
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()
    current_loss += loss.item()
    # Print iter number, loss, name and guess
    if it % print_every == 0:
        guess, guess_i = category_from_output(output)
        correct = '√' if guess == category else '× (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (it, it / n_iters * 100, time_since(start), loss, line, guess, correct))
    # Add current loss avg to list of losses
    if it % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
# NOTE(review): `plt` and `ticker` are used below but never imported in
# this file -- `import matplotlib.pyplot as plt` and
# `import matplotlib.ticker as ticker` must be added at the top of the
# file for this section to run.
plt.plot(all_losses)
# Build a confusion matrix: rows are true categories, columns predictions.
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = random_training_example()
    output = rnn(line_tensor)
    guess, guess_i = category_from_output(output)
    category_i = categories.index(category)
    confusion[category_i][guess_i] += 1
# Normalise each row into a distribution over predicted categories.
# NOTE(review): a category never sampled above would divide by zero here.
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# BUG fix: the labels referenced an undefined name `all_categories`;
# the list of category names in this script is called `categories`.
ax.set_xticklabels([''] + categories, rotation=90)
ax.set_yticklabels([''] + categories)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
| sbl1996/pytorch-snippets | char_lstm.py | char_lstm.py | py | 4,373 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "unicodedata.normalize",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "unicoded... |
32694277113 | import forecast
import send_sms
from datetime import datetime
# Since the api call is made at 6:00 AM, hourly_forecast[0] is 6 AM
def main():
    """Build today's weather summary for Goleta, CA and text it out.

    On weekdays, scans the hourly forecast during that day's working
    window for rain and finds the min/max temperatures, then sends the
    summary by SMS.
    """
    # Working-hours window (24h clock) for Mon..Fri.
    start_times = [8, 8, 8, 8, 8]
    end_times = [18, 16, 18, 18, 10]
    day_of_week = datetime.today().weekday()
    message = ""
    phone_number = "+19257877379"
    # The API call is made at 6:00 AM, so hourly_forecast[0] is 6 AM and
    # hour-of-day h maps to index h - 6.
    hourly_forecast = forecast.get_hourly_forecast("CA", "Goleta")
    # Replaces the original `for i in range(5): if dayOfWeek == i:` with
    # the equivalent direct weekday test.
    if day_of_week < 5:
        start = start_times[day_of_week]
        end = end_times[day_of_week]
        min_temp = int(hourly_forecast[start - 6]['temp']['english'])
        max_temp = int(hourly_forecast[start - 6]['temp']['english'])
        min_temp_time = start
        max_temp_time = end
        for j in range(start - 6, end - 5):
            if "Rain" in hourly_forecast[j]['condition']:
                # BUG fix: the hour was printed as j % 12, but index j
                # corresponds to hour j + 6 (24h, matching the min/max lines).
                message += "Rain forecasted at " + str(j + 6) + ":00. "
            temp = int(hourly_forecast[j]['temp']['english'])
            if temp < min_temp:
                min_temp = temp
                min_temp_time = j + 6
            if temp > max_temp:
                max_temp = temp
                max_temp_time = j + 6
        message += "Min temp today is " + str(min_temp) + " at " \
            + str(min_temp_time) + ":00. "
        message += "Max temp today is " + str(max_temp) + " at " \
            + str(max_temp_time) + ":00. "
    # NOTE(review): on weekends an empty message is still sent -- confirm
    # whether the SMS should be skipped on Sat/Sun.
    send_sms.send_message(phone_number, message)
# checked hours should depend on day of the week
if __name__ == '__main__':
main()
| kailashbaas/Weather-SMS | main.py | main.py | py | 1,681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "forecast.get_hourly_forecast",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "... |
34535894055 | #!/usr/bin/python
# open a microphone in pyAudio and get its FFT spectrum
import pyaudio
import numpy as np
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.08
GLIDING_DIVIDER = 4
INPUT_FRAMES_PER_BLOCK = int(RATE*INPUT_BLOCK_TIME/GLIDING_DIVIDER)
soundtype = np.dtype([('l',np.int16),('r',np.int16)])
class Listener(object):
    """Captures microphone audio via PyAudio and exposes a gliding FFT window.

    The buffer holds GLIDING_DIVIDER sub-blocks spanning INPUT_BLOCK_TIME
    seconds; each get_spectrum() call slides one new sub-block in.
    """

    def __init__(self):
        self.pa = pyaudio.PyAudio()
        self.stream = self.open_mic_stream()
        # Prime the gliding buffer with one full INPUT_BLOCK_TIME of audio.
        raw = self.listen()
        for i in range(1, GLIDING_DIVIDER):
            raw += self.listen()
        self.buf = self._to_mono(raw)

    @staticmethod
    def _to_mono(raw):
        """Decode interleaved 16-bit stereo bytes and average to one channel.

        Uses np.frombuffer (np.fromstring on binary data is deprecated) and
        casts to float64 before adding: int16 + int16 would wrap around on
        loud input.
        """
        stereodata = np.frombuffer(raw, soundtype)
        return (stereodata['l'].astype(np.float64) + stereodata['r']) / 2

    def stop(self):
        """Close the input stream (the PyAudio instance itself stays open)."""
        self.stream.close()

    def open_mic_stream(self):
        """Open the default input device with the module-level audio settings."""
        stream = self.pa.open(format=FORMAT,
                              channels=CHANNELS,
                              rate=RATE,
                              input=True,
                              input_device_index=None,
                              frames_per_buffer=INPUT_FRAMES_PER_BLOCK)
        return stream

    def listen(self):
        """Read one block of raw bytes from the stream.

        NOTE(review): returns None on IOError, which makes the callers'
        frombuffer call raise -- confirm whether dropped blocks should be
        retried instead.
        """
        try:
            block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
        except IOError:
            return
        return block

    def get_spectrum(self):
        """Slide the newest block into the buffer and return FFT magnitudes.

        Index i of the result corresponds roughly to frequency
        i / INPUT_BLOCK_TIME (the buffer spans INPUT_BLOCK_TIME seconds).
        """
        monodata = self._to_mono(self.listen())
        # Shift the old samples left and append the new block at the end.
        self.buf[:-len(monodata)] = self.buf[len(monodata):]
        self.buf[-len(monodata):] = monodata
        return abs(np.fft.rfft(self.buf))
| maralorn/pythonlights | sound.py | sound.py | py | 1,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.dtype",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
... |
9571225314 | from wq.db import rest
from .models import Site, AssessmentType, Assessment, Map
from .serializers import AssessmentTypeSerializer, AssessmentSerializer, MapSerializer
from django.conf import settings
# Register Site with the wq router: uncached, all fields, auto map layers
# for list/detail/edit views.
rest.router.register_model(
    Site,
    fields="__all__",
    cache="none",
    map=[{
        'mode': 'list',
        'autoLayers': True,
    }, {
        'mode': 'detail',
        'autoLayers': True,
    }, {
        'mode': 'edit',
        'autoLayers': True,
    }],
    # partial=True,
)
# Assessment types use a custom serializer; default caching applies.
rest.router.register_model(
    AssessmentType,
    serializer=AssessmentTypeSerializer,
    fields="__all__",
)
# this could enable filtering of own assessments
def user_filter(qs, request):
    """Restrict *qs* to rows owned by the requesting user (empty for anonymous).

    NOTE(review): is_authenticated is *called* here; on Django >= 1.10 it
    is a property, so confirm the project's Django version.
    """
    if not request.user.is_authenticated():
        return qs.none()
    return qs.filter(user=request.user)
# Assessments: uncached, custom serializer, auto map layers for
# list/detail views.
rest.router.register_model(
    Assessment,
    serializer=AssessmentSerializer,
    fields="__all__",
    cache="none",
    map=[{
        'mode': 'list',
        'autoLayers': True,
    }, {
        'mode': 'detail',
        'autoLayers': True,
    }],
)
rest.router.register_model(
    Map,
    serializer=MapSerializer,
    fields="__all__",
)
# Static pages: the site index and a bare locate page with an empty map.
rest.router.add_page('index', {'url': ''})
rest.router.add_page('locate', {
    'url': 'locate',
    'map': {'layers': []},
    'locate': True
})
# Expose the Mapbox token to the client-side config.
rest.router.set_extra_config(
    mapbox_token=settings.MAPBOX_TOKEN,
)
| erikriver/disasters | db/assessments/rest.py | rest.py | py | 1,386 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wq.db.rest.router.register_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Site",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "wq.db.rest.router",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "... |
7706234863 | from pathlib import Path
from ruamel.yaml import YAML
yaml = YAML()
def get_tasks_files():
    """Find task and handler YAML files anywhere under the current directory."""
    patterns = ("tasks/*.yaml", "tasks/*.yml", "handlers/*.yaml", "handlers/*.yml")
    found = []
    for pattern in patterns:
        found.extend(Path(".").rglob(pattern))
    return found
# Take a list as input, for each item find a key that contains dots, split the key
# by dots and if the resulting list has 3 items, return the key
def get_module_from_list(data: list):
    """Return the FQCN module key (namespace.collection.module) of each task.

    A task's module is its first key containing exactly two dots; tasks
    without one are reported to stdout and skipped.
    """
    modules: list[str] = []
    for task in data:
        found = next(
            (key for key in task if "." in key and len(key.split(".")) == 3),
            None,
        )
        if found is None:
            print(f"module not found for task {task.get('name')}")
        else:
            modules.append(found)
    return modules
# Take a Path object as input, read the content, parse it with ruamel.yaml
# and for each dict in the resulting list, return the key that contains dots
def get_modules_from_file(file: Path):
    """Parse one YAML task file and return the FQCN modules it uses.

    Non-files and empty/unparseable-to-falsy documents yield an empty list.
    """
    if not file.is_file():
        return []
    with open(file, "r") as handle:
        parsed = yaml.load(handle)
    return get_module_from_list(parsed) if parsed else []
# find all modules used in tasks and handlers
def get_modules():
    """Collect FQCN modules across every discovered tasks/handlers file."""
    return [m for f in get_tasks_files() for m in get_modules_from_file(f)]
# Take a list as input, split each item by dots and return a set of the first 2 items
def get_collections(modules: list[str]):
    """Reduce fully-qualified module names to their namespace.collection part."""
    return {".".join(name.split(".")[:2]) for name in modules}
print(get_collections(get_modules()))
| jonsible/iac | find_modules.py | find_modules.py | py | 1,795 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ruamel.yaml.YAML",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_num... |
10227649807 | """The page module holds the Page class for the web page factory"""
from pathlib import Path
from typing import List
from factory.elements import Element
class Page:
    """Page class holds elements of a web page.

    Aggregates element objects and can compile and write them out as a
    single HTML document under the ``templates`` directory.
    """
    def __init__(self, name: str, route: str, elements: List[Element]) -> None:
        """Create the Page instance.

        Args:
            name: Human-readable page name (not used in path construction).
            route: Relative route; becomes the output file stem.
            elements: Ordered page elements rendered top to bottom.
        """
        self.name = name
        self.route = Path(route)
        self.elements = elements
    @property
    def html(self) -> str:
        """Compile HTML from each of the page elements."""
        out = ["<!doctype html>"]
        for element in self.elements:
            # NOTE(review): `out += element.html` extends the list element
            # by element; if Element.html is a plain string this appends it
            # one character at a time (each then newline-joined below) --
            # confirm Element.html returns a list of lines.
            out += element.html
        return "\n".join(out)
    @property
    def html_path(self) -> Path:
        """Return the html path for the page (templates/<route>.html)."""
        return Path("templates").joinpath(self.route).with_suffix(".html")
    def to_html(self) -> None:
        """Write the Page's HTML out to ``html_path``.

        NOTE(review): mkdir() here is non-recursive, so a route nested more
        than one directory deep raises FileNotFoundError -- consider
        mkdir(parents=True, exist_ok=True).
        """
        if not self.html_path.parent.exists():
            self.html_path.parent.mkdir()
        with open(self.html_path, mode="w", encoding="utf-8") as outfile:
            outfile.writelines(self.html)
| brianjstroh/bstroh | factory/page.py | page.py | py | 1,113 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "factory.elements.Element",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
769348877 | from get_notes import get_notes
from model import create_network
import pandas as pd
import numpy
import json
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
def train_network():
    """End-to-end training entry point.

    Extracts (pitch, duration) note pairs, persists the raw notes and both
    vocabularies as JSON under data/, windows the notes into training
    sequences, builds the two-headed network, and trains it.
    """
    notes = get_notes()
    # Persist the raw notes so generation can reuse them without re-parsing.
    with open("data/notes.json", "w") as filename:
        json.dump(notes, filename)
    notes_df = pd.DataFrame(notes, columns=['pitch', 'duration'])
    pitches = notes_df['pitch']
    durations = notes_df['duration']
    # Vocabularies are sorted so index assignment is reproducible.
    pitch_vocab = sorted(set(item for item in pitches))
    duration_vocab = sorted(set(item for item in durations))
    with open("data/pitch_vocab.json", "w") as filename:
        json.dump(pitch_vocab, filename)
    with open("data/duration_vocab.json", "w") as filename:
        json.dump(duration_vocab, filename)
    # print("notes_df:")
    # print(notes_df)
    # Each training example sees look_back previous notes.
    look_back = 4
    in_pitches, in_durations, out_pitches, out_durations = prepare_sequences(notes_df, look_back)
    model = create_network(timesteps=look_back,
                           pitch_vocab_size=len(pitch_vocab),
                           duration_vocab_size=len(duration_vocab))
    model.summary()
    train(model, in_pitches, in_durations, out_pitches, out_durations)
def prepare_sequences(notes, look_back):
    """Window the note table into one-hot (input, target) training arrays.

    Args:
        notes: DataFrame with 'pitch' and 'duration' columns.
        look_back: number of preceding notes in each input window.

    Returns:
        Tuple (pitches_in, durations_in, pitches_out, durations_out) of
        one-hot numpy arrays; row i encodes notes i..i+look_back-1 as input
        and note i+look_back as the target.
    """
    pitches = notes['pitch']
    durations = notes['duration']
    # Sorted vocabularies make the token -> index mapping reproducible.
    pitch_vocab = sorted(set(item for item in pitches))
    duration_vocab = sorted(set(item for item in durations))
    print("pitch_vocab:")
    print(pitch_vocab)
    print("duration_vocab:")
    print(duration_vocab)
    pitch_to_int = dict((note, number) for number, note in enumerate(pitch_vocab))
    duration_to_int = dict((note, number) for number, note in enumerate(duration_vocab))
    pitches_in = []
    durations_in = []
    pitches_out = []
    durations_out = []
    # Slide a window of length look_back over the note sequence; the note
    # immediately after the window is the prediction target.
    for i in range(notes.shape[0] - look_back):
        pitch_sequence_in = pitches[i:(i + look_back)]
        pitch_sequence_out = pitches[i + look_back]
        duration_sequence_in = durations[i:(i + look_back)]
        duration_sequence_out = durations[i + look_back]
        pitches_in.append([pitch_to_int[char] for char in pitch_sequence_in])
        pitches_out.append(pitch_to_int[pitch_sequence_out])
        durations_in.append([duration_to_int[char] for char in duration_sequence_in])
        durations_out.append(duration_to_int[duration_sequence_out])
    pitches_in = numpy.array(pitches_in)
    durations_in = numpy.array(durations_in)
    pitches_out = numpy.array(pitches_out)
    durations_out = numpy.array(durations_out)
    # One-hot encode indices for the categorical cross-entropy heads.
    pitches_in = np_utils.to_categorical(pitches_in)
    durations_in = np_utils.to_categorical(durations_in)
    pitches_out = np_utils.to_categorical(pitches_out)
    durations_out = np_utils.to_categorical(durations_out)
    # print('\npitches_in:')
    # print(pitches_in)
    #
    # print('\npitches_out:')
    # print(pitches_out)
    #
    # print('\ndurations_in:')
    # print(durations_in)
    #
    # print('\ndurations_out:')
    # print(durations_out)
    return (pitches_in, durations_in, pitches_out, durations_out)
def train(model, pitch_in, duration_in, pitch_out, duration_out):
    """Fit the two-headed network, checkpointing weights whenever the
    training loss improves."""
    checkpoint = ModelCheckpoint(
        "weights/weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5",
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min',
    )
    model.fit(
        [pitch_in, duration_in],
        [pitch_out, duration_out],
        epochs=20,
        batch_size=16,
        callbacks=[checkpoint],
    )
if __name__ == '__main__':
train_network()
| tanelxen/riff-composer | train.py | train.py | py | 3,628 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "get_notes.get_notes",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_... |
29935238101 | import numpy as np
import matplotlib.pyplot as plt
import json
import matplotlib as mpl
import matplotlib.cm as cm
import cmocean
from colormaputil import truncate_colormap
def getMaxBracket(minYear, maxYear, data):
    """Largest bracket boundary over [minYear, maxYear), floored at 0.

    Scans each year's 'ranges' list in *data* (keyed by year-as-string);
    returns 0 when the year span is empty. Uses builtin max() instead of
    the original element-by-element comparison loop.
    """
    curMax = 0
    for year in range(minYear, maxYear):
        ranges = data[str(year)]['ranges']
        if ranges:
            curMax = max(curMax, max(ranges))
    return curMax
def getColour(year, i, m, data):
return m.to_rgba(data[year]['ranges'][i])
maxYear = 2018
minYear = 1985
json_data = open('canada.json').read()
data = json.loads(json_data)
norm = mpl.colors.Normalize(vmin=0, vmax=getMaxBracket(minYear, maxYear, data))
cmap = truncate_colormap(cmocean.cm.phase, 0.35, 1)
m = cm.ScalarMappable(norm=norm, cmap=cmap)
ind = np.arange(maxYear-minYear)
for year in range(minYear, maxYear):
before = [0] * (year - minYear)
after = [0] * (maxYear - year-1)
rates = data[str(year)]['rates']
previous = 0
for i in range(len(rates)):
height = [rates[i]-previous]
plt.bar(ind, tuple(before + height + after), 1,
color=getColour(str(year), i, m, data), bottom=previous, linewidth=0)
previous = rates[i]
m._A = []
small = 9
medium = 11
large = 12
clb = plt.colorbar(m, format='>$%d', ticks=[a for a in range(0, getMaxBracket(minYear, maxYear, data), 10000)])
clb.set_label('Tax Bracket (CAD):', labelpad=-40, y=1.06, rotation=0, fontsize=large)
clb.ax.tick_params(labelsize=medium)
plt.xlim([0, maxYear-minYear])
plt.title('% Personal Income Federally Taxed in Canada, 1985-2017', fontsize=large)
plt.ylabel('% Tax\nApplied', fontsize=large, rotation=0, labelpad=25)
plt.xticks(ind, [a for a in range(minYear, maxYear)], rotation=60, fontsize=small, y=0.01)
plt.yticks(fontsize=medium)
plt.gca().yaxis.grid(which='major', linestyle='-', linewidth=0.8)
plt.gca().xaxis.grid(which='major', linestyle='-', linewidth=0.5)
plt.gca().yaxis.grid(which='minor', linestyle='-', linewidth=0)
plt.gca().xaxis.grid(False, which='minor')
plt.gca().tick_params(axis='x', which='both', length=0)
plt.xlabel("github.com/rosslh/historical-tax-rate-visualizor", fontsize=small, color='#777777')
plt.minorticks_on()
plt.savefig('figure.png', dpi=400)
| rosslh/Historical-Tax-Rate-Visualizor | plot.py | plot.py | py | 2,244 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "colorma... |
16049471546 | import subprocess
import os
import logging
import platform
from tqdm import tqdm
from ffcuesplitter.exceptions import FFMpegError, FFCueSplitterError
from ffcuesplitter.utils import makeoutputdirs, Popen
if not platform.system() == 'Windows':
import shlex
class FFMpeg:
"""
FFMpeg is the base class interface for FFCueSplitter.
It represents FFmpeg command and arguments with their
sub-processing. Note: Opus sample rate is always 48kHz for
fullband audio.
"""
DATACODECS = {'wav': 'pcm_s16le -ar 44100',
'flac': 'flac -ar 44100',
'ogg': 'libvorbis -ar 44100',
'opus': 'libopus',
'mp3': 'libmp3lame -ar 44100',
}
def __init__(self, **kwargs):
"""
Constructor
"""
self.kwargs = kwargs
self.outsuffix = None
# -------------------------------------------------------------#
def codec_setup(self, sourcef):
"""
Returns codec arg based on given format
Raises:
FFCueSplitterError from KeyError
if an unsupported format is given.
Returns:
tuple(codec, outsuffix)
"""
if self.kwargs['outputformat'] == 'copy':
self.outsuffix = os.path.splitext(sourcef)[1].replace('.', '')
codec = '-c copy'
else:
try:
self.outsuffix = self.kwargs['outputformat']
codec = f'-c:a {FFMpeg.DATACODECS[self.outsuffix]}'
except KeyError as error:
msgerr = f"Unsupported format '{self.outsuffix}'"
raise FFCueSplitterError(f'{msgerr}') from error
return codec, self.outsuffix
# -------------------------------------------------------------#
def commandargs(self, audiotracks: (list, tuple)) -> dict:
"""
Builds the FFmpeg command argument string and assign
the corresponding duration and name to each audio track.
It expects a list type object.
Returns:
dict(recipes)
"""
data = []
meters = {'tqdm': '-progress pipe:1 -nostats -nostdin', 'standard': ''}
for track in audiotracks:
codec, suffix = self.codec_setup(track["FILE"])
metadata = {'ARTIST': track.get('PERFORMER', ''),
'ALBUM': track.get('ALBUM', ''),
'TITLE': track.get('TITLE', ''),
'TRACK': (str(track['TRACK_NUM'])
+ '/' + str(len(audiotracks))),
'DISCNUMBER': track.get('DISCNUMBER', ''),
'GENRE': track.get('GENRE', ''),
'DATE': track.get('DATE', ''),
'COMMENT': track.get('COMMENT', ''),
'DISCID': track.get('DISCID', ''),
}
cmd = f'"{self.kwargs["ffmpeg_cmd"]}" '
cmd += f' -loglevel {self.kwargs["ffmpeg_loglevel"]}'
cmd += f" {meters[self.kwargs['progress_meter']]}"
fpath = os.path.join(self.kwargs["dirname"], track["FILE"])
cmd += f' -i "{fpath}"'
cmd += f" -ss {round(track['START'] / 44100, 6)}" # ff to secs
if 'END' in track:
cmd += f" -to {round(track['END'] / 44100, 6)}" # ff to secs
for key, val in metadata.items():
cmd += f' -metadata {key}="{val}"'
cmd += f' {codec}'
cmd += f" {self.kwargs['ffmpeg_add_params']}"
cmd += ' -y'
num = str(track['TRACK_NUM']).rjust(2, '0')
name = f'{num} - {track["TITLE"]}.{suffix}'
cmd += f' "{os.path.join(self.kwargs["tempdir"], name)}"'
args = (cmd, {'duration': track['DURATION'], 'titletrack': name})
data.append(args)
return {'recipes': data}
# --------------------------------------------------------------#
def command_runner(self, arg, secs):
"""
Redirect to required runner. Note: tqdm command args
is slightly different from standard command args because
tqdm adds `-progress pipe:1 -nostats -nostdin` to arguments,
see `meters` on `commandargs`.
This method must return if the `dry` keyword arg is true.
"""
if self.kwargs['progress_meter'] == 'tqdm':
cmd = arg if platform.system() == 'Windows' else shlex.split(arg)
if self.kwargs['dry'] is True:
return cmd
self.run_ffmpeg_command_with_progress(cmd, secs)
elif self.kwargs['progress_meter'] == 'standard':
cmd = arg if platform.system() == 'Windows' else shlex.split(arg)
if self.kwargs['dry'] is True:
return cmd
self.run_ffmpeg_command(cmd)
return None
# --------------------------------------------------------------#
def run_ffmpeg_command_with_progress(self, cmd, seconds):
"""
Run FFmpeg sub-processing showing a tqdm progress meter
for each loop. Also writes a log file to the output
destination directory.
Usage for get elapsed seconds:
progbar = tqdm(total=round(seconds), unit="s", dynamic_ncols=True)
progbar.clear()
previous_s = 0
s_processed = round(int(output.split('=')[1]) / 1_000_000)
s_increase = s_processed - previous_s
progbar.update(s_increase)
previous_s = s_processed
Raises:
FFMpegError
Returns:
None
"""
makeoutputdirs(self.kwargs['outputdir']) # Make dirs for files dest.
progbar = tqdm(total=100,
unit="s",
dynamic_ncols=True
)
progbar.clear()
sep = (f'\nFFcuesplitter Command: {cmd}\n'
f'=======================================================\n\n')
try:
with open(self.kwargs['logtofile'], "a", encoding='utf-8') as log:
log.write(sep)
with Popen(cmd,
stdout=subprocess.PIPE,
stderr=log,
bufsize=1,
encoding='utf8',
universal_newlines=True) as proc:
for output in proc.stdout:
if "out_time_ms" in output.strip():
s_processed = int(output.split('=')[1]) / 1_000_000
percent = s_processed / seconds * 100
progbar.update(round(percent) - progbar.n)
if proc.wait(): # error
logging.error("Popen proc.wait() Exit status %s",
proc.wait())
progbar.close()
raise FFMpegError(f"ffmpeg FAILED, See log details: "
f"'{self.kwargs['logtofile']}'")
except (OSError, FileNotFoundError) as excepterr:
progbar.close()
raise FFMpegError(excepterr) from excepterr
except KeyboardInterrupt as err:
# proc.kill()
progbar.close()
proc.terminate()
msg = "[KeyboardInterrupt] FFmpeg process failed."
raise FFMpegError(msg) from err
progbar.close()
# --------------------------------------------------------------#
def run_ffmpeg_command(self, cmd):
"""
Run FFmpeg sub-processing with stderr output to console.
The output depending on the ffmpeg loglevel option.
Raises:
FFMpegError
Returns:
None
"""
makeoutputdirs(self.kwargs['outputdir']) # Make dirs for output files
sep = (f'\nFFcuesplitter Command: {cmd}\n'
f'=======================================================\n\n')
with open(self.kwargs['logtofile'], "a", encoding='utf-8') as log:
log.write(sep)
try:
subprocess.run(cmd, check=True, shell=False, encoding='utf8',)
except FileNotFoundError as err:
raise FFMpegError(f"{err}") from err
except subprocess.CalledProcessError as err:
raise FFMpegError(f"ffmpeg FAILED: {err}") from err
except KeyboardInterrupt as err:
msg = "[KeyboardInterrupt] FFmpeg process failed."
raise FFMpegError(msg) from err
| jeanslack/FFcuesplitter | ffcuesplitter/ffmpeg.py | ffmpeg.py | py | 8,582 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "platform.system",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "ffcuesplitter.exceptions... |
2265860444 | import os
import sys
import importlib
import pkgutil
from contextlib import contextmanager
from typing import TypeVar, Union, Generator
from pathlib import Path
PathType = Union[os.PathLike, str]
T = TypeVar("T")
ContextManagerFunctionReturnType = Generator[T, None, None]
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
@contextmanager
def push_python_path(path: PathType) -> ContextManagerFunctionReturnType[None]:
"""
Source: https://github.com/allenai/allennlp/blob/main/allennlp/common/util.py
"""
path = Path(path).resolve()
path = str(path)
sys.path.insert(0, path)
try:
yield
finally:
sys.path.remove(path)
def import_module_and_submodules(package_name: str) -> None:
"""
Source: https://github.com/allenai/allennlp/blob/main/allennlp/common/util.py
"""
importlib.invalidate_caches()
with push_python_path("."):
module = importlib.import_module(package_name)
path = getattr(module, "__path__", [])
path_string = "" if not path else path[0]
for module_finder, name, _ in pkgutil.walk_packages(path):
if path_string and module_finder.path != path_string:
continue
subpackage = f"{package_name}.{name}"
import_module_and_submodules(subpackage)
def print_dict(f, d, prefix=" ", incr_prefix=" "):
if not isinstance(d, dict):
f.write("%s%s\n" % (prefix, d))
if isinstance(d, tuple):
for x in d:
if isinstance(x, dict):
print_dict(f, x, prefix + incr_prefix, incr_prefix)
return
sorted_keys = sorted(d.keys())
for k in sorted_keys:
v = d[k]
if isinstance(v, dict):
f.write("%s%s:\n" % (prefix, k))
print_dict(f, v, prefix + incr_prefix, incr_prefix)
elif isinstance(v, list):
f.write("%s%s:\n" % (prefix, k))
for x in v:
print_dict(f, x, prefix + incr_prefix, incr_prefix)
else:
f.write("%s%s: %s\n" % (prefix, k, v))
| BorealisAI/DT-Fixup | spider/semparser/common/utils.py | utils.py | py | 2,616 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.PathLike",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "typing.TypeVar",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.Generator",
"li... |
34014676895 | from flask import Flask, request, abort, render_template, make_response
import json, requests
from StringIO import StringIO
from time import sleep
try:
from metatool import metatool
except ImportError:
import metatool
try:
from metatool import viz
except ImportError:
import viz
try:
from metatool import config
except ImportError:
import config
try:
from metatool import models
except ImportError:
import models
try:
from metatool import generate_test_data
except ImportError:
import generate_test_data
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html', baseurl=config.BASE_URL)
@app.route("/validate", methods=["POST", "GET"])
def validate():
mt = request.values.get("modeltype")
f = None
if request.method == "POST":
f = request.files.get("model")
elif request.method == "GET":
url = request.values.get("url")
resp = requests.get(url)
f = StringIO(resp.text)
fieldsets = metatool.validate_model(mt, f)
html = metatool.fieldsets_to_html(fieldsets)
return render_template("results.html", tables=html, baseurl=config.BASE_URL)
@app.route("/cerifeye", methods=["POST", "GET"])
def cerifeye():
mt = request.values.get("modeltype")
f = None
if request.method == "POST":
f = request.files.get("model")
elif request.method == "GET":
url = request.values.get("url")
resp = requests.get(url)
f = StringIO(resp.text)
nodes = viz.get_nodes(mt, f)
return render_template("cerifview.html", nodes=json.dumps(nodes), baseurl=config.BASE_URL)
@app.route("/visualise", methods=["POST", "GET"])
def visualise():
mt = request.values.get("modeltype")
f = None
if request.method == "POST":
f = request.files.get("model")
elif request.method == "GET":
url = request.values.get("url")
resp = requests.get(url)
f = StringIO(resp.text)
nodes = viz.get_nodes(mt, f)
return render_template("viz.html", nodes=json.dumps(nodes), baseurl=config.BASE_URL)
@app.route("/acat", methods=["GET"])
def acat_facetview():
return render_template("acat_search.html", es_host=config.ES_HOST, es_index='acat')
@app.route("/aggregate/publications", methods=["GET"])
def publications_facetview():
return render_template("aggregate_publications.html", es_host=config.ES_HOST, es_index='ukriss')
@app.route("/aggregate/publications/generate", methods=["GET"])
@app.route("/aggregate/publications", methods=["POST"])
def generate_publications():
# make sure index is created and has right mappings
init_status_code = models.Publication.initialise_index()
if init_status_code != 200:
return '''Elasticsearch has a problem initialising the {0} index, it returned a {1} HTTP status code.
Check the elasticsearch log for exceptions.'''.format(models.Publication.es_index, init_status_code)
how_many = 1000
generate_test_data.generate_and_index(how_many)
models.Publication.refresh()
sleep(1) # give ES a bit of time to do the refresh
return "Generated {0} publication records".format(how_many)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=5007)
| CottageLabs/metatool | metatool/web.py | web.py | py | 3,276 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "config.BASE_URL",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "flask.request.... |
1938073175 | from pathlib import Path
import json
from .util import filter_fields
def KMANGLED_encode(
data, sort_keys=False, indent=None, ignore_private=False, ignore_none=False
):
return json.dumps(
filter_fields(data, ignore_private, ignore_none),
sort_keys=sort_keys,
indent=indent,
)
def KMANGLED_decode(value: str):
return json.loads(value)
def KMANGLED_dump_to_file(
data,
filename: str,
sort_keys=False,
indent=None,
ignore_private=False,
ignore_none=False,
):
json_str = KMANGLED_encode(data, sort_keys, indent, ignore_private, ignore_none)
Path(filename).write_text(json_str)
| kcl-lang/kcl-py | kclvm/compiler/extension/builtin/system_module/json.py | json.py | py | 652 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "util.filter_fields",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_num... |
27876220800 | # Core Pkgs
import streamlit as st
#Other Pkgs
#EDA Pkgs
import pandas as pd
import codecs
from pandas_profiling import ProfileReport
#Component Pkgs
import streamlit.components.v1 as components #v1 is version1 : If new features are added, then it will not break your app
from streamlit_pandas_profiling import st_profile_report
#Custom Component Functions
import sweetviz as sv
def st_display_sweetviz(report_html, width=1000,height = 500):
report_file = codecs.open(report_html, 'r') #codecs help in reading html file
page = report_file.read()
components.html(page,width= width, height=height, scrolling=True)
def main():
"""A Simple EDA App with Streamlit Components (Using Pandas Profiling and Sweetviz in Streamlit)"""
menu = ["Pandas Profile", "Sweetviz"]
choice = st.sidebar.selectbox("Menu", menu)
if choice == "Pandas Profile":
st.subheader("Automated Exploratory Data Analsis (with Pandas Profile)")
st.write("EDA is the task of analyzing data from statistics, simple plotting tools, linear algebra and other techniques to understand what the dataset is, before we go to actual machine learning.")
st.write("Pandas Profile generates profile reports from a pandas DataFrame. Pandas Profiling extends the pandas DataFrame for quick data analysis.")
st.set_option('deprecation.showfileUploaderEncoding', False)
data_file = st.file_uploader("Upload CSV", type = ['csv'])
if data_file is not None:
df = pd.read_csv(data_file)
st.dataframe(df.head())
profile = ProfileReport(df)
st_profile_report(profile)
elif choice == "Sweetviz":
st.subheader("Automated Exploratory Data Analysis (with Sweetviz)")
st.write("Sweetviz is an open source Python library that generates beautiful, high-density visualizations to kickstart EDA (Exploratory Data Analysis). Output is a fully self-contained HTML application.The system is built around quickly visualizing target values and comparing datasets. Its goal is to help quick analysis of target characteristics, training vs testing data, and other such data characterization tasks.")
data_file = st.file_uploader("Upload CSV", type = ['csv'])
st.set_option('deprecation.showfileUploaderEncoding', False)
if data_file is not None:
df = pd.read_csv(data_file)
st.dataframe(df.head())
#Normal Workflow for sweetviz
report = sv.analyze(df)
report.show_html()
st_display_sweetviz("SWEETVIZ_REPORT.html")
| yashpupneja/StreamAI | DS_pandas_profiling.py | DS_pandas_profiling.py | py | 2,412 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "codecs.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1.html",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.components.v1",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "stre... |
36619014009 | # pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring
import torch
import e3nn.point.data_helpers as dh
from e3nn import rs
import numpy as np
torch.set_default_dtype(torch.float64)
def test_data_helpers():
N = 7
lattice = torch.randn(3, 3)
pos = torch.randn(N, 3)
Rs_in = [(3, 0), (1, 1)]
x = torch.randn(N, rs.dim(Rs_in))
r_max = 1
dh.neighbor_list_and_relative_vec_lattice(pos, lattice, r_max)
dh.DataPeriodicNeighbors(x, Rs_in, pos, lattice, r_max)
dh.neighbor_list_and_relative_vec(pos, r_max)
dh.DataNeighbors(x, Rs_in, pos, r_max)
def test_silicon_neighbors():
lattice = torch.tensor([
[3.34939851, 0. , 1.93377613],
[1.11646617, 3.1578432 , 1.93377613],
[0. , 0. , 3.86755226]
])
coords = torch.tensor([
[0. , 0. , 0. ],
[1.11646617, 0.7894608 , 1.93377613]
])
r_max = 2.5
edge_index, edge_attr = dh.neighbor_list_and_relative_vec_lattice(coords, lattice, r_max=r_max)
edge_index_true = torch.LongTensor([
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0]
])
torch.allclose(edge_index, edge_index_true)
def test_get_edge_edges_and_index():
edge_index = torch.LongTensor([
[0, 0, 0, 1, 1, 1, 2, 2, 2],
[0, 1, 2, 0, 1, 2, 0, 1, 2]
])
edge_index_dict_asym, _, edge_edge_index_asym = dh.get_edge_edges_and_index(edge_index, symmetric_edges=False)
edge_index_dict_symm, _, edge_edge_index_symm = dh.get_edge_edges_and_index(edge_index, symmetric_edges=True)
check1 = {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 0): 3, (1, 1): 4, (1, 2): 5, (2, 0): 6, (2, 1): 7, (2, 2): 8}
check2 = {(0, 0): 0, (0, 1): 1, (0, 2): 2, (1, 1): 3, (1, 2): 4, (2, 2): 5}
assert edge_index_dict_asym == check1
assert edge_index_dict_symm == check2
assert np.max(list(edge_index_dict_asym.values())) == np.max(edge_edge_index_asym)
assert np.max(list(edge_index_dict_symm.values())) == np.max(edge_edge_index_symm)
def test_initialize_edges():
edge_index = torch.LongTensor([[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]])
edge_index_dict, _, _ = dh.get_edge_edges_and_index(edge_index, symmetric_edges=True)
_, Rs = dh.initialize_edges(torch.ones(5, 1), [(1, 0, 1)], torch.randn(5, 3), edge_index_dict, 2, symmetric_edges=True)
assert Rs == [(1, 0, 1), (1, 1, -1), (1, 2, 1)]
_, Rs = dh.initialize_edges(torch.ones(5, 3), [(1, 1, -1)], torch.randn(5, 3), edge_index_dict, 0, symmetric_edges=True)
assert Rs == [(1, 0, 1), (1, 2, 1)]
edge_index_dict, _, _ = dh.get_edge_edges_and_index(edge_index, symmetric_edges=False)
_, Rs = dh.initialize_edges(torch.ones(5, 3), [(1, 1, -1)], torch.randn(5, 3), edge_index_dict, 0, symmetric_edges=False)
assert Rs == [(1, 0, 1), (1, 1, 1), (1, 2, 1)]
def test_DataEdgeNeighbors():
square = torch.tensor(
[[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]]
)
square -= square.mean(-2)
data = dh.DataEdgeNeighbors(torch.ones(4, 1), [(1, 0, 1)], square, 1.5, 2)
assert list(data.edge_x.shape) == [16, 9]
assert list(data.edge_edge_index.shape) == [2, 64]
assert list(data.edge_edge_attr.shape) == [64, 3]
def test_DataEdgePeriodicNeighbors():
pos = torch.ones(1, 3) * 0.5
lattice = torch.eye(3)
dh.DataEdgePeriodicNeighbors(torch.ones(1, 1), [(1, 0, 1)], pos, lattice, 1.5, 2)
| clementbernardd/ares_fork | lib/ares/e3nn_ares/tests/point/data_helpers_test.py | data_helpers_test.py | py | 3,520 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.set_default_dtype",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.float64",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.randn",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.randn",
... |
25163452547 | #!/usr/bin/env python
import typer
import logging
import os
# logging.basicConfig(level=logging.INFO, format="%(asctime)s %(filename)s: %(levelname)6s %(message)s")
#
# LOG = logging.getLogger(__name__)
from easul.driver import MemoryDriver
app = typer.Typer(help="EASUL tools to manage and extend the abilities of the library. Most of the tools are related to the running and monitoring the engine.", pretty_exceptions_enable=False)
@app.command(help="View visuals for a specific step")
def view_visual(plan_module, stepname:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)
step = plan.steps[stepname]
driver = MemoryDriver.from_reference("VISUAL")
html = step.render_visual(driver, plan.steps)
import tempfile
fd = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
fd.write(str(html).encode("utf8"))
fd.close()
os.system(f"open {fd.name}")
@app.command(help="Regenerate model algorithm and context data for EASUL tests.", epilog="NOTE: Only use this if files are lost or corrupted - it may require changes to tests.")
def regenerate_test_models():
from easul.manage.regenerate import generate_test_models
generate_test_models()
@app.command(help="Run EASUL engine according to provided configuration")
def run_engine(plan_module:str, engine_module:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)()
engine = create_package_class(engine_module)()
engine.run(plan)
@app.command(help="Monitor EASUL broker for supplied plan/engine")
def monitor_broker(plan_module:str, engine_module:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)()
engine = create_package_class(engine_module)()
from easul.manage.monitor import monitor_client
monitor_client(engine, plan)
if __name__ == "__main__":
app()
| rcfgroup/easul | manage.py | manage.py | py | 1,926 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typer.Typer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "easul.util.create_package_class",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "easul.driver.MemoryDriver.from_reference",
"line_number": 20,
"usage_type": "call"
},
{
... |
31948081711 | import requests
import streamlit as st
st.title("Weather Report ☁️")
def kelvin_to_celsius(kelvin):
return kelvin - 273.15
def kelvin_to_fahrenheit(kelvin):
return (kelvin - 273.15) * 9/5 + 32
def get_wind_direction(degrees):
directions = ["North", "North-East", "East", "South-East", "South", "South-West", "West", "North-West"]
index = int((degrees + 22.5) / 45) % 8
return directions[index]
def main():
try:
city = st.text_input("Enter Your City")
if st.button("Check"):
api_key = "b1d2ededf0d77faf89a0c7e0a3acc4d1"
final_url = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}".format(city, api_key)
result = requests.get(final_url)
data = result.json()
if data['cod'] == '404':
st.error("City not found.")
return
temperature_kelvin = data['main']['temp']
temperature_celsius = round(kelvin_to_celsius(temperature_kelvin))
temperature_fahrenheit = round(kelvin_to_fahrenheit(temperature_kelvin))
humidity = data['main']['humidity']
pressure = data['main']['pressure']
wind_speed = data['wind']['speed']
wind_direction_degrees = data['wind']['deg']
wind_direction_cardinal = get_wind_direction(wind_direction_degrees)
cordinatelon = data['coord']['lon']
cordinatelat = data['coord']['lat']
visibility = data.get('visibility')
wind_speed = data['wind']['speed']
weather_condition = data['weather'][0]['description']
st.subheader(f"Weather in {city}:")
st.text(f"Temperature: {temperature_celsius} °C ({temperature_fahrenheit:.2f} °F)")
st.text(f"Humidity: {humidity}%")
st.text(f"Wind Speed: {wind_speed*3.6:.2f} km/h")
st.text(f"Wind Direction: {wind_direction_cardinal}")
st.text(f"Weather Condition: {weather_condition.capitalize()}")
st.text(f"Latitude: {cordinatelat}")
st.text(f"Longitude: {cordinatelon}")
st.text(f"Pressure: {pressure} mb")
if visibility:
st.text(f"Visibility: {visibility / 1000:.2f} km")
else:
st.text("Visibility data not available.")
except(KeyError):
st.error("Please Enter the City Name")
if __name__ == "__main__":
main() | Yashwanth-2701/Weather-Report | app.py | app.py | py | 2,512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.title",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.button",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
43570030497 | import warnings
from pymysql.tests import base
import pymysql.cursors
class CursorTest(base.PyMySQLTestCase):
def setUp(self):
super(CursorTest, self).setUp()
conn = self.connections[0]
self.safe_create_table(
conn,
"test", "create table test (data varchar(10))",
cleanup=True)
cursor = conn.cursor()
cursor.execute(
"insert into test (data) values "
"('row1'), ('row2'), ('row3'), ('row4'), ('row5')")
cursor.close()
self.test_connection = pymysql.connect(**self.databases[0])
self.addCleanup(self.test_connection.close)
def test_cleanup_rows_unbuffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.SSCursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
with warnings.catch_warnings(record=True) as log:
warnings.filterwarnings("always")
c2.execute("select 1")
self.assertGreater(len(log), 0)
self.assertEqual(
"Previous unbuffered result was left incomplete",
str(log[-1].message))
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
def test_cleanup_rows_buffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.Cursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
c2.execute("select 1")
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
| PyMySQL/Tornado-MySQL | tornado_mysql/tests/test_cursor.py | test_cursor.py | py | 1,959 | python | en | code | 408 | github-code | 36 | [
{
"api_name": "pymysql.tests.base.PyMySQLTestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pymysql.tests.base",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pymysql.tests.connect",
"line_number": 20,
"usage_type": "call"
},
{
"ap... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.