seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3471857383 | import datetime
def message(msg):
    """Print *msg* in title case, framed by dashed rules and blank lines."""
    banner = "\n------------------- {} -------------------\n".format(msg.title())
    print(banner)
def box_message(msg):
    """Print *msg* in title case inside a box drawn with dashes."""
    rule = (len(msg) + 6) * "-"
    print("\n".join([rule, "| " + msg.title() + " |", rule]))
def transformDate(date):
    """Parse a "YYYY-MM-DD-HH-MM" string into a locale-formatted date string.

    Args:
        date: string of exactly five dash-separated integers
              (year, month, day, hour, minute).

    Returns:
        The ``strftime("%c")`` representation on success, or ``None`` when the
        input does not have five parts or does not describe a valid datetime.
    """
    try:
        parts = [int(item) for item in date.split("-")]
        if len(parts) != 5:
            return None
        year, month, day, hour, minute = parts
        return datetime.datetime(year, month, day, hour, minute).strftime("%c")
    except (ValueError, TypeError, AttributeError):
        # ValueError: non-numeric part or out-of-range date component.
        # TypeError/AttributeError: `date` is not a string-like object.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        return None
| trset/Auction-System | utils.py | utils.py | py | 621 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "call"
}
] |
25234742924 | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPool2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
# Grayscale input frames: 160 px high, 320 px wide, 1 channel.
input_shape = (160, 320, 1)
# Small CNN classifier: three conv/pool stages and a dense head with 3 output
# classes (softmax) — presumably the game actions; confirm against the caller.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=input_shape),
    MaxPool2D(pool_size=(1,2)),  # pools width only; height is preserved
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPool2D(pool_size=(1,2)),
    Conv2D(128, 3, padding='same', activation='relu'),
    MaxPool2D(pool_size=(1,2)),
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.1),
    Dense(3,activation='softmax')
])
model.compile(optimizer='adam',loss='categorical_crossentropy', metrics=['accuracy'])
# Stop when validation accuracy has not improved for 10 consecutive epochs.
earlyStopping = EarlyStopping(monitor='val_accuracy', patience=10, verbose=0, mode='max')
# Keep only the best checkpoint (highest validation accuracy).
# NOTE(review): the backslashes in 'D:\Project\dino_4' happen not to form
# escape sequences, but a raw string or forward slashes would be safer.
mcp_save = ModelCheckpoint('D:\Project\dino_4', save_best_only=True, monitor='val_accuracy', mode='max')
# Rescale pixels to [0, 1] and hold out 10% of the images for validation.
datagen = ImageDataGenerator(validation_split=0.1, rescale=1./255)
train_generator = datagen.flow_from_directory(
    'D:/Project/images',
    target_size=(160, 320),
    batch_size=1,
    subset='training',
    color_mode = 'grayscale',
    class_mode='categorical')
validation_generator = datagen.flow_from_directory(
    'D:/Project/images',
    target_size=(160, 320),
    batch_size=1,
    subset='validation',
    color_mode = 'grayscale',
    class_mode='categorical')
history = model.fit(train_generator,
                    epochs=60,
                    validation_data=validation_generator,callbacks=[earlyStopping, mcp_save])
# Plot the accuracy/loss curves collected during training.
acc = history.history[ 'accuracy' ]
val_acc = history.history[ 'val_accuracy' ]
loss = history.history[ 'loss' ]
val_loss = history.history['val_loss' ]
epochs = range(len(acc))
plt.plot ( epochs, acc,'y', label='Training acc' )
plt.plot ( epochs, val_acc,'r', label='validation acc' )
plt.title ('Training and validation accuracy')
plt.legend()
plt.figure()
# Plot training and validation loss per epoch
plt.plot ( epochs, loss,'y', label='Training loss' )
plt.plot ( epochs, val_loss,'r', label='Validation loss' )
plt.title ('Training and validation loss' )
plt.legend()
plt.figure()
# Save the final (last-epoch) model; the best epoch is in the checkpoint above.
model.save('D:\Project\dino-t.h5')
| Ashish2Parimi/Chrome-Bot | Network.py | Network.py | py | 2,533 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 14,
"usage_type": "c... |
4909866886 | from tkinter import *
from tkinter import ttk
import sqlite3
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate,Image
import webbrowser
# Single shared Tk root window used by the whole application.
root = Tk()
class Relatorios():
    """Generates a per-client PDF record sheet and opens it in the browser."""
    def mostrar(self):
        # Open the generated PDF with the OS default viewer.
        webbrowser.open('ficha_Cliente_' + self.nomerel + '.pdf')
    def Gerar_Ficha(self):
        # Snapshot the current form fields.
        self.codigorel = self.entry_codigo.get()
        self.nomerel = self.entry_nome.get()
        self.telefonerel = self.entry_telfone.get()
        self.cidaderel = self.entry_cidade.get()
        self.ficha_cliente = canvas.Canvas('ficha_Cliente_' + self.nomerel + '.pdf')
        self.ficha_cliente.setFont("Helvetica-Bold", 20)
        self.ficha_cliente.drawString(200, 780, 'FICHA DO CLIENTE')
        self.ficha_cliente.setFont("Helvetica-Bold", 20)
        # One line per field, 30 pt apart, top to bottom.
        campos = (
            (680, 'Código: ' + self.codigorel),
            (650, 'Nome: ' + self.nomerel),
            (620, 'Telefone: ' + self.telefonerel),
            (590, 'Cidade: ' + self.cidaderel),
        )
        for altura, texto in campos:
            self.ficha_cliente.drawString(50, altura, texto)
        self.ficha_cliente.rect(20, 430, 550, 400, fill=False, stroke=True)
        self.ficha_cliente.showPage()
        self.ficha_cliente.save()
        self.mostrar()
class Funcoes():
    """Database (sqlite3) and form helpers shared by the client CRUD screens."""
    def limpar_campos(self):
        # Clear every entry widget on the form.
        self.entry_codigo.delete(0, END)
        self.entry_nome.delete(0, END)
        self.entry_telfone.delete(0, END)
        self.entry_cidade.delete(0, END)
    def db_conect(self):
        # Open (or create) the local SQLite database and keep a cursor around.
        self.conexao = sqlite3.connect('clientes_bd.bd')
        self.cursor = self.conexao.cursor()
        print("conectando ao banco de dados")
    def db_desconect(self):
        self.conexao.close()
        print("Desconectando ao banco de dados sqlite3")
    def criar_tabela(self):
        # Create the clientes table on first run (no-op afterwards).
        self.db_conect()
        self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS clientes(
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                Nome VARCHAR(50) NOT NULL,
                telefone INTEGER(11) NOT NULL,
                cidade VARCHAR(40));""")
        self.conexao.commit()
        print("banco de dados criado")
        self.db_desconect()
    def capturar_campos(self):
        # Read the current form values into attributes used by the CRUD methods.
        self.codigo = self.entry_codigo.get()
        self.nome = self.entry_nome.get()
        self.telefone = self.entry_telfone.get()
        self.cidade = self.entry_cidade.get()
    def add_cliente(self):
        self.capturar_campos()
        self.db_conect()
        self.cursor.execute("""INSERT INTO clientes (nome,telefone,cidade)
                            VALUES(?,?,?)""", (self.nome, self.telefone, self.cidade))
        self.conexao.commit()
        self.db_desconect()
        self.select_lista()
        self.limpar_campos()
    def select_lista(self):
        # Reload the grid with every client, ordered by name.
        self.lista_grid.delete(*self.lista_grid.get_children())
        self.db_conect()
        lista = self.cursor.execute("""SELECT id , nome,telefone,cidade
                                    FROM clientes ORDER BY nome ASC;""")
        for l in lista:
            self.lista_grid.insert("", END, values=l)
        self.db_desconect()
    def OnDubleClick(self, event):
        # Copy the double-clicked grid row back into the form fields.
        self.limpar_campos()
        self.lista_grid.selection()
        for x in self.lista_grid.selection():
            col1, col2, col3, col4 = self.lista_grid.item(x, 'values')
            self.entry_codigo.insert(END, col1)
            self.entry_nome.insert(END, col2)
            self.entry_telfone.insert(END, col3)
            self.entry_cidade.insert(END, col4)
    def deleta_cliente(self):
        self.capturar_campos()
        self.db_conect()
        # FIX: the parameters must be a 1-tuple. Passing the bare string made
        # sqlite3 treat each character as a separate binding, which raised
        # "Incorrect number of bindings" for any id with more than one digit.
        self.cursor.execute("""DELETE FROM clientes WHERE id = ?""", (self.codigo,))
        self.conexao.commit()
        self.db_desconect()
        self.limpar_campos()
        self.select_lista()
    def alterar_cliente(self):
        self.capturar_campos()
        self.db_conect()
        self.cursor.execute("""UPDATE clientes SET nome = ?, telefone = ?, cidade = ?
                            WHERE id = ?;
                            """, (self.nome, self.telefone, self.cidade, self.codigo))
        self.conexao.commit()
        self.db_desconect()
        self.limpar_campos()
        self.select_lista()
    def Buscar_Cliente(self):
        self.db_conect()
        self.lista_grid.delete(*self.lista_grid.get_children())
        # Append '%' so "abc" searches for the pattern "%abc%" (contains).
        self.entry_nome.insert(END, '%')
        nome = '%' + self.entry_nome.get()
        # SECURITY FIX: bind the pattern as a parameter instead of %-interpolating
        # user-typed text straight into the SQL (injection / quoting bugs).
        self.cursor.execute(
            """SELECT * FROM clientes WHERE Nome LIKE ? COLLATE NOCASE ORDER BY Nome ASC""",
            (nome,))
        Resultado_busca = self.cursor.fetchall()
        for cliente in Resultado_busca:
            self.lista_grid.insert("", END, values=cliente)
        # FIX: db_desconect() was called twice (second close on an already
        # closed connection); disconnect once, then clear the form.
        self.db_desconect()
        self.limpar_campos()
class Aplication(Funcoes,Relatorios):
    """Main window of the client CRUD app.

    Mixes in the database/form helpers (Funcoes) and the PDF report generator
    (Relatorios), builds every widget and starts the Tk main loop.
    """
    def __init__(self):
        self.root = root
        self.tela()
        self.frames_tela()
        self.grid_cliente()
        self.widgets_frame1()
        self.Menus()
        self.criar_tabela()
        self.select_lista()
        root.mainloop()
    def tela(self):
        # Window title, background color and resize limits.
        self.root.title("Cadastro de Clientes")
        self.root.configure(background='#6a50c9')
        self.root.geometry("800x600")
        self.root.resizable(True, True)
        self.root.maxsize(width=850, height=700)
        self.root.minsize(width=400, height=300)
    def frames_tela(self):
        # Top frame (form + buttons) and bottom frame (client grid).
        self.frame1 = Frame(self.root, bd=4, bg="#fff",
                            highlightbackground="#b471f8", highlightthickness=3)
        self.frame1.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.46)
        self.frame2 = Frame(self.root, bd=4, bg="#fff",
                            highlightbackground="#b471f8", highlightthickness=3)
        self.frame2.place(relx=0.02, rely=0.5, relwidth=0.96, relheight=0.46)
    def widgets_frame1(self):
        # Clear button
        self.bt_limpar = Button(self.frame1, text="Limpar",
                                bg="#583bbf", fg="white", font=('verdana', 8, 'bold'), command=self.limpar_campos)
        self.bt_limpar.place(relx=0.2, rely=0.1, relwidth=0.1, relheight=0.15)
        # Search button
        self.bt_buscar = Button(self.frame1, text="Buscar",
                                bg="#583bbf", fg="white", font=('verdana', 8, 'bold'),command=self.Buscar_Cliente)
        self.bt_buscar.place(relx=0.3, rely=0.1, relwidth=0.1, relheight=0.15)
        # New (insert) button
        self.bt_novo = Button(self.frame1, text="Novo",
                              bg="#583bbf", fg="white", font=('verdana', 8, 'bold'),command=self.add_cliente)
        self.bt_novo.place(relx=0.6, rely=0.1, relwidth=0.1, relheight=0.15)
        # Update button
        self.bt_alterar = Button(self.frame1, text="Alterar",
                                 bg="#583bbf", fg="white", font=('verdana', 8, 'bold'),command=self.alterar_cliente)
        self.bt_alterar.place(relx=0.7, rely=0.1, relwidth=0.1, relheight=0.15)
        # Delete button
        self.bt_apagar = Button(self.frame1, text="Apagar",
                                bg="#583bbf", fg="white", font=('verdana', 8, 'bold'),command=self.deleta_cliente)
        self.bt_apagar.place(relx=0.8, rely=0.1, relwidth=0.1, relheight=0.15)
        # label and entry - code -----------------------------
        self.lb_codigo = Label(self.frame1, text="Codigo",
                               bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.lb_codigo.place(relx=0.05, rely=0.05)
        self.entry_codigo = Entry(self.frame1, text="Codigo",
                                  bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.entry_codigo.place(relx=0.05, rely=0.15, relwidth=0.08)
        # label and entry - name ----------------------------------
        self.lb_nome = Label(self.frame1, text="Nome",
                             bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.lb_nome.place(relx=0.05, rely=0.35)
        self.entry_nome = Entry(self.frame1,
                                bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.entry_nome.place(relx=0.05, rely=0.45, relwidth=0.7)
        # label and entry - phone --------------------------
        self.lb_telfone = Label(self.frame1, text="Telfone",
                                bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.lb_telfone.place(relx=0.05, rely=0.6)
        self.entry_telfone = Entry(self.frame1,
                                   bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.entry_telfone.place(relx=0.05, rely=0.7, relwidth=0.4)
        # label and entry - city -----------------------
        self.lb_cidade = Label(self.frame1, text="Cidade",
                               bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.lb_cidade.place(relx=0.5, rely=0.6)
        self.entry_cidade = Entry(self.frame1,
                                  bg="white", fg="#583bbf", font=('verdana', 10, 'bold'))
        self.entry_cidade.place(relx=0.5, rely=0.7, relwidth=0.5)
    def grid_cliente(self):
        # Treeview listing all clients, plus a vertical scrollbar.
        self.lista_grid = ttk.Treeview(self.frame2, height=3,
                                       column=('col1', 'col2', 'col3', 'col4'))
        self.lista_grid.heading("#0", text='')
        self.lista_grid.heading("#1", text='CODIGO')
        self.lista_grid.heading("#2", text='NOME')
        self.lista_grid.heading("#3", text='TELEFONE')
        self.lista_grid.heading("#4", text='CIDADE')
        self.lista_grid.column("#0", width=1)
        self.lista_grid.column("#1", width=25)
        self.lista_grid.column("#2", width=200)
        self.lista_grid.column("#3", width=125)
        self.lista_grid.column("#4", width=125)
        self.lista_grid.place(relx=0.005, rely=0.1, relwidth=0.95, relheight=0.86)
        self.scrol_lista = Scrollbar(self.frame2, orient='vertical')
        self.lista_grid.configure(yscroll=self.scrol_lista.set)
        self.scrol_lista.place(relx=0.96, rely=0.1, relwidth=0.04, relheight=0.88)
        # Double-click copies the row into the form for editing.
        self.lista_grid.bind("<Double-1>",self.OnDubleClick)
    def Menus(self):
        # Menu bar: "opções" (quit) and "Funções" (clear fields / PDF report).
        Menubar = Menu(self.root)
        self.root.config(menu=Menubar)
        filemenu = Menu(Menubar)
        filemenu2 = Menu(Menubar)
        def Quit(): self.root.destroy()
        Menubar.add_cascade(label="opções",menu=filemenu)
        Menubar.add_cascade(label="Funções", menu=filemenu2)
        filemenu.add_command(label="Sair",command=Quit)
        filemenu2.add_command(label="Limpar campos", command=self.limpar_campos)
        filemenu2.add_command(label="Gerar Relatório", command=self.Gerar_Ficha)
Aplication() | Felipe500/CRUD-CADASTRO-CLIENTES-PYTHON-TKINTER-SQLITE | main.py | main.py | py | 10,777 | python | pt | code | 6 | github-code | 36 | [
{
"api_name": "webbrowser.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas",
"line_number": 23,
"usage_type": "name"
},
{
"api_name":... |
36278467986 | import numpy
import scipy.linalg
from utils import utils
def compute_Sw_Sb(D, L):
    """Compute the within-class (Sw) and between-class (Sb) scatter matrices.

    D holds one sample per column; L holds the integer class label of each
    column. Both matrices are normalized by the total number of samples.
    """
    n_classes = L.max() + 1
    total = D.shape[1]
    global_mean = utils.vcol(D.mean(1))
    Sw = 0
    Sb = 0
    for cls in range(n_classes):
        samples = D[:, L == cls]
        count = samples.shape[1]
        class_mean = utils.vcol(samples.mean(1))
        centered = samples - class_mean
        # Class covariance weighted by the class size.
        Sw += count * (numpy.dot(centered, centered.T) / count)
        offset = class_mean - global_mean
        Sb += count * numpy.dot(offset, offset.T)
    return Sw / total, Sb / total
def project(base, D):
    """Project dataset D (one sample per column) onto the subspace `base`."""
    transposed = base.T
    return numpy.dot(transposed, D)
def LDA(D, L, m):
    """Perform supervised dimensionality reduction with LDA.

    Args:
        D : dataset (one sample per column)
        L : labels
        m : dimensionality of the target subspace (must be smaller than
            #classes - 1)
    Returns:
        The original data projected onto the m most discriminant directions.
    """
    Sw, Sb = compute_Sw_Sb(D, L)
    # Generalized eigenproblem Sb v = w Sw v; eigh returns eigenvalues in
    # ascending order, so reverse the columns before slicing.
    _eigvals, eigvecs = scipy.linalg.eigh(Sb, Sw)
    basis = eigvecs[:, ::-1][:, :m]
    return project(basis, D)
| aldopietromatera/2023_biometric_identity_verification_ML | BIV/biv/dimred/LDA.py | LDA.py | py | 1,155 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.utils.vcol",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "utils.utils.vcol",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.utils",
"line... |
26377828934 | from django.urls import path
from .views import *
urlpatterns = [
    # Storefront / catalog browsing.
    path('', BaseView.as_view(), name='base'),
    path('catalog/all', CatalogView.as_view(), name='catalog'),
    path('catalog/<str:slug>/', CategoryDetailView.as_view(), name='category_detail'),
    # ct_model selects the concrete product model (generic-relation lookup).
    path('catalog/<str:ct_model>/<str:slug>/', ProductDetailView.as_view(), name='product_description'),
    # Static info pages (function-based views).
    path('brand/', brand_view, name='brand'),
    path('fabrics/', fabrics_view, name='fabrics'),
    path('contacts/', contacts_view, name='contacts'),
    path('how_to_order', how_to_order_view, name='how_to_order'),
    # Authentication.
    path('register/', RegisterView.as_view(), name='register'),
    path('login/', AuthenticationView.as_view(), name='login'),
    # Cart and checkout flow.
    path('cart/', CartView.as_view(), name='cart'),
    path('add-to-cart/<str:ct_model>/<str:slug>', AddToCartView.as_view(), name='add_to_cart'),
    path('remove-from-cart/<str:ct_model>/<str:slug>', DeleteFromCartView.as_view(), name='delete_from_cart'),
    path('change-qty/<str:ct_model>/<str:slug>', ChangeQTYView.as_view(), name='change_qty'),
    path('checkout/', CheckoutView.as_view(), name='checkout'),
    path('make-order/', MakeOrderView.as_view(), name='make_order')
]
| IvanPogorenko/MoonPie | todo/mainapp/urls.py | urls.py | py | 1,207 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
216944000 | # compose_flask/app.py
from flask import Flask
from redis import Redis
app = Flask(__name__)
# Host 'redis-container' — presumably the Redis service/container name in the
# accompanying docker-compose file; verify it matches.
redis = Redis(host='redis-container', port=6379)
@app.route('/')
def hello():
    """Increment and report the page-hit counter stored in Redis."""
    # FIX: redis.get() returns bytes, so the page rendered "b'5'". INCR
    # already returns the post-increment value as an int, which also saves a
    # round-trip and avoids a read-after-write race between requests.
    hits = redis.incr('hits')
    return ' - - - great has viewed {} time(s) - - -'.format(hits)
if __name__ == "__main__":
    # 0.0.0.0 so the server is reachable from outside the container.
    # NOTE(review): debug=True should be disabled outside development.
    app.run(debug=True, host="0.0.0.0", port=5000)
| BigNews3/Docker | jour 2 - docker-compose/app/app.py | app.py | py | 356 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "redis.incr",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "redis.get",
"line_number": 11,
... |
29288381937 | from src.futureLoc import futureLoc
import pytest
from unittest.mock import Mock, patch
from datetime import datetime
import requests
# Expected console-output fragments assembled by futureLoc.printMessage().
infoMessage = "On the 25/10/2023 at MLA airport: \n"
coldMessage = "It will be cold so you should wear warm clothing.\n"
warmMessage = "It will be warm so you should wear light clothing.\n"
rainMessage = "It is likely to rain so you do need an umbrella.\n\n"
norainMessage = "It is likely to not rain so you don't need an umbrella.\n\n"
@patch("builtins.input", side_effect=["MLA", datetime.today().strftime("%Y-%m-%d")])
def test_getFututreInfoValidInputs(mock_input):
    """getFutureInfo returns a valid airport code and today's date unchanged."""
    # Exercise
    airport_out, date_out = futureLoc.getFutureInfo()
    # Assert
    assert airport_out == "MLA"
    assert date_out.strftime("%Y-%m-%d") == datetime.today().strftime("%Y-%m-%d")
# The stubbed input feeds invalid values first (numeric/non-code airport,
# past and too-far-future dates) — presumably getFutureInfo keeps
# re-prompting until the final valid pair is entered; confirm against
# its implementation.
@patch(
    "builtins.input",
    side_effect=[
        "123",
        "airport",
        "MLA",
        "2023-10-25",
        "2024-10-26",
        datetime.today().strftime("%Y-%m-%d"),
    ],
)
def test_getFututreInfoInValidInputs(mock_input):
    """Invalid entries are rejected until a valid airport/date pair arrives."""
    # Exercise
    airport_out, date_out = futureLoc.getFutureInfo()
    # Assert
    assert airport_out == "MLA"
    assert date_out.strftime("%Y-%m-%d") == datetime.today().strftime("%Y-%m-%d")
@patch("requests.get")
def test_getFutureLocDataSuccess(mock_requests):
    """A 200 response yields the forecast day's avg temperature and precipitation."""
    # Setup
    mock_response = Mock()
    mock_response.status_code = 200
    # NOTE(review): "forecastday" is stubbed as a dict keyed by 0 instead of a
    # list; both support [0]-indexing, so the shape difference is harmless here.
    mock_response.json.return_value = {
        "forecast": {
            "forecastday": {0: {"day": {"avgtemp_c": 12, "totalprecip_mm": 0}}}
        }
    }
    mock_requests.return_value = mock_response
    # Exercise
    temp, prec = futureLoc.getFutureLocData(
        "MLA", datetime.today().strftime("%Y-%m-%d")
    )
    # Assert
    assert temp == 12
    assert prec == 0
@patch("requests.get")
def test_getFutureLocDataStatusCodeFailure(mock_requests):
    """A non-200 status code makes getFutureLocData return (None, None)."""
    # Setup
    mock_response = Mock()
    mock_response.status_code = 100
    mock_requests.return_value = mock_response
    # Exercise
    temp, prec = futureLoc.getFutureLocData(
        "MLA", datetime.today().strftime("%Y-%m-%d")
    )
    # Assert
    assert temp == None
    assert prec == None
@patch("requests.get")
def test_getFutureLocDataRequestFailure(mock_requests, capsys):
    """A RequestException is reported on stdout and yields (None, None)."""
    # Setup
    mock_requests.side_effect = requests.exceptions.RequestException("Test Exception")
    # Exercise
    temp, prec = futureLoc.getFutureLocData(
        "MLA", datetime.today().strftime("%Y-%m-%d")
    )
    # Assert
    assert capsys.readouterr().out == "Error with WeatherAPI: Test Exception Exiting\n"
    assert temp == None
    assert prec == None
# Cases cover the cold/warm boundary (12 vs 16 °C — exact threshold is inside
# printMessage; TODO confirm) crossed with rain/no-rain (precipitation 1 vs 0).
@pytest.mark.parametrize(
    "loc, date, temp, prec, output",
    [
        (
            "MLA",
            datetime(2023, 10, 25),
            12,
            0,
            infoMessage + coldMessage + norainMessage,
        ),
        ("MLA", datetime(2023, 10, 25), 12, 1, infoMessage + coldMessage + rainMessage),
        (
            "MLA",
            datetime(2023, 10, 25),
            16,
            0,
            infoMessage + warmMessage + norainMessage,
        ),
        ("MLA", datetime(2023, 10, 25), 16, 1, infoMessage + warmMessage + rainMessage),
    ],
)
def test_printMessage(loc, date, temp, prec, output, capsys):
    """printMessage prints the exact advisory text for each temp/rain combo."""
    # Exercise
    futureLoc.printMessage(loc, date, temp, prec)
    # Assert
    assert capsys.readouterr().out == output
| benbezz02/SoftwareTestingAssignment1 | tests/futureLoc_test.py | futureLoc_test.py | py | 3,395 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.futureLoc.futureLoc.getFutureInfo",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "src.futureLoc.futureLoc",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 21,
"usage_type": "call"
},
{
... |
41242720350 | # -*- coding: utf-8 -*-
# @Author : lileilei
# @File : views.py
# @Time : 2017/12/7 12:19
from flask import Blueprint
from flask import redirect,request,render_template,url_for,flash,session,abort,jsonify,make_response
from flask.views import MethodView
from app import db
from app.models import *
from app.form import *
from flask.views import MethodView,View
from flask_login import current_user,login_required,login_user,logout_user
from app.common.decorators import admin_required,permission_required
from app import loginManager
from app.common.dict_com import comp_dict,dict_par
import json,time,os
from app.common.send_email import send_emails
from app.common.py_Html import createHtml
from app.test_case.Test_case import ApiTestCase
from app import scheduler
from app.common.Dingtalk import send_ding
def addtask(id):  # Function executed when the scheduled (APScheduler) job fires.
    """Run every interface test case attached to Task `id`.

    Writes a timestamped log file and HTML report under ./upload, persists a
    TestResult row and sends a DingTalk notification with the pass/fail
    counts.
    """
    in_id=int(id)
    task=Task.query.filter_by(id=in_id).first()
    starttime = datetime.datetime.now()
    star = time.time()
    # Minute-precision timestamp shared by the log and report file names.
    day = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
    basedir = os.path.abspath(os.path.dirname(__file__))
    file_dir = os.path.join(basedir, 'upload')
    file = os.path.join(file_dir, (day + '.log'))
    if os.path.exists(file) is False:
        os.system('touch %s' % file)
    filepath = os.path.join(file_dir, (day + '.html'))
    if os.path.exists(filepath) is False:
        os.system(r'touch %s' % filepath)
    # Collect, column-wise, the fields of every test case attached to the task.
    projecct_list = []
    model_list = []
    Interface_name_list = []
    Interface_url_list = []
    Interface_meth_list = []
    Interface_pase_list = []
    Interface_assert_list = []
    Interface_headers_list = []
    id_list = []
    for task_yongli in task.interface.all():
        id_list.append(task_yongli.id)
        projecct_list.append(task_yongli.projects)
        model_list.append(task_yongli.models)
        Interface_url_list.append(task_yongli.Interface_url)
        Interface_name_list.append(task_yongli.Interface_name)
        Interface_meth_list.append(task_yongli.Interface_meth)
        Interface_pase_list.append(task_yongli.Interface_pase)
        Interface_assert_list.append(task_yongli.Interface_assert)
        Interface_headers_list.append(task_yongli.Interface_headers)
    # Execute the whole batch, logging into `file`.
    apitest = ApiTestCase(Interface_url_list, Interface_meth_list, Interface_pase_list, Interface_assert_list, file,
                          Interface_headers_list)
    result_toal, result_pass, result_fail, relusts, bask_list = apitest.testapi()
    endtime = datetime.datetime.now()
    end = time.time()
    # Render the HTML report next to the log.
    createHtml(titles=u'接口测试报告', filepath=filepath, starttime=starttime, endtime=endtime, passge=result_pass,
               fail=result_fail, id=id_list, name=projecct_list, headers=Interface_headers_list,
               coneent=Interface_url_list, url=Interface_meth_list, meth=Interface_pase_list,
               yuqi=Interface_assert_list, json=bask_list, relusts=relusts)
    hour = end - star
    # Results are attributed to the first user with role_id=2.
    user_id = User.query.filter_by(role_id=2).first().id
    new_reust = TestResult(Test_user_id=user_id, test_num=result_toal, pass_num=result_pass, fail_num=result_fail,
                           test_time=starttime, hour_time=hour, test_rep=(day + '.html'), test_log=(day + '.log'))
    db.session.add(new_reust)
    db.session.commit()
    # Push a DingTalk summary of the run.
    send_ding(content="%s定时任务执行完毕,测试时间:%s,\\n 通过用例:%s,失败用例:%s,\\n,详情见测试平台测试报告!" % (
        task.taskname, starttime, result_pass, result_fail))
@loginManager.user_loader
def load_user(user_id):
    # Flask-Login callback: rebuild the logged-in User from the session's id.
    return User.query.get(int(user_id))
def get_pro_mo():
    """Return every project and every model as a (projects, models) pair."""
    return Project.query.all(), Model.query.all()
# Blueprint grouping the scheduled-task views (registered by the app factory).
task = Blueprint('task', __name__)
class TestforTaskView(MethodView):  # Attach test cases to a scheduled task.
    """GET renders the case-selection form; POST replaces the task's cases."""
    @login_required
    def get(self, id):
        procjet = Project.query.all()
        task_one = Task.query.filter_by(id=id).first()
        return render_template('add/addtestyongfortask.html', task_one=task_one, procjets=procjet)
    @login_required
    def post(self, id):
        procjet = Project.query.all()
        task_one = Task.query.filter_by(id=id).first()
        proc_test = request.form.get('project')
        # FIX: was `== ''`, which missed an absent field (form.get -> None)
        # and later crashed on `.first().id`.
        if not proc_test:
            flash(u'不能不添加测试项目!')
            return render_template('add/addtestyongfortask.html', task_one=task_one, procjets=procjet)
        test_yongli = request.form.getlist('testyongli')
        # FIX: getlist() returns a list, so the old `== ''` comparison could
        # never be true and empty selections slipped through.
        if not test_yongli:
            flash(u'亲你见过只有测试项目没有测试用例的测试任务吗!')
            return render_template('add/addtestyongfortask.html', task_one=task_one, procjets=procjet)
        # Replace the task's previous case set with the newly selected one.
        for oldtask in task_one.interface.all():
            task_one.interface.remove(oldtask)
        task_one.prject = Project.query.filter_by(project_name=proc_test).first().id
        for yongli in test_yongli:
            task_one.interface.append(InterfaceTest.query.filter_by(id=yongli).first())
        db.session.add(task_one)
        try:
            db.session.commit()
            flash(u'任务更新用例成功')
            return redirect(url_for('home.timingtask'))
        except Exception:
            # FIX: was a bare `except:`; also roll back so the session is not
            # left dirty for the next request.
            db.session.rollback()
            flash(u'任务更新用例失败')
            return redirect(url_for('home.timingtask'))
class StartTaskView(MethodView):  # Start (schedule) a timed task.
    """Registers the task with APScheduler and marks it as running."""
    @login_required
    def get(self, id):
        task = Task.query.filter_by(id=id).first()
        next = request.headers.get('Referer')
        if len(task.interface.all()) <= 1:
            flash(u'定时任务执行过程的测试用例为多用例,请你谅解')
            return redirect(next or url_for('home.timingtask'))
        try:
            # FIX: `args` must be a sequence of positional arguments. Passing
            # the bare string only worked for single-digit ids (a 1-char
            # string); for id >= 10 APScheduler called addtask('1', '0').
            # SECURITY NOTE: eval() of DB-stored `taskstart` executes
            # arbitrary text — consider a safe parser if tasks are
            # user-editable.
            scheduler.add_job(func=addtask, id=str(id), args=[str(id)],
                              trigger=eval(task.taskstart), replace_existing=True)
            task.yunxing_status = u'启动'
            db.session.commit()
            flash(u'定时任务启动成功!')
            return redirect(next or url_for('home.timingtask'))
        except Exception as e:
            flash(u'定时任务启动失败!请检查任务的各项内容各项内容是否正常')
            return redirect(next or url_for('home.timingtask'))
class ZantingtaskView(MethodView):  # Pause a running scheduled task.
    @login_required
    def get(self, id):
        next = request.headers.get('Referer')
        task = Task.query.filter_by(id=id).first()
        try:
            scheduler.pause_job(str(id))
            task.yunxing_status = u'暂停'
            db.session.commit()
            flash(u'定时任务暂停成功!')
            return redirect(next or url_for('home.timingtask'))
        except Exception:
            # FIX: narrowed from a bare `except:` so SystemExit /
            # KeyboardInterrupt are no longer swallowed.
            task.yunxing_status = u'创建'
            db.session.commit()
            flash(u'定时任务暂停失败!已经为您初始化')
            return redirect(next or url_for('home.timingtask'))
class HuifutaskView(MethodView):  # Resume a paused scheduled task.
    @login_required
    def get(self, id):
        task = Task.query.filter_by(id=id).first()
        next = request.headers.get('Referer')
        try:
            scheduler.resume_job(str(id))
            task.yunxing_status = u'启动'
            db.session.commit()
            flash(u'定时任务恢复成功!')
            return redirect(next or url_for('home.timingtask'))
        except Exception:
            # FIX: narrowed from a bare `except:`; on failure the task is
            # reset to the "created" state, as before.
            task.yunxing_status = u'创建'
            db.session.commit()
            flash(u'定时任务恢复失败!已经为您初始化')
            return redirect(next or url_for('home.timingtask'))
class YichuTaskView(MethodView):  # Remove (unschedule) a task from APScheduler.
    @login_required
    def get(self, id):
        next = request.headers.get('Referer')
        task = Task.query.filter_by(id=id).first()
        try:
            scheduler.delete_job(str(id))
            task.yunxing_status = u'关闭'
            db.session.commit()
            flash(u'定时任务移除成功!')
            return redirect(next or url_for('home.timingtask'))
        except Exception:
            # FIX: narrowed from a bare `except:`; on failure the task is
            # reset to the "created" state, as before.
            task.yunxing_status = u'创建'
            db.session.commit()
            flash(u'定时任务移除失败!已经为您初始化')
            return redirect(next or url_for('home.timingtask'))
class AddtimingtaskView(MethodView):
    """Create a new scheduled task (name, schedule, notification addresses)."""
    @login_required
    def get(self):
        return render_template('add/addtimingtasks.html')
    @login_required
    def post(self):
        # Read and validate the form; every required field must be non-empty
        # and the task name must not already exist.
        taskname=request.form['taskname']
        tinmingtime=request.form['time']
        to_email_data=request.form['to_email']
        cao_email=request.form['cao_email']
        weihu=request.form['weihu']
        if taskname =='':
            flash(u'任务名不能为空!')
            return render_template('add/addtimingtasks.html')
        if tinmingtime =='':
            flash(u'任务执行时间不能为空!')
            return render_template('add/addtimingtasks.html')
        if to_email_data=='':
            flash(u'发送给谁邮件不能为空!')
            return render_template('add/addtimingtasks.html')
        if weihu=='':
            flash(u'维护人邮件不能为空!')
            return render_template('add/addtimingtasks.html')
        taskname_is = Task.query.filter_by(taskname=taskname).first()
        if taskname_is:
            flash(u'任务已经存在请重新填写!')
            return render_template('add/addtimingtasks.html')
        # Persist the task, owned by the current user.
        new_task=Task(taskname=taskname,taskstart=tinmingtime,taskrepor_to=to_email_data,taskrepor_cao=cao_email,task_make_email=weihu,
                      makeuser=current_user.id)
        db.session.add(new_task)
        try:
            db.session.commit()
            flash(u'添加定时任务成功')
            return redirect(url_for('home.timingtask'))
        except Exception as e:
            db.session.rollback()
            flash(u'添加过程貌似异常艰难!')
            return redirect(url_for('home.addtimingtasks'))
class Editmingtaskview(MethodView):
    """Edit a scheduled task's name, schedule and notification addresses."""
    @login_required
    def get(self, id):
        task_one = Task.query.filter_by(id=id).first()
        procjet = Project.query.all()
        if not task_one:
            flash(u'你编辑的不存在')
            return redirect(url_for('home.timingtask'))
        return render_template('edit/Edittimingtasks.html', task_one=task_one, porjects=procjet)
    @login_required  # FIX: was missing here, unlike every sibling view handler.
    def post(self, id):
        task_one = Task.query.filter_by(id=id).first()
        procjet = Project.query.all()
        taskname = request.form['taskname']
        tinmingtime = request.form['time']
        to_email_data = request.form['to_email']
        cao_email = request.form['cao_email']
        weihu = request.form['weihu']
        # NOTE(review): validation failures re-render the *add* template, as
        # in the original — confirm whether the edit template was intended.
        if taskname == '':
            flash(u'任务名不能为空!')
            return render_template('add/addtimingtasks.html')
        if tinmingtime == '':
            flash(u'任务执行时间不能为空!')
            return render_template('add/addtimingtasks.html')
        if to_email_data == '':
            flash(u'发送给谁邮件不能为空!')
            return render_template('add/addtimingtasks.html')
        if weihu == '':
            flash(u'维护人邮件不能为空!')
            return render_template('add/addtimingtasks.html')
        task_one.taskname = taskname
        # FIX: the submitted schedule was read and validated but never saved,
        # so editing the execution time silently had no effect.
        task_one.taskstart = tinmingtime
        task_one.taskrepor_to = to_email_data
        task_one.taskrepor_cao = cao_email
        task_one.task_make_email = weihu
        task_one.makeuser = current_user.id
        try:
            db.session.commit()
            flash(u'编辑成功')
            return redirect(url_for('home.timingtask'))
        except Exception:
            # FIX: narrowed from a bare `except:`.
            db.session.rollback()
            flash(u'编辑出现问题!')
            return redirect(url_for('home.timingtask'))
class DeteleTaskViee(MethodView):
    """Soft-delete a task by flagging `status`; nothing is removed from the DB."""
    @login_required  # FIX: every other task view requires login; this one didn't.
    def get(self, id):
        next = request.headers.get('Referer')
        task_one = Task.query.filter_by(id=id).first()
        if not task_one:
            flash(u'你删除的不存在')
            return redirect(next or url_for('home.timingtask'))
        if task_one.status == True:
            flash(u'已经删除')
            return redirect(next or url_for('home.timingtask'))
        task_one.status = True
        try:
            db.session.commit()
            flash(u'删除任务成功')
            return redirect(next or url_for('home.timingtask'))
        except Exception:
            # FIX: narrowed from a bare `except:`.
            db.session.rollback()
            flash(u'删除任务休息了')
            return redirect(next or url_for('home.timingtask'))
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number":... |
17883605005 | import sys
from typing import TYPE_CHECKING, Callable, Dict, List
from PySide2.QtWidgets import QApplication
from PySide2.QtCore import Signal, QLocale, QTranslator
from lib.extensions.extensionlib import BaseExtension, BaseInterface
if TYPE_CHECKING:
pass
from lib.extensions.extensionlib import extension_lib
from .applications_toolbar import PMApplicationsToolBar
from .process_monitor import PMProcessConsoleTabWidget
from .manage_apps import APPManager, ToolAppDesc
import os
# Install the Qt translation matching the current system locale onto the
# already-running QApplication.
file_name = os.path.join(os.path.dirname(__file__), 'translations', 'qt_{0}.qm'.format(QLocale.system().name()))
app = QApplication.instance()  # NOTE(review): None if no QApplication exists yet.
trans = QTranslator()
trans.load(file_name)
app.installTranslator(trans)
class Extension(BaseExtension):
    """Extension entry point: installs the applications toolbar and the
    process-console widget and wires them to the extension library."""
    if TYPE_CHECKING:
        interface: 'ApplicationsInterface' = None
        widget: 'PMApplicationsToolBar' = None
        extension_lib: 'extension_lib' = None
    def on_loading(self):
        pass
        # self.trans = self.extension_lib.UI.add_translation_file(
        #     os.path.join(os.path.dirname(__file__), 'translations', 'qt_{0}.qm'.format(QLocale.system().name())))
    def on_load(self):
        # Share the extension library with the toolbar and the interface, and
        # route toolbar double-clicks to ApplicationsInterface.on_clicked.
        applications_toolbar: 'PMApplicationsToolBar' = self.widgets['PMApplicationsToolBar']
        applications_toolbar.extension_lib = self.extension_lib
        self.interface.extension_lib = self.extension_lib
        self.applications_toolbar = applications_toolbar
        self.interface.app_item_double_clicked_signal = applications_toolbar.app_item_double_clicked_signal
        self.interface.app_item_double_clicked_signal.connect(self.interface.on_clicked)
        self.interface.toolbar = applications_toolbar
        self.interface.console_tab_widget: 'PMProcessConsoleTabWidget' = self.widgets['PMProcessConsoleTabWidget']
        self.interface.console_tab_widget.set_extension_lib(self.extension_lib)
        self.console_tab_widget = self.widgets['PMProcessConsoleTabWidget']
        # Defer the file-tree bindings until all widgets are ready.
        self.extension_lib.Signal.get_widgets_ready_signal().connect(self.bind_events)
        self.applications_toolbar.console_tab_widget = self.console_tab_widget
    def bind_events(self):
        # Open .ui files in Qt Designer and .ts files in Qt Linguist when
        # double-clicked in the file tree.
        self.extension_lib.get_interface('file_tree').add_open_file_callback('.ui', lambda
            name: self.applications_toolbar.open_in_designer(name))
        self.extension_lib.get_interface('file_tree').add_open_file_callback('.ts', lambda
            name: self.applications_toolbar.open_in_linguist(name))
class ApplicationsInterface(BaseInterface):
    """Public interface of the applications-toolbar extension: lets other
    extensions add toolbar buttons and launch external processes in the
    process console."""
    app_item_double_clicked_signal: 'Signal' = None
    toolbar: 'PMApplicationsToolBar' = None
    console_tab_widget: 'PMProcessConsoleTabWidget' = None

    def on_clicked(self, name: str):
        # Slot for the toolbar's double-click signal.
        print('interface', name)

    def add_app(self, group: str, text: str, icon_path: str, callback: Callable, hint: str = ''):
        """Add an application button to the toolbar.

        `group` is the toolbox group, `text` the button caption, `icon_path`
        the icon resource path, `callback` the click handler and `hint` the
        hover tooltip. Example:
        extension_lib.get_interface('applications_toolbar').add_app('aaaaaa','hahahaahahah',
        ':/pyqt/source/images/lc_searchdialog.png',lambda :print('123123123'))
        """
        self.toolbar.add_toolbox_widget(group, text, icon_path, action=callback, hint=hint, refresh=True)

    def add_process_action(self, group: str, text: str, icon_path: str, process_args: list, hint: str = ''):
        """Register a button that runs `process_args` in the process console.

        Example:
        extension_lib.get_interface('applications_toolbar').app_toolbar_interface.add_process_action('应用测试', '拟合工具',
        os.path.join(path, 'src', 'cftool.png'),
        ['python', '-u', os.path.join(path, 'start_cftool.py')])
        """
        def callback():
            self.console_tab_widget.create_process(text, process_args)
            self.extension_lib.UI.raise_dock_into_view('process_console_tab')
        # NOTE(review): the button registration was commented out upstream, so
        # `callback` is never attached and this method is currently a no-op.
        # Left unchanged to preserve behaviour.
        # self.toolbar.show_apps_button_bar.add_button(text, icon_path, btn_action=callback)

    def create_process(self, text: str, process_args: List[str]):
        """Start `process_args` in a console tab named `text` and raise the
        process-console dock."""
        print(text, process_args)
        self.console_tab_widget.create_process(text, process_args)
        self.extension_lib.UI.raise_dock_into_view('process_console_tab')

    def create_python_file_process(self, file_name, interpreter_path='', args: List[str] = None):
        """Run a Python script in the process console.

        Uses the current interpreter when `interpreter_path` is ''. `args`
        are extra command-line arguments for the script.
        """
        if args is None:
            args = []
        if interpreter_path == '':
            interpreter_path = sys.executable
        # FIX: '-u' (unbuffered output) is an interpreter flag and must come
        # before the script path; it was previously placed *after* `file_name`
        # and therefore handed to the script as its first argument
        # (cf. the ['python', '-u', script] example in add_process_action).
        command_list = [interpreter_path, '-u', file_name] + args
        self.create_process(os.path.basename(file_name), process_args=command_list)

    def create_instant_boot_python_file_process(self, file_name, interpreter_path=''):
        """Run a script through the console's pre-warmed (instant-boot) path."""
        self.console_tab_widget.create_instant_boot_process(file_name, interpreter_path)
        self.extension_lib.UI.raise_dock_into_view('process_console_tab')
| pyminer/pyminer | pyminer/packages/applications_toolbar/main.py | main.py | py | 5,307 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"... |
10770344019 | import subprocess
import logging
import cliff.command
import configparser
import workflowlister
import json
import os
import sys
class Generator(cliff.command.Command):
    "This Generator will generate new job orders based on the contents of ~/ini-dir. Be aware that it will also rewrite your params.json file and your ~.youxia/config file."

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Build the argparse parser for this cliff command."""
        parser = super(Generator, self).get_parser(prog_name)
        parser.add_argument('--workflow', dest='workflow_name', help='The name of the workflow for which you would like to generate jobs.', required=True)
        parser.add_argument('--force', dest='force_generate', help='Force the generation of the jobs, even if the system detects that the job has already been generated once before.', required=False, action='store_true')
        parser.add_argument('--keep_failed', dest='keep_failed', help='Keep failed workers in the fleet. Useful for debugging workflows.', required=False, action='store_true')
        parser.add_argument('--os_env_name', dest='os_env_name', help='The name of the OpenStack environment you\'re working in. Only useful if you are working in OpenStack. If you omit this, you will be prompted for this information when necessary.', required=False)
        return parser

    def take_action(self, parsed_args):
        """Generate job orders for the selected workflow.

        Rewrites ``~/arch3/config/masterConfig.ini``, ``~/params.json`` and
        ``~/.youxia/config`` to match the requested workflow, then invokes the
        external ``Generator`` process on the INI files in ``~/ini-dir``.
        """
        workflow_name = vars(parsed_args)['workflow_name']
        force_generate = vars(parsed_args)['force_generate']
        keep_failed = vars(parsed_args)['keep_failed']
        os_env_name = vars(parsed_args)['os_env_name']
        self.log.debug('workflow_name: %s', workflow_name)
        self.log.debug('all workflows: '+workflowlister.WorkflowLister.get_workflow_names())
        cloud_env = os.environ['HOST_ENV'].upper()
        if workflow_name in workflowlister.WorkflowLister.get_workflow_keys():
            # First thing: if the directory is empty, don't bother going any further.
            if os.listdir(path='/home/ubuntu/ini-dir') != []:
                # (Re)run system configuration before generating anything.
                sysconfig_cmd = 'pancancer sysconfig'
                return_code = subprocess.call(sysconfig_cmd.split(' '))
                if return_code != 0:
                    self.log.warn('Attempt to (re)configure system may have encountered an error...')
                master_config_path = os.path.expanduser('~/arch3/config/masterConfig.ini')
                self.log.debug('setting check_previous_job_hash to false in '+master_config_path)
                master_config = configparser.ConfigParser()
                # Preserve option-name case (configparser lower-cases by default).
                master_config.optionxform = str
                # Update the master config so the generator process allows
                # duplicates when --force was given.
                master_config.read(master_config_path)
                # if --force then do NOT check previous hash
                master_config['generator']['check_previous_job_hash'] = str(not force_generate).lower()
                # keep_failed==true -> reap_failed_workers=false
                master_config['provision']['reap_failed_workers'] = str(not keep_failed).lower()
                with open(master_config_path, 'w') as master_config_file:
                    master_config.write(master_config_file, space_around_delimiters=True)

                # Pick the cloud-specific details (image, instance type, LVM
                # devices) for the environment named by $HOST_ENV.
                workflow_details = workflowlister.WorkflowLister.get_workflow_details(workflow_name)
                if cloud_env == 'AWS':
                    cloud_specific_details = workflow_details['cloud-specific-details']['aws']
                elif cloud_env == 'OPENSTACK':
                    # If there is only one OpenStack choice, we'll just go with that.
                    self.log.debug('OpenStack options: '+str(workflow_details['cloud-specific-details']['openstack']))
                    if len(workflow_details['cloud-specific-details']['openstack'].keys()) == 0:
                        print('No OpenStack environments are available. Exiting.')
                        sys.exit(1)
                    elif len(workflow_details['cloud-specific-details']['openstack'].keys()) == 1:
                        k = list(workflow_details['cloud-specific-details']['openstack'].keys())[0]
                        cloud_specific_details = workflow_details['cloud-specific-details']['openstack'][k]
                    else:
                        # Several choices: use --os_env_name if valid, otherwise
                        # prompt the user until a valid name is entered.
                        if os_env_name is None or os_env_name not in workflow_details['cloud-specific-details']['openstack']:
                            print('Please enter one of the following OpenStack configurations that are available for this workflow:')
                            for k in workflow_details['cloud-specific-details']['openstack']:
                                print(k)
                            user_value = input().strip()
                            while user_value.strip() == '' or user_value not in workflow_details['cloud-specific-details']['openstack']:
                                print('Sorry, but \''+user_value+'\' was not a valid value. Please try again; valid values are: ')
                                for k in workflow_details['cloud-specific-details']['openstack']:
                                    print(k)
                                user_value = input().strip()
                            cloud_specific_details = workflow_details['cloud-specific-details']['openstack'][user_value]
                        else:
                            # The user provided a legitimate environment name.
                            cloud_specific_details = workflow_details['cloud-specific-details']['openstack'][os_env_name]
                elif cloud_env == 'AZURE':
                    cloud_specific_details = workflow_details['cloud-specific-details']['azure']
                else:
                    self.log.error("Unrecognized cloud environment: "+cloud_env)

                workflow_version = ''
                if 'http_workflow' in workflow_details:
                    workflow_version = workflow_details['http_workflow']['version']
                else:
                    workflow_version = workflow_details['s3_workflow']['version']

                generator_cmd = 'Generator --workflow-name '+workflow_name+' --workflow-version '+workflow_version+' --workflow-path '+'/workflows/'+workflow_details['full_name']+' --ini-dir '+'/home/ubuntu/ini-dir --config /home/ubuntu/arch3/config/masterConfig.ini'
                self.log.debug('generator command will be: '+generator_cmd)

                # Before generating the job, update params.json with the
                # workflow/container info for the requested workflow.
                paramsData = ''
                with open('/home/ubuntu/params.json', 'r') as params_file:
                    paramsData = json.load(params_file)
                if 'http_workflow' in workflow_details:
                    paramsData['http_workflows'] = {}
                    paramsData['http_workflows'][workflow_name] = workflow_details['http_workflow']
                    paramsData['http_workflows'][workflow_name]['name'] = workflow_details['full_name']
                if 's3_workflow' in workflow_details:
                    paramsData['s3_workflows'] = {}
                    # Bug fix: the details key is 's3_workflow' (singular, as
                    # checked above); the previous 's3_workflows' lookup raised
                    # KeyError whenever this branch was taken.
                    paramsData['s3_workflows'][workflow_name] = workflow_details['s3_workflow']
                    paramsData['s3_workflows'][workflow_name]['name'] = workflow_details['full_name']
                if 'containers' in workflow_details:
                    paramsData['containers'] = workflow_details['containers']
                if 's3_containers' in workflow_details:
                    paramsData['s3_containers'] = workflow_details['s3_containers']
                if 'http_containers' in workflow_details:
                    paramsData['http_containers'] = workflow_details['http_containers']
                paramsData['lvm_device_whitelist'] = cloud_specific_details['lvm_devices']
                if paramsData['lvm_device_whitelist'] == "" or paramsData['lvm_device_whitelist'].strip() == "":
                    paramsData['single_node_lvm'] = "false"
                else:
                    paramsData['single_node_lvm'] = "true"
                paramsData['workflow_name'] = workflow_name
                # Now write the params.json file.
                with open('/home/ubuntu/params.json', 'w+') as params_file:
                    params_file.write(str(json.dumps(paramsData, sort_keys=True, indent=4) ))

                # Update the youxia config file with the correct image and
                # instance type for the detected cloud.
                config = configparser.ConfigParser()
                config.read('/home/ubuntu/.youxia/config')
                if cloud_env == 'AWS':
                    config['deployer']['instance_type'] = cloud_specific_details['instance-type']
                    config['deployer']['ami_image'] = cloud_specific_details['image']
                elif cloud_env == 'OPENSTACK':
                    config['deployer_openstack']['flavor'] = cloud_specific_details['instance-type']
                    config['deployer_openstack']['image_id'] = cloud_specific_details['image']
                elif cloud_env == 'AZURE':
                    config['deployer_azure']['flavor'] = cloud_specific_details['instance-type']
                    config['deployer_azure']['image_name'] = cloud_specific_details['image']
                with open('/home/ubuntu/.youxia/config', 'w') as youxia_configfile:
                    config.write(youxia_configfile, space_around_delimiters=True)

                # Restart the provisioner so it picks up the new config, then
                # run the external generator.
                provisioner_cmd = 'pancancer provisioner restart'
                return_code = subprocess.call(provisioner_cmd.split(' '))
                if return_code != 0:
                    self.log.warn('Attempt to restart the provisioner may have encountered an error...')
                return_code = subprocess.call(generator_cmd.split(' '))
                if return_code != 0:
                    self.log.warn('Attempt to generate jobs may have encountered an error...')
                else:
                    self.log.info('Job requests have been generated for the '+workflow_name+' using the INIs in ~/ini-dir')
            else:
                self.log.info('/home/ubuntu/ini-dir is empty. Place your INI files here before attempting to run the generator.')
        else:
            self.log.info(workflow_name+' is not the name of an available workflow.\nPlease use the command \'workflows list\' to see the list of currently available workflows.')
| ICGC-TCGA-PanCancer/cli | scripts/commands/generator.py | generator.py | py | 12,326 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "cliff.command.command",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cliff.command",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "workflowli... |
6105238329 | import numpy as np
import cv2
# Open the V4L camera at index 1 and request raw (non-RGB-converted) frames.
cap = cv2.VideoCapture(1 + cv2.CAP_V4L)
cap.set(cv2.CAP_PROP_CONVERT_RGB, 0)  # turn off RGB conversion

while True:
    # Capture frame-by-frame; stop cleanly if the camera yields no frame
    # (the original ignored the status flag and would crash on a None frame).
    ret, frame = cap.read()
    if not ret:
        break
    # Scale the raw samples down to 8 bits.  ``// 16`` maps 0..4095 to
    # 0..255, which assumes a 12-bit sensor -- TODO confirm.
    bf81 = np.array(frame // 16, dtype=np.uint8)
    # Create the binary mask of bright pixels.
    _, binary = cv2.threshold(bf81, 50, 255, cv2.THRESH_BINARY)
    # AND with the mask: 255 keeps the pixel, 0 clears it (the extra
    # ``im3[binary == 0] = 0`` step was redundant and has been dropped).
    im3 = cv2.bitwise_and(bf81, binary)
    # Mean intensity over the masked region only.
    Mean = cv2.mean(bf81, binary)
    # Overwrite the same console line.  The original Python-2-style trailing
    # comma built and discarded a tuple instead of emitting the '\r'.
    print('Mean =', Mean[0], sep='', end='\r', flush=True)
    cv2.imshow('Mask', binary)
    cv2.imshow('Original', bf81)
    cv2.imshow('Masked Image', im3)
    # detect waitkey of q to quit
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# Release the camera and close the preview windows.
cap.release()
cv2.destroyAllWindows()
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_V4L",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_CONVERT_RGB",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.ar... |
33540682823 | #!/usr/bin/env python3
import logging
from apache_beam.runners import DataflowRunner
from modules import combined_pipeline
def run(argv=None):
    """Build the combined pipeline and execute it.

    On non-Dataflow runners (e.g. the direct runner) this blocks until the
    pipeline finishes; Dataflow jobs are left running asynchronously.
    """
    logging.getLogger().setLevel(logging.INFO)

    pipeline = combined_pipeline.create_pipeline()
    result = pipeline.run(argv)

    if isinstance(pipeline.runner, DataflowRunner):
        return  # Dataflow executes remotely; do not wait here.
    result.wait_until_finish()
# Script entry point.
if __name__ == "__main__":
    run()
| HTTPArchive/data-pipeline | run_combined.py | run_combined.py | py | 405 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "modules.combined_pipeline.create_pipeline",
"line_number": 12,
"usage_type": "call"
},
{
"api... |
3449199506 | # -*- coding: utf-8 -*-
r"""
Módulo ``cpwc``
===============
O CPWC (*Coherent Plane Wave Compounding*) é um algoritmo utilizado para
reconstruir imagens quando o tipo de inspeção é por ondas planas (*plane
waves*). Nesse método, todos os elementos de um transdutor do tipo *array*
linear são disparados simultaneamente, criando uma única frente de onda que
ilumina o objeto sob inspeção, conforme ilustrado na
:numref:`fig_imaging_nde_pwi`.
.. figure:: figures/imaging/nde-pwi.png
:name: fig_imaging_nde_pwi
:width: 35 %
:align: center
Inspeção com ondas planas.
É possível transmitir ondas planas com inclinações, aplicando *delays* no
disparo dos elementos do transdutor, conforme indicado na
:numref:`fig_imaging_nde_pwi_inc`.
.. figure:: figures/imaging/nde-pwi-inc.png
:name: fig_imaging_nde_pwi_inc
:width: 35 %
:align: center
Inspeção com ondas planas.
O algoritmo CPWC produz uma imagem final somando as diversas imagens obtidas
a partir de ondas com ângulos diferentes. Cada imagem individual é formada
aplicando *delay*-e-soma nos sinais de A-scan, sendo que os *delays* aplicados
dependem da posição do ponto da imagem e do ângulo da onda plana. A
:numref:`fig_imaging_pwi` ilustra as distâncias percorridas por uma onda
emitida com uma inclinação :math:`\theta` em relação à superfície da peça.
.. figure:: figures/imaging/pwi.png
:name: fig_imaging_pwi
:width: 35 %
:align: center
Inspeção com ondas planas.
A distância :math:`d_\text{i}` que a onda percorre até atingir um ponto
:math:`(x, z)` é função da posição do ponto e da inclinação da onda:
.. math::
d_\text{i} = z\cos{\theta} + x\sin{\theta}.
Após atingir o ponto :math:`(x, z)`, a onda pode ser refletida. A distância
:math:`d_\text{v}` percorrida pela onda, do ponto :math:`(x, z)` até um
transdutor posicionado em :math:`(x_t, 0)` é:
.. math::
d_\text{v} = \sqrt{(x - x_t)^2 + z^2}.
O *delay* :math:`\tau` aplicado ao sinal do transdutor em :math:`x_t` é obtido
a partir da distância total percorrida pela onda e a sua velocidade :math:`c`
no meio:
.. math::
\tau_{x_t} = \frac{d_i + d_v}{c}.
Exemplo
-------
O *script* abaixo mostra o uso do algoritmo CPWC para a reconstrução de uma
imagem a partir de dados sintéticos, oriundos do simulador CIVA. (Assume-se
que os dados estão na mesma pasta em que o *script* é executado)
O *script* mostra o procedimento para realizar a leitura de um arquivo de
simulação, utilizando o módulo :mod:`framework.file_civa`; o processamento de
dados, utilizando os módulos :mod:`imaging.bscan` e :mod:`imaging.cpwc`; e o
pós-processamento de dados, utilizando o módulo :mod:`framework.post_proc`.
O resultado do *script* é uma imagem, comparando a imagem reconstruída com o
algoritmo B-scan e com o algoritmo CPWC. Além disso, a imagem mostra o
resultado do CPWC com pós-processamento.
.. plot:: plots/imaging/cpwc_example.py
:include-source:
:scale: 100
.. raw:: html
<hr>
"""
import numpy as np
import numba
from framework.data_types import DataInsp, ImagingROI, ImagingResult
from framework.utils import pwd_from_fmc
from surface.surface import Surface, SurfaceType, Lineparam
def cpwc_kernel(data_insp, roi=None, output_key=None, description="", sel_shot=0, c=None,
                cmed=None, angles=None):
    """Process A-scan data with the CPWC (Coherent Plane Wave Compounding) algorithm.

    Parameters
    ----------
    data_insp : DataInsp
        Inspection data: inspection/specimen/probe parameters and the
        structure in which results are stored.
    roi : ImagingROI
        Region of interest (dimensions in mm).  Defaults to ``ImagingROI()``;
        the default is created per call, not shared (the original signature
        used a mutable default instance shared across all calls).
    output_key : None or int
        Key identifying the result inside ``data_insp.imaging_results``.
        When ``None`` a fresh random key is generated; otherwise the result
        is stored (or overwritten) under the given key.
    description : str
        Free-text description stored with the result.
    sel_shot : int
        Shot index, for inspections where the probe was moved.
    c : int or float
        Wave speed in the specimen; read from ``data_insp`` when ``None``.
    cmed : int or float
        Wave speed in the coupling medium; read from ``data_insp`` when ``None``.
    angles : numpy.ndarray
        Plane-wave angles (degrees) used to build CPWC data from an FMC
        capture.  Defaults to ``np.arange(-10, 11)``.

    Returns
    -------
    int
        The result key (``output_key``).

    Raises
    ------
    TypeError
        If an argument has the wrong type or cannot be converted.
    NotImplementedError
        If the capture type is neither ``PWI`` nor ``FMC``.
    """
    # Default the mutable arguments here instead of in the signature, so a
    # single shared ImagingROI/ndarray instance is not reused across calls.
    if roi is None:
        roi = ImagingROI()
    if angles is None:
        angles = np.arange(-10, 10 + 1, 1)

    # --- Argument type checking. ---
    if type(data_insp) is not DataInsp:
        raise TypeError("O argumento ``data_insp`` não é um objeto do tipo ``DataInsp``.")

    if type(roi) is not ImagingROI:
        raise TypeError("O argumento ``roi`` não é um objeto do tipo ``ImagingROI``.")

    if output_key is not None:
        try:
            output_key = np.int32(output_key)
        except ValueError:
            raise TypeError("Não foi possível converter o argumento ``output_key`` para ``numpy.int32``.")

    try:
        description = str(description)
    except Exception:
        raise TypeError("Não foi possível converter o argumento ``description`` para o tipo ``str``.")

    if cmed is None:
        cmed = data_insp.inspection_params.coupling_cl
    else:
        try:
            cmed = float(cmed)
        except ValueError:
            raise TypeError("Não foi possível converter o argumento ``cmed`` para o tipo ``float``.")

    if c is None:
        c = data_insp.specimen_params.cl
    else:
        try:
            c = float(c)
        except ValueError:
            raise TypeError("Não foi possível converter o argumento ``c`` para o tipo ``float``.")

    if type(angles) is not np.ndarray:
        raise TypeError("O argumento ``angles`` não é do tipo ``np.ndarray``")

    # --- Extract the data needed by the algorithm. ---
    # Transducer element positions (mm -> m).
    xt = 1e-3 * data_insp.probe_params.elem_center[:, 0]

    # Sampling period and gate start (us -> s).
    ts = 1e-6 * data_insp.inspection_params.sample_time
    tgs = 1e-6 * data_insp.inspection_params.gate_start

    # A-scan data.  For FMC captures, plane-wave data is synthesized from the
    # FMC matrix for the requested set of angles.
    if data_insp.inspection_params.type_capt == "PWI":
        # Swap axes so the data matrix is ordered [emission, sample, element].
        theta = data_insp.inspection_params.angles / 180 * np.pi
        pwdata = np.swapaxes(data_insp.ascan_data[:, :, :, sel_shot], 0, 1)
    elif data_insp.inspection_params.type_capt == "FMC":
        theta = angles / 180 * np.pi
        fmcdata = data_insp.ascan_data[:, :, :, sel_shot]
        pwdata = pwd_from_fmc(fmcdata, angles, xt, c, ts)
    else:
        raise NotImplementedError("Tipo de captura inválido. Só é permitido ``PWI`` e ``FMC``.")

    # ROI grid (mm -> m).
    xr = 1e-3 * roi.w_points
    zr = 1e-3 * roi.h_points

    # --- Start of the CPWC algorithm. ---
    # Data dimensions.
    m = pwdata.shape[1]
    n = pwdata.shape[2]
    # ROI dimensions.
    m_r = zr.shape[0]
    n_r = xr.shape[0]
    # Image accumulator: one column, pixels in column-major order.
    img = np.zeros((m_r * n_r, 1), dtype=pwdata.dtype)
    for k, thetak in enumerate(theta):
        # Append a row of zeros so that out-of-range delay indices, remapped
        # to -1 below, read the zero row instead of real data.
        data = np.vstack((pwdata[k], np.zeros((1, n)))).astype(pwdata.dtype)
        # Compute the distance travelled by the wave to each ROI point and
        # back to each element, converted to delays and then sample indices.
        # Each row of ``j`` is one pixel of the final image, holding the
        # A-scan sample index for every element contributing to that pixel.
        if data_insp.inspection_params.type_insp == 'immersion':
            j = cpwc_roi_dist_immersion(xr, zr, xt, thetak, c, cmed, ts, tgs, data_insp.surf)
        else:
            j = cpwc_roi_dist(xr, zr, xt, thetak, c, ts, tgs)
        j = j.reshape(m_r * n_r, n)
        j[j >= m] = -1
        j[j < 0] = -1
        # Sum the A-scan samples coherently.
        aux = np.zeros(j.shape[0], dtype=pwdata.dtype)
        img[:, 0] += cpwc_sum(data, aux, j)
    f = img.reshape((m_r, n_r), order='F')
    # --- End of the CPWC algorithm. ---

    # Store the result.
    if output_key is None:
        # Create an ImagingResult holding the reconstructed image.
        result = ImagingResult(roi=roi, description=description)
        result.image = f

        # Draw random keys until an unused one is found.
        ii32 = np.iinfo(np.int32)
        while True:
            output_key = np.random.randint(low=ii32.min, high=ii32.max, dtype=np.int32)

            if output_key in data_insp.imaging_results:
                # Key already taken; draw again.
                continue
            else:
                # Unused key: store the result and leave the loop.
                data_insp.imaging_results[output_key] = result
                break
    else:
        # Store the result in an existing ImagingResult when the key exists.
        try:
            result = data_insp.imaging_results[output_key]
            result.roi = roi
            result.description = description
        except KeyError:
            # Key not found in the dictionary: create a new result object.
            result = ImagingResult(roi=roi, description=description)

        # Save the new image.
        result.image = f

        # Keep the result under the requested key.
        data_insp.imaging_results[output_key] = result

    # Return the key value.
    return output_key
@numba.njit(parallel=True)
def cpwc_roi_dist(xr, zr, xt, theta, c, ts, tgs):
    r"""Compute the DAS delay indices for the CPWC algorithm (contact mode).

    The delay for a pixel is the plane-wave path to the pixel
    (:math:`z\cos\theta + x\sin\theta`) plus the straight return path to each
    element, divided by the wave speed, converted to sample indices and
    shifted by the gate start.

    Parameters
    ----------
    xr, zr : np.ndarray
        ROI x and z coordinates, in m.
    xt : np.ndarray
        Element x coordinates, in m.
    theta : int, float
        Plane-wave tilt, in radians.
    c : int, float
        Wave speed in the medium.
    ts : int, float
        Sampling period.
    tgs : int, float
        Gate-start time.

    Returns
    -------
    np.ndarray
        int64 matrix of shape ``(len(xr) * len(zr), len(xt))`` with one row
        per ROI pixel (column-major pixel order).
    """
    n_cols = xr.shape[0]
    n_rows = zr.shape[0]
    n_elem = xt.shape[0]
    gate_idx = np.int64(tgs / ts)
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)

    idx = np.zeros((n_cols * n_rows, n_elem), dtype=np.int64)
    for col in numba.prange(n_cols):
        x = xr[col]
        for row in range(n_rows):
            z = zr[row]
            # One-way path of the tilted plane wave to the pixel.
            d_in = z * cos_t + x * sin_t
            # Return path from the pixel to every element.
            d_back = np.sqrt(z ** 2 + (x - xt) ** 2)
            idx[col * n_rows + row, :] = np.rint((d_in + d_back) / (c * ts)) - gate_idx
    return idx
def cpwc_roi_dist_immersion(xr, zr, xt, theta, c, cmed, ts, tgs, surf):
    r"""Compute the DAS delay indices for CPWC in immersion inspections.

    The time of flight of each ray is split between the coupling medium
    (speed ``cmed``) and the specimen (speed ``c``); the refraction of the
    plane wave at the surface follows Snell's law.

    Parameters
    ----------
    xr, zr : np.ndarray
        ROI x and z coordinates, in m.
    xt : np.ndarray
        Element x coordinates, in m.
    theta : int, float
        Plane-wave angle inside the specimen, in radians.
    c, cmed : int, float
        Wave speed in the specimen and in the coupling medium.
    ts, tgs : int, float
        Sampling period and gate-start time.
    surf : Surface
        External-surface description.  This code reads
        ``surf.surfaceparam.b`` as the surface depth, which assumes a linear
        surface parameterization -- TODO confirm against the Surface class.

    Returns
    -------
    np.ndarray
        int64 matrix of shape ``(len(xr) * len(zr), len(xt))`` with one row
        per ROI pixel (column-major pixel order).
    """
    m_r = zr.shape[0]
    n_r = xr.shape[0]
    ti_i = np.int64(tgs / ts)

    # ROI pixel coordinates in column-major order (x varies slowest), scaled
    # to mm as expected by Surface.cdist_medium().
    roi_coord = np.zeros((m_r * n_r, 3))
    for i in range(n_r):
        roi_coord[i * m_r:(i + 1) * m_r, 0] = xr[i]
        for jj in range(m_r):
            roi_coord[i * m_r + jj, 2] = zr[jj]
    roi_coord *= 1e3

    # Distance (inside the specimen) from each pixel to its entry point.
    di_mat = (roi_coord[:, 2] - surf.surfaceparam.b) / np.cos(theta)
    # Entry-point locations on the surface.
    entrypoints = np.zeros_like(roi_coord)
    entrypoints[:, 2] = surf.surfaceparam.b
    entrypoints[:, 0] = roi_coord[:, 0] - di_mat * np.sin(theta)
    # Plane-wave angle in the coupling medium, by Snell's law.
    theta_med = np.arcsin(np.sin(theta) * cmed / c)
    # Distance from each entry point to the origin of the plane wavefront.
    di_med = entrypoints[:, 2] * np.cos(theta_med) + entrypoints[:, 0] * np.sin(theta_med)

    # Return distances, split per medium, computed by the Surface object.
    elem_center = np.zeros((len(xt), 3))
    elem_center[:, 0] = xt * 1e3
    [dv_med, dv_mat] = surf.cdist_medium(elem_center, roi_coord)
    dv_med = dv_med.transpose()
    dv_mat = dv_mat.transpose()

    # Convert all distances back to meters.
    di_med *= 1e-3
    di_mat *= 1e-3
    dv_med *= 1e-3
    dv_mat *= 1e-3

    # Total time of flight in samples, accumulated per medium.
    d = np.zeros_like(dv_med)
    d += dv_med / (cmed * ts)
    d += dv_mat / (c * ts)
    for i in range(len(xt)):
        d[:, i] += di_med / (cmed * ts)
        d[:, i] += di_mat / (c * ts)
    # Round to sample indices and subtract the gate start.  (The debug print
    # of the refraction angles and several dead locals were removed.)
    return np.array(np.rint(d) - ti_i, dtype=np.int64)
@numba.njit(parallel=True)
def cpwc_sum(data, img, j):
    r"""Delay-and-sum accumulation for the CPWC algorithm.

    For every ROI pixel (one row of ``j``) the A-scan samples selected by the
    delay indices are summed across all elements into ``img``, which is both
    modified in place and returned.

    Parameters
    ----------
    data : np.ndarray
        Acquisition data, one column per element.
    img : np.ndarray
        Accumulator vector, one entry per ROI pixel.
    j : np.ndarray
        Delay-index matrix of shape ``(pixels, elements)``.

    Returns
    -------
    np.ndarray
        The accumulator ``img`` after the coherent sum.
    """
    n_pixels = j.shape[0]
    n_elems = j.shape[1]
    for pix in numba.prange(n_pixels):
        acc = img[pix]
        for elem in range(n_elems):
            acc += data[j[pix, elem], elem]
        img[pix] = acc
    return img
def cpwc_params():
    """Return the default parameter dictionary of the CPWC algorithm.

    Keys: ``roi`` (region of interest), ``output_key`` (result key),
    ``description`` (result description), ``sel_shot`` (shot index),
    ``c`` (wave speed in the specimen) and ``angles`` (plane-wave angles).
    """
    defaults = {
        "roi": ImagingROI(),
        "output_key": None,
        "description": "",
        "sel_shot": 0,
        "c": 5900.0,
        "angles": np.arange(-10, 10 + 1, 1),
    }
    return defaults
| matheusfdario/role-finder | AUSPEX-smart_wedge/imaging/cpwc.py | cpwc.py | py | 19,398 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "framework.data_types.ImagingROI",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "framework.data_types.DataInsp",
"line_number": 186,
"usage_type": "name"
},
{
"api... |
260051339 | #!/usr/bin/env python
'''
Handy script to prepare the data for Tensorflow object detection API.
'''
import tensorflow as tf
import yaml
import os
import sys
from object_detection.utils import dataset_util
# Command-line flags (TF1-style tf.app.flags).
flags = tf.app.flags
flags.DEFINE_string('output_path', 'out.record', 'Path to output TFRecord')
flags.DEFINE_string('annotation_file', '', 'Path to annotation file')
flags.DEFINE_string('data_folder', '', 'Path to data folder')
# All images are assumed to share these fixed dimensions; the image files are
# never opened to verify them, the flags are trusted as-is.
flags.DEFINE_integer('image_height', 600, 'Height of the image')
flags.DEFINE_integer('image_width', 800, 'Width of the image')
FLAGS = flags.FLAGS

# Map from traffic-light class name (as written in the annotation file) to
# the integer label id used in the TFRecord.
LABELS = {
    "Green" : 1,
    "Red" : 2,
    "Yellow" : 3,
    "off" : 4,
}
def create_tf_example(example):
    """Build a ``tf.train.Example`` proto for one annotated image.

    *example* is one entry from the annotation YAML: a dict with a
    'filename' and an 'annotations' list; each annotation carries
    xmin, ymin, x_width, y_height and a 'class' key present in LABELS.

    NOTE(review): box coordinates are normalized by the FLAGS-configured
    image size, not the actual image size — confirm every image really is
    image_width x image_height.
    """
    width = FLAGS.image_width
    height = FLAGS.image_height
    filename = example['filename'] # Filename of the image. Empty if image is not from file
    filename = filename.encode()
    with tf.gfile.GFile(example['filename'], 'rb') as fid:
        encoded_image = fid.read()
    image_format = 'jpg'.encode()
    xmins = []  # normalized left x coordinates (1 per box)
    xmaxs = []  # normalized right x coordinates (1 per box)
    ymins = []  # normalized top y coordinates (1 per box)
    ymaxs = []  # normalized bottom y coordinates (1 per box)
    classes_text = []  # string class name of each box
    classes = []  # integer class id of each box
    for box in example['annotations']:
        xmins.append(float(box['xmin'] / width))
        xmaxs.append(float((box['xmin'] + box['x_width']) / width))
        ymins.append(float(box['ymin'] / height))
        ymaxs.append(float((box['ymin']+ box['y_height']) / height))
        classes_text.append(box['class'].encode())
        classes.append(int(LABELS[box['class']]))
    # Feature keys follow the TF object-detection API TFRecord convention.
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_image),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example
def main(_):
    """Convert every annotated sample in the YAML file into one TFRecord file.

    Fixes: the original leaked the YAML file handle (open() without close),
    used the unsafe/deprecated bare yaml.load(), and never closed the
    TFRecord writer if conversion raised mid-way.
    """
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    try:
        yaml_file = FLAGS.annotation_file
        data_folder = FLAGS.data_folder
        # safe_load refuses arbitrary Python object construction; the
        # with-block guarantees the annotation file is closed.
        with open(yaml_file, 'rb') as f:
            samples = yaml.safe_load(f.read())
        for sample in samples:
            sample['filename'] = '%s%s' % (data_folder, sample['filename'])
            tf_example = create_tf_example(sample)
            writer.write(tf_example.SerializeToString())
    finally:
        writer.close()
if __name__ == '__main__':
    # tf.app.run() parses the flags defined above, then invokes main().
    tf.app.run()
{
"api_name": "tensorflow.app",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.gfile.GFile",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tens... |
37709437793 | from datetime import timedelta
from flask import Flask, render_template, session, url_for, request, jsonify, app
from pymongo import MongoClient
import hashlib
import json
# WARNING(security): database credentials are hard-coded in this connection
# URI — move them into configuration/environment before publishing the code.
client = MongoClient('mongodb://bibi:6666667!@3.34.129.197', 27017)
db = client.MyPick31
app = Flask(__name__)
############
#라우팅 함수# : 홈 / 디테일 / 회원가입 / 로그인 / about MyPick31 / DB페이지
############
## + 홈페이지 : 아이스크림 필터링함수 포함..
@app.route('/')
def home_page():
    """Render the landing page, with the user's nickname when logged in."""
    # No session -> anonymous view (guard clause instead of if/else).
    if 'email' not in session:
        return render_template('index.html')
    email1 = session['email']
    print('Logged in as '+email1)
    user_docs = list(db.userdb.find({'auth_id': email1}, {'_id': 0}))
    print(user_docs[0].get('nickname'))
    return render_template('index.html', session_email=email1, session_nickname=user_docs[0].get('nickname'))
@app.route('/detail')
def detail_page():
    """Render the detail page for the ice cream named in the query string."""
    ice_cream = request.args.get("ice_cream")
    # No session -> anonymous view (guard clause instead of if/else).
    if 'email' not in session:
        return render_template('detail.html', para_data=ice_cream)
    email1 = session['email']
    print('Logged in as '+email1)
    user_docs = list(db.userdb.find({'auth_id': email1}, {'_id': 0}))
    print(user_docs[0].get('nickname'))
    return render_template('detail.html', session_email=email1, session_nickname=user_docs[0].get('nickname'), para_data=ice_cream)
@app.route('/register')
def register_page():
    # Sign-up form; no session context is needed here.
    return render_template('register.html')
@app.route('/login')
def login_page():
    # Login form; no session context is needed here.
    return render_template('login.html')
@app.route('/about')
def about_page():
    """Render the about page, with the user's nickname when logged in."""
    # No session -> anonymous view (guard clause instead of if/else).
    if 'email' not in session:
        return render_template('about.html')
    email1 = session['email']
    print('Logged in as ' + email1)
    user_docs = list(db.userdb.find({'auth_id': email1}, {'_id': 0}))
    print(user_docs[0].get('nickname'))
    return render_template('about.html', session_email=email1, session_nickname=user_docs[0].get('nickname'))
# db에 데이터 넣는 html
@app.route('/insert_db')
def insert_db_page():
    """Render the data-entry page used to populate the flavor collections."""
    # No session -> anonymous view (guard clause instead of if/else).
    if 'email' not in session:
        return render_template('db_insert.html')
    email1 = session['email']
    print('Logged in as ' + email1)
    user_docs = list(db.userdb.find({'auth_id': email1}, {'_id': 0}))
    print(user_docs[0].get('nickname'))
    return render_template('db_insert.html', session_email=email1, session_nickname=user_docs[0].get('nickname'))
########################
#회원가입, 로그인, 로그아웃#
########################
# 회원가입
@app.route('/customer_register', methods=['POST'])
def register():
    """Create a new user unless the id or nickname is already taken.

    Bug fix: the original iterated the user collection and inserted/returned
    inside the loop body, so it only ever compared against the FIRST user
    and could insert duplicates. Check both uniqueness constraints against
    the whole collection before inserting.
    """
    auth_id = request.form['auth_id']
    pwd = request.form['pwd']
    nickname = request.form['nickname']
    # Store only the SHA-256 digest of the password.
    pw_hash = hashlib.sha256(pwd.encode('utf-8')).hexdigest()
    if db.userdb.find_one({'auth_id': auth_id}) is not None:
        return jsonify({'result': 'fail1'})   # id already registered
    if db.userdb.find_one({'nickname': nickname}) is not None:
        return jsonify({'result': 'fail2'})   # nickname already taken
    db.userdb.insert_one({'auth_id': auth_id, 'pwd': pw_hash, 'nickname': nickname})
    return jsonify({'result': 'success', 'userdb': auth_id})
# 로그인
@app.route('/customer_login', methods=['POST'])
def login():
    """Authenticate a user and establish a 10-minute session.

    Bug fixes: the original stored the e-mail in the session BEFORE
    verifying the password (any failed login still became "logged in"),
    and it returned fail as soon as the FIRST stored user did not match,
    so only one account could ever log in.
    """
    receive_id = request.form['receive_id']
    receive_pwd = request.form['receive_pwd']
    pwd_hash = hashlib.sha256(receive_pwd.encode('utf-8')).hexdigest()
    user = db.userdb.find_one({'auth_id': receive_id})
    if user is None:
        return jsonify({'result': 'fail2', 'userdb': 'failed'})   # unknown id
    if user.get('pwd') != pwd_hash:
        return jsonify({'result': 'fail1', 'userdb': 'failed'})   # wrong password
    # Establish the session only after successful authentication.
    session['email'] = receive_id
    session.permanent = True
    app.permanent_session_lifetime = timedelta(minutes=10)
    return jsonify({'result': 'success', 'userdb': user.get('nickname')})
#로그아웃
@app.route('/customer_logout', methods=['POST'])
def logout():
    """Log the user out by dropping the session e-mail key (no-op if absent)."""
    session.pop('email',None)
    return jsonify({'result':'success'})
########################
#아이스크림 정보 가져오기# -> detail에서 사용
########################
@app.route('/bring_ice_cream', methods=['GET'])
def bring_all_ice_cream():
    """Return the signature and season documents for one ice cream name."""
    ice_cream = request.args.get('ice_cream')
    bring_signature_db = list(db.signature.find({'name':ice_cream},{'_id':0}))
    bring_season_db = list(db.season.find({'name':ice_cream},{'_id':0}))
    return(jsonify({'result':'success', 'signature_data':bring_signature_db, 'season_data':bring_season_db}))
##################
##아이스크림 필터링##
##################
# final_flavor 전역변수 선언
#global season_final_flavor
# 최종 signature/season 정보 들어가는 변수
#bring_final_signature_db =[]
#bring_final_season_db = []
# bring_signature_db
@app.route('/bring_signature_ice_cream', methods=['POST'])
def bring_signature_ice_cream():
    """Return signature flavors matching the client's base/topping/syrup picks.

    With an empty selection every signature flavor is returned (success_1);
    otherwise only flavors matching at least one pick in every non-empty
    category are returned (success_2), or 'fail' when nothing matches.

    Bug fixes versus the original: it only returned a flavor when the match
    list contained CONSECUTIVE duplicates (single matches yielded 'fail'),
    an empty category excluded every flavor, comma-split tokens kept a
    leading space so doc-side comparisons could never match, and a wall of
    debug prints has been removed.
    """
    global signature_final_flavor
    selection = json.loads(request.form['ice_cream'])
    values = list(selection.values())
    if values == []:
        return jsonify({'result': 'success_1',
                        'data': list(db.signature.find({}, {'_id': 0}))})
    signature_final_flavor = []
    picks = values[0]  # [base1, base2, topping1, topping2, syrup1, syrup2]
    # Reuse the existing per-category collectors to gather candidate names.
    collectors = (signature_filtering_cbase1, signature_filtering_cbase2,
                  signature_filtering_ctopping1, signature_filtering_ctopping2,
                  signature_filtering_csyrup1, signature_filtering_csyrup2)
    for value, collect in zip(picks, collectors):
        collect(value)

    def _wanted(*parts):
        # Flatten comma-separated picks, dropping blanks and inner spaces.
        tokens = [t.replace(' ', '') for part in parts for t in part.split(',')]
        return [t for t in tokens if t]

    def _category_ok(field, wanted):
        # An empty category means "no constraint" in that category.
        if not wanted:
            return True
        have = [t.replace(' ', '') for t in field.split(',')]
        return any(t in have for t in wanted)

    want_base = _wanted(picks[0], picks[1])
    want_topping = _wanted(picks[2], picks[3])
    want_syrup = _wanted(picks[4], picks[5])
    matches = []
    for name in set(signature_final_flavor):
        doc = db.signature.find_one({'name': name}, {'_id': 0})
        if (_category_ok(doc['base'], want_base)
                and _category_ok(doc['topping'], want_topping)
                and _category_ok(doc['syrup'], want_syrup)):
            matches.append(doc)
    if matches:
        return jsonify({'result': 'success_2', 'data': matches})
    return jsonify({'result': 'fail'})
###signature 필터링에 사용되는 함수###
# signature를 할때 사용할 filter 함수와 season을 할 때 사용할 filter 함수 별도로 존재해야함.
def signature_filtering_cbase1(flavor):
    """Append to signature_final_flavor the names of signature flavors whose
    base field regex-matches the selected top-level base.

    Fixes: debug prints removed; the empty-selection path now returns the
    list (the original fell through returning None).
    """
    if flavor == "":
        return signature_final_flavor  # nothing selected in this category
    for doc in db.signature.find({'base': {'$regex': flavor}}, {'_id': 0}):
        signature_final_flavor.append(doc['name'])
    return signature_final_flavor
# cbase2 는 여러개의 정보가 들어갈 수 있기 때문에 for문이 들어가야함.
def signature_filtering_cbase2(flavor):
    """Append names of signature flavors whose base matches any selected
    sub-base (comma-separated list) to signature_final_flavor.

    Fix: tokens split from "a, b" kept a leading space, so the regex never
    matched those values — strip each token first. Debug prints removed.
    """
    if flavor == "":
        return signature_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.signature.find({'base': {'$regex': token}}, {'_id': 0}):
            signature_final_flavor.append(doc['name'])
    return signature_final_flavor
def signature_filtering_ctopping1(flavor):
    """Append names of signature flavors whose topping field regex-matches
    the selected top-level topping. Debug prints removed; empty-selection
    path now returns the list instead of None.
    """
    if flavor == "":
        return signature_final_flavor
    for doc in db.signature.find({'topping': {'$regex': flavor}}, {'_id': 0}):
        signature_final_flavor.append(doc['name'])
    return signature_final_flavor
def signature_filtering_ctopping2(flavor):
    """Append names of signature flavors whose topping matches any selected
    sub-topping. Fix: strip comma-split tokens (leading spaces broke the
    regex match); debug prints removed.
    """
    if flavor == "":
        return signature_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.signature.find({'topping': {'$regex': token}}, {'_id': 0}):
            signature_final_flavor.append(doc['name'])
    return signature_final_flavor
def signature_filtering_csyrup1(flavor):
    """Append names of signature flavors whose syrup field regex-matches the
    selected top-level syrup. Debug prints removed; empty-selection path
    now returns the list instead of None.
    """
    if flavor == "":
        return signature_final_flavor
    for doc in db.signature.find({'syrup': {'$regex': flavor}}, {'_id': 0}):
        signature_final_flavor.append(doc['name'])
    return signature_final_flavor
def signature_filtering_csyrup2(flavor):
    """Append names of signature flavors whose syrup matches any selected
    sub-syrup. Fix: strip comma-split tokens (leading spaces broke the
    regex match); debug prints removed.
    """
    if flavor == "":
        return signature_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.signature.find({'syrup': {'$regex': token}}, {'_id': 0}):
            signature_final_flavor.append(doc['name'])
    return signature_final_flavor
########################################
# bring_season_db
@app.route('/bring_season_ice_cream', methods=['POST'])
def bring_season_ice_cream():
    """Return season flavors matching the client's base/topping/syrup picks.

    Mirrors bring_signature_ice_cream for the ``season`` collection.

    Bug fixes versus the original: its final scan compared adjacent names
    with ``!=`` and returned an essentially arbitrary flavor (or 'fail'),
    an empty category excluded every flavor, comma-split tokens kept a
    leading space, and a wall of debug prints has been removed.
    """
    global season_final_flavor
    selection = json.loads(request.form['ice_cream'])
    values = list(selection.values())
    if values == []:
        return jsonify({'result': 'success_1',
                        'data': list(db.season.find({}, {'_id': 0}))})
    season_final_flavor = []
    picks = values[0]  # [base1, base2, topping1, topping2, syrup1, syrup2]
    collectors = (season_filtering_cbase1, season_filtering_cbase2,
                  season_filtering_ctopping1, season_filtering_ctopping2,
                  season_filtering_csyrup1, season_filtering_csyrup2)
    for value, collect in zip(picks, collectors):
        collect(value)

    def _wanted(*parts):
        # Flatten comma-separated picks, dropping blanks and inner spaces.
        tokens = [t.replace(' ', '') for part in parts for t in part.split(',')]
        return [t for t in tokens if t]

    def _category_ok(field, wanted):
        # An empty category means "no constraint" in that category.
        if not wanted:
            return True
        have = [t.replace(' ', '') for t in field.split(',')]
        return any(t in have for t in wanted)

    want_base = _wanted(picks[0], picks[1])
    want_topping = _wanted(picks[2], picks[3])
    want_syrup = _wanted(picks[4], picks[5])
    matches = []
    for name in set(season_final_flavor):
        doc = db.season.find_one({'name': name}, {'_id': 0})
        if (_category_ok(doc['base'], want_base)
                and _category_ok(doc['topping'], want_topping)
                and _category_ok(doc['syrup'], want_syrup)):
            matches.append(doc)
    if matches:
        return jsonify({'result': 'success_2', 'data': matches})
    return jsonify({'result': 'fail'})
####season 필터링에 사용되는 함수#####
def season_filtering_cbase1(flavor):
    """Append to season_final_flavor the names of season flavors whose base
    field regex-matches the selected top-level base. Debug prints removed;
    empty-selection path now returns the list instead of None.
    """
    if flavor == "":
        return season_final_flavor
    for doc in db.season.find({'base': {'$regex': flavor}}, {'_id': 0}):
        season_final_flavor.append(doc['name'])
    return season_final_flavor
# cbase2 는 여러개의 정보가 들어갈 수 있기 때문에 for문이 들어가야함.
def season_filtering_cbase2(flavor):
    """Append names of season flavors whose base matches any selected
    sub-base. Fix: strip comma-split tokens (leading spaces broke the
    regex match); debug prints removed.
    """
    if flavor == "":
        return season_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.season.find({'base': {'$regex': token}}, {'_id': 0}):
            season_final_flavor.append(doc['name'])
    return season_final_flavor
def season_filtering_ctopping1(flavor):
    """Append names of season flavors whose topping field regex-matches the
    selected top-level topping. Debug prints removed; empty-selection path
    now returns the list instead of None.
    """
    if flavor == "":
        return season_final_flavor
    for doc in db.season.find({'topping': {'$regex': flavor}}, {'_id': 0}):
        season_final_flavor.append(doc['name'])
    return season_final_flavor
def season_filtering_ctopping2(flavor):
    """Append names of season flavors whose topping matches any selected
    sub-topping. Fix: strip comma-split tokens (leading spaces broke the
    regex match); debug prints removed.
    """
    if flavor == "":
        return season_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.season.find({'topping': {'$regex': token}}, {'_id': 0}):
            season_final_flavor.append(doc['name'])
    return season_final_flavor
def season_filtering_csyrup1(flavor):
    """Append names of season flavors whose syrup field regex-matches the
    selected top-level syrup. Debug prints removed; empty-selection path
    now returns the list instead of None.
    """
    if flavor == "":
        return season_final_flavor
    for doc in db.season.find({'syrup': {'$regex': flavor}}, {'_id': 0}):
        season_final_flavor.append(doc['name'])
    return season_final_flavor
def season_filtering_csyrup2(flavor):
    """Append names of season flavors whose syrup matches any selected
    sub-syrup. Fix: strip comma-split tokens (leading spaces broke the
    regex match); debug prints removed.
    """
    if flavor == "":
        return season_final_flavor
    for token in flavor.split(','):
        token = token.strip()
        if not token:
            continue
        for doc in db.season.find({'syrup': {'$regex': token}}, {'_id': 0}):
            season_final_flavor.append(doc['name'])
    return season_final_flavor
#####################
##아이스크림필터링-bibi#
#####################
# 체크한 베이스로 필터링
@app.route('/checkBase', methods=["POST"])
def checkBase():
    """Return the cbase documents for every base the client checked."""
    payload = json.loads(request.form["sendBases"])
    checked = payload['checkedBases']
    # One query per checked top-level base; results stay grouped per base.
    matched = [list(db.cbase.find({"cbase1": name}, {'_id': 0})) for name in checked]
    return (jsonify({'result': 'success', 'msg': "서버와 연결되었음-베이스", 'data': matched}))
# 체크한 토핑으로 필터링
@app.route('/checkTopping', methods=["POST"])
def checkTopping():
    """Return the ctopping documents for every topping the client checked."""
    payload = json.loads(request.form["sendToppings"])
    checked = payload['checkedToppings']
    # One query per checked top-level topping; results stay grouped per topping.
    matched = [list(db.ctopping.find({"ctopping1": name}, {'_id': 0})) for name in checked]
    return (jsonify({'result': 'success', 'msg': "서버와 연결되었음-베이스", 'data': matched}))
# 체크한 시럽으로 필터링
@app.route('/checkSyrup', methods=["POST"])
def checkSyrup():
    """Return the csyrup documents for every syrup the client checked."""
    payload = json.loads(request.form["sendSyrups"])
    checked = payload['checkedSyrups']
    # One query per checked top-level syrup; results stay grouped per syrup.
    matched = [list(db.csyrup.find({"csyrup1": name}, {'_id': 0})) for name in checked]
    return (jsonify({'result': 'success', 'msg': "서버와 연결되었음-베이스", 'data': matched}))
###############
#DB insert API#
###############
# Category-Base
@app.route('/createCB', methods=['POST'])
def createCB():
    """Insert one category-base document from the posted form fields."""
    db.cbase.insert_one({
        'cbase1': request.form['cbase1'],
        'cbase2': request.form['cbase2'],
    })
    return jsonify({'result': 'success', 'msg': 'cbase에 저장완료'})
# Category-Topping
@app.route('/createCT', methods=['POST'])
def createCT():
    """Insert one category-topping document from the posted form fields."""
    db.ctopping.insert_one({
        'ctopping1': request.form['ctopping1'],
        'ctopping2': request.form['ctopping2'],
    })
    return jsonify({'result': 'success', 'msg': 'ctopping에 저장완료'})
# Category - Syrup
@app.route('/createCS', methods=['POST'])
def createCS():
    """Insert one category-syrup document from the posted form fields."""
    db.csyrup.insert_one({
        'csyrup1': request.form['csyrup1'],
        'csyrup2': request.form['csyrup2'],
    })
    return jsonify({'result': 'success', 'msg': 'csyrup에 저장완료'})
# Flavor - Signature
@app.route('/createF_SG', methods=['POST'])
def createF_signature():
    """Insert one signature-flavor document from the posted form fields."""
    fields = ('id', 'name', 'name_eng', 'base', 'topping',
              'syrup', 'kcal', 'allergens', 'img')
    doc = {field: request.form[field] for field in fields}
    db.signature.insert_one(doc)
    return jsonify({'result': 'success', 'msg': 'signature 저장완료'})
# Flavor - Season
@app.route('/createF_SS', methods=['POST'])
def createF_season():
    """Insert one season-flavor document from the posted form fields."""
    fields = ('id', 'name', 'name_eng', 'base', 'topping',
              'syrup', 'kcal', 'allergens', 'img')
    doc = {field: request.form[field] for field in fields}
    db.season.insert_one(doc)
    return jsonify({'result': 'success', 'msg': 'season 저장완료'})
############
# Like API #
############
@app.route('/like_ice_cream', methods=['POST'])
def like_ice_cream():
    """Record a like; success_1 when newly added, success_2 when it already exists."""
    ice_cream = request.form['ice_cream_name']
    like_user_nickname = request.form['like_user_nickname']
    existing = list(db.like.find({'ice_cream': ice_cream, 'user_nickname': like_user_nickname}, {'_id': 0}))
    print(existing)
    if existing:
        return jsonify({'result': 'success_2', 'msg': 'likeDB에 있습니다.'})
    db.like.insert_one({'ice_cream': ice_cream, 'user_nickname': like_user_nickname})
    return jsonify({'result': 'success_1', 'msg': 'likeDB에 추가'})
@app.route('/check_like_ice_cream', methods=['POST'])
def check_like_ice_cream():
    """Report whether the given user has liked the given ice cream.

    Bug fix: the original had two independent ifs (== 1 and == 0) and fell
    off the end returning None for any other count; a single if/else
    guarantees a response is always produced.
    """
    ice_cream = request.form['ice_cream_name']
    like_user_nickname = request.form['like_user_nickname']
    liked = db.like.find_one({'ice_cream': ice_cream, 'user_nickname': like_user_nickname})
    if liked is not None:
        return jsonify({'result': 'success', 'msg': 'yes_check'})
    else:
        return jsonify({'result': 'success', 'msg': 'no_check'})
@app.route('/like_cancel_ice_cream', methods=['POST'])
def cancel_like_ice_cream():
    """Remove the user's like record(s) for the given ice cream."""
    ice_cream = request.form['ice_cream_name']
    like_user_nickname = request.form['like_user_nickname']
    # Collection.remove() is deprecated in PyMongo; delete_many keeps the
    # original delete-all-matches semantics.
    db.like.delete_many({'ice_cream': ice_cream, 'user_nickname': like_user_nickname})
    return jsonify({'result': 'success'})
@app.route('/counting_like' , methods=['POST'])
def counting_like():
    """Return how many users have liked the given ice cream.

    Uses count_documents instead of materialising every like document just
    to take len() of the list; debug prints removed.
    """
    ice_cream = request.form['ice_cream_name']
    like_count = db.like.count_documents({'ice_cream': ice_cream})
    return jsonify({'result': 'success', 'like_count': like_count})
############
#Review API#
############
# Review Edit
@app.route('/edit_review', methods=['POST'])
def edit_review():
    """Overwrite the review this user wrote for the given ice cream.

    Bug fix: the original wrapped the result of the deprecated update() in
    list(), which yields the result dict's keys, not anything useful; use
    update_one and report the modified count instead.
    """
    ice_cream_name = request.form['ice_cream_name']
    reviewer = request.form['reviewer']
    new_review = request.form['edit_review']
    result = db.review.update_one({'reviewer': reviewer, 'ice_cream': ice_cream_name},
                                  {'$set': {'review': new_review}})
    return jsonify({'result': 'success', 'data': result.modified_count})
# Review Save
@app.route('/save_review', methods=['POST'])
def save_reivew():
    """Persist a review unless this user already reviewed this ice cream."""
    ice_cream_name = request.form['ice_cream_name']
    reviewer = request.form['reviewer']
    review = request.form['review']
    existing = list(db.review.find({'ice_cream': ice_cream_name, 'reviewer': reviewer}, {'_id': 0}))
    print(existing)
    if existing:
        # Already reviewed: hand back the stored text.
        return (jsonify({'result': 'fail', 'msg': '이미 작성된 review', 'data': existing[0]['review']}))
    db.review.insert_one({
        'ice_cream': ice_cream_name,
        'reviewer': reviewer,
        'review': review
    })
    return (jsonify({'result': 'success', 'msg': 'review 저장완료'}))
# Review_bring
@app.route('/bring_review', methods=['GET'])
def bring_review():
    """Return every review stored for the requested ice cream."""
    target = request.args.get("ice_cream")
    reviews = list(db.review.find({'ice_cream': target}, {'_id': 0}))
    return (jsonify({'result': 'success', 'data': reviews}))
if __name__ == '__main__':
    # Secret key enables Flask sessions; do not hard-code it in production.
    app.secret_key = 'Juni'
    app.run('localhost', port=9000, debug=True)
{
"api_name": "pymongo.MongoClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_nu... |
16779725846 | #quicksort
from random import shuffle
import pygame, time
from math import ceil
from random import shuffle
import pygame, time
sizex = 1800
sizey = 1200
surface = pygame.display.set_mode((sizex,sizey))
colour = [255, 5, 5]
black = (0,0,0)
red = (255, 5, 5)
black = (0,0,0)
green = (0, 255, 5)
blue = (5, 5, 255)
n = 1200
nums = [x for x in range(1, n+1)]
shuffle(nums)
delay = 0.00001
def draw_shapes(nums, position, colour): #draw the list
    """Redraw the sub-list *nums* as bars starting at bar index *position*.

    Clears that horizontal strip of the window first, then draws one bar
    per value (height proportional to the value), sleeping *delay* between
    bars so the animation is visible.
    """
    pygame.draw.rect(surface, black, pygame.Rect(ceil(position*(sizex/n)), 0, ceil(len(nums)*sizex/n), sizey))
    pygame.display.update()
    for i in range(len(nums)):
        time.sleep(delay)
        pygame.draw.rect(surface, colour, pygame.Rect(ceil(position*(sizex/n)) + ceil(i*(sizex/n)), sizey-ceil((nums[i])*(sizey/n)), ceil(sizex/n), ceil((nums[i])*(sizey/n))))
        pygame.display.update(ceil(position*(sizex/n)) + ceil(i*(sizex/n)), sizey-ceil((nums[i])*(sizey/n)), ceil(sizex/n), ceil((nums[i])*(sizey/n)))
def quick_sort(nums, pos):
    """Recursively quicksort *nums*, animating each partition at bar offset *pos*."""
    time.sleep(delay)
    draw_shapes(nums, pos, red)
    if len(nums) == 0:
        draw_shapes(nums, pos, blue)
        return nums
    # Last element is the pivot; partition the rest around it.
    pivot = nums[-1]
    smaller, larger = [], []
    for value in nums[:-1]:
        if value <= pivot:
            smaller.append(value)
        else:
            larger.append(value)
    smaller = quick_sort(smaller, pos)
    larger = quick_sort(larger, pos + len(smaller))
    # Reassemble: sorted left part, pivot, sorted right part.
    combined = smaller + [pivot] + larger
    draw_shapes(combined, pos, red)
    return combined
# Draw the shuffled list, sort it with the animated quicksort, then
# repaint the fully sorted list in green, bar by bar.
draw_shapes(nums, 0, red)
nums = quick_sort(nums, 0,)
for i in range(len(nums)):
    time.sleep(delay/2)
    pygame.draw.rect(surface, green, pygame.Rect(ceil((sizex/n)) + ceil(i*(sizex/n)), sizey-ceil((nums[i])*(sizey/n)), ceil(sizex/n), ceil((nums[i])*(sizey/n))))
    pygame.display.update(ceil((sizex/n)) + ceil(i*(sizex/n)), sizey-ceil((nums[i])*(sizey/n)), ceil(sizex/n), ceil((nums[i])*(sizey/n)))
# Keep the window open until the user closes it.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.dra... |
13990607578 | """
This is faster approach leveraging counting sort algorithm,
whose complexity is linear; O(n), where n = len(s)
It may not be evident that algorithm below is linear, given the
intrinsic nested loops (3 levels). But one way to look at that,
is that we are imposing a tree structure of 2 levels above the list.
On the first level we are grouping per frequency, and on the
second level we group by character. At the bottom level we have
a permutation of the list itself.
Therefore, regardless of how many levels the tree structure above has,
ultimately the dominating factor is the bottom level (recall
for example that leaves level in a full binary tree, has as much
data as the rest of the tree). The bottom level in our imaginary
tree has as many elements as the list (size n); hence the dominating
factor in the nested loops is n itself (adding the extra cost of the
above layers would make cost a multiple of n, leading to O(n)).
Note:
The original count sort algorithm may assume a fixed range
in the values, and iterate through that range. Here we are doing
an small optimization, by iterating only over the observed sub-range
in the list.
"""
from collections import defaultdict
from itertools import imap
class Solution(object):
    def frequencySort(self, s):
        """Return *s* reordered so characters appear in decreasing frequency.

        Counting-sort style, O(n): bucket characters by their frequency,
        then emit buckets from the highest observed count down to the
        lowest, repeating each character `count` times.

        Fix: the original used Python-2-only builtins (dict.iteritems,
        xrange, itertools.imap) and crashed on Python 3; items()/range()
        and a generator expression work identically on both versions.
        """
        hist = defaultdict(lambda: 0)
        for c in s:
            hist[c] += 1
        cnt = defaultdict(lambda: [])
        min_k, max_k = len(s), 0
        for c, k in hist.items():
            cnt[k].append(c)
            min_k = min(min_k, k)
            max_k = max(max_k, k)
        sorted_s = ''
        for k in range(max_k, min_k - 1, -1):
            if k in cnt:
                sorted_s += ''.join(c * k for c in cnt[k])
        return sorted_s
| dariomx/topcoder-srm | leetcode/zero-pass/google/sort-characters-by-frequency/Solution1.py | Solution1.py | py | 1,767 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "itertools.imap",
"line_number": 46,
"usage_type": "call"
}
] |
18540102269 |
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import os
import random
import math
from datetime import datetime
from collections import Counter
import pandas as pd
import numpy as np
import cv2
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn.model_selection import train_test_split
import xml.etree.ElementTree as ET
import torch
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class FruitDataset(Dataset):
def __init__(self, paths, bb, y, transforms=False):
self.transforms = transforms
self.paths = paths.values
self.bb = bb.values
self.y = y.values
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
path = self.paths[idx]
y_class = self.y[idx]
x, y_bb = transformsXY(path, self.bb[idx], self.transforms)
x = normalize(x)
x = np.rollaxis(x, 2)
return x, y_class, y_bb
# Reading an image
def read_image(path):
return cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
def create_mask(bb, x):
"""Creates a mask for the bounding box of same shape as image"""
rows, cols, *_ = x.shape
Y = np.zeros((rows, cols))
bb = bb.astype(np.int)
Y[bb[0]:bb[2], bb[1]:bb[3]] = 1.
return Y
def mask_to_bb(Y):
"""Convert mask Y to a bounding box, assumes 0 as background nonzero object"""
cols, rows = np.nonzero(Y)
if len(cols) == 0:
return np.zeros(4, dtype=np.float32)
top_row = np.min(rows)
left_col = np.min(cols)
bottom_row = np.max(rows)
right_col = np.max(cols)
return np.array([left_col, top_row, right_col, bottom_row], dtype=np.float32)
def create_bb_array(x):
"""Generates bounding box array from a train_df row"""
return np.array([x[5], x[4], x[7], x[6]])
def resize_image_bb(read_path, write_path, bb, sz):
"""Resize an image and its bounding box and write image to new path"""
im = read_image(read_path)
im_resized = cv2.resize(im, (int(1.49 * sz), sz))
Y_resized = cv2.resize(create_mask(bb, im), (int(1.49 * sz), sz))
new_path = str(write_path / read_path.parts[-1])
cv2.imwrite(new_path, cv2.cvtColor(im_resized, cv2.COLOR_RGB2BGR))
return new_path, mask_to_bb(Y_resized)
# modified from fast.ai
def crop(im, r, c, target_r, target_c):
return im[r:r + target_r, c:c + target_c]
# random crop to the original size
def random_crop(x, r_pix=8):
""" Returns a random crop"""
r, c, *_ = x.shape
c_pix = round(r_pix * c / r)
rand_r = random.uniform(0, 1)
rand_c = random.uniform(0, 1)
start_r = np.floor(2 * rand_r * r_pix).astype(int)
start_c = np.floor(2 * rand_c * c_pix).astype(int)
return crop(x, start_r, start_c, r - 2 * r_pix, c - 2 * c_pix)
def center_crop(x, r_pix=8):
r, c, *_ = x.shape
c_pix = round(r_pix * c / r)
return crop(x, r_pix, c_pix, r - 2 * r_pix, c - 2 * c_pix)
def rotate_cv(im, deg, y=False, mode=cv2.BORDER_REFLECT, interpolation=cv2.INTER_AREA):
""" Rotates an image by deg degrees"""
r, c, *_ = im.shape
M = cv2.getRotationMatrix2D((c / 2, r / 2), deg, 1)
if y:
return cv2.warpAffine(im, M, (c, r), borderMode=cv2.BORDER_CONSTANT)
return cv2.warpAffine(im, M, (c, r), borderMode=mode, flags=cv2.WARP_FILL_OUTLIERS + interpolation)
def random_cropXY(x, Y, r_pix=8):
""" Returns a random crop"""
r, c, *_ = x.shape
c_pix = round(r_pix * c / r)
rand_r = random.uniform(0, 1)
rand_c = random.uniform(0, 1)
start_r = np.floor(2 * rand_r * r_pix).astype(int)
start_c = np.floor(2 * rand_c * c_pix).astype(int)
xx = crop(x, start_r, start_c, r - 2 * r_pix, c - 2 * c_pix)
YY = crop(Y, start_r, start_c, r - 2 * r_pix, c - 2 * c_pix)
return xx, YY
def transformsXY(path, bb, transforms):
x = cv2.imread(str(path)).astype(np.float32)
x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) / 255
Y = create_mask(bb, x)
if transforms:
rdeg = (np.random.random() - .50) * 20
x = rotate_cv(x, rdeg)
Y = rotate_cv(Y, rdeg, y=True)
if np.random.random() > 0.5:
x = np.fliplr(x).copy()
Y = np.fliplr(Y).copy()
x, Y = random_cropXY(x, Y)
else:
x, Y = center_crop(x), center_crop(Y)
return x, mask_to_bb(Y)
def create_corner_rect(bb, color='red'):
bb = np.array(bb, dtype=np.float32)
return plt.Rectangle((bb[1], bb[0]), bb[3] - bb[1], bb[2] - bb[0], color=color,
fill=False, lw=3)
def show_corner_bb(im, bb):
plt.imshow(im)
plt.gca().add_patch(create_corner_rect(bb))
plt.show()
def normalize(im):
"""Normalizes images with Imagenet stats."""
imagenet_stats = np.array([[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]])
return (im - imagenet_stats[0]) / imagenet_stats[1]
def getData():
train_path ='../data/Train_Images/'
df = pd.read_csv('../data/Train.csv', usecols=['Image_ID'])
for index, row in df.iterrows():
row['Image_ID'] = train_path + row['Image_ID'] + '.jpg'
row['Image_ID'] = Path(row['Image_ID'])
df.rename(
columns=({'Image_ID': 'filename'}),
inplace=True,
)
df['width'] = pd.read_csv('../data/Train.csv', usecols=['width'])
df['height'] = pd.read_csv('../data/Train.csv', usecols=['height'])
df['class'] = pd.read_csv('../data/Train.csv', usecols=['class'])
df['xmin'] = pd.read_csv('../data/Train.csv', usecols=['xmin'])
df['ymin'] = pd.read_csv('../data/Train.csv', usecols=['ymin'])
xmax = []
ymax = []
for index, row in df.iterrows():
xmax.append(row['xmin'] + row['width'])
ymax.append(row['ymin'] + row['height'])
df['xmax'] = xmax
df['ymax'] = ymax
df_train = df
class_dict = {'fruit_healthy': 0, 'fruit_brownspot': 1, 'fruit_woodiness': 2}
key_list = list(class_dict.keys())
val_list = list(class_dict.values())
df_train['class'] = df_train['class'].apply(lambda x: class_dict[x])
# Populating Training DF with new paths and bounding boxes
new_paths = []
new_bbs = []
train_path_resized = Path('../data/Train_Images_Resized/')
for index, row in df_train.iterrows():
new_path, new_bb = resize_image_bb(row['filename'], train_path_resized, create_bb_array(row.values), 300)
new_paths.append(new_path)
new_bbs.append(new_bb)
df_train['new_path'] = new_paths
df_train['new_bb'] = new_bbs
df_train = df_train.reset_index()
X = df_train[['new_path', 'new_bb']]
Y = df_train['class']
return X, Y, key_list, val_list
| byrongt12/CNN_fruit_disease_detection | code/dataset.py | dataset.py | py | 6,842 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "numpy.rollaxis",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
... |
19271830686 | import uuid
from flask import request
from flask.views import MethodView
from flask_smorest import abort, Blueprint
from resources.db import *
from schemas import StoreSchema
from models import StoreModel
from resources.db import db
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
blb = Blueprint("stores", __name__, description="Operations on stores")
@blb.route("/store/<int:store_id>")
class Store(MethodView):
@blb.response(200, StoreSchema)
def get(self, store_id):
store = StoreModel.query.get_or_404(store_id)
return store
# try:
# return stores[store_id]
# except KeyError:
# abort(404, message="Store not found.")
def delete(self, store_id):
store = StoreModel.query.get_or_404(store_id)
db.session.delete(store)
db.session.commit()
return {"message": "Store Deleted"}
# raise NotImplemented("Deleting a Store is not implemented.")
# try:
# del stores[store_id]
# return {"message": "Store Deleted."}
# except KeyError:
# abort(404, message="Store not found.")
@blb.route("/store")
class StoreList(MethodView):
@blb.response(200, StoreSchema(many=True))
def get(self):
return StoreModel.query.all()
# return stores.values()
# return {"stores": list(stores.values())}
@blb.arguments(StoreSchema)
@blb.response(200, StoreSchema)
def post(self, store_data):
# store_data = request.get_json()
# if "name" not in store_data:
# abort(
# 400,
# message="Bad request. Ensure 'name' is included in the JSON payload.",
# )
# for store in stores.values():
# if store_data["name"] == store["name"]:
# abort(400, message=f"Store already exists.")
#
# store_id = uuid.uuid4().hex
# store = {**store_data, "id": store_id}
# stores[store_id] = store
store = StoreModel(**store_data)
try:
db.session.add(store)
db.session.commit()
except IntegrityError:
abort(
400,
message="A store with that name already exists.",
)
except SQLAlchemyError:
abort(500, message=f"An error occurred creating the store.{SQLAlchemyError}")
return store, 201
| ahmad22us/rest-apis-project | resources/store.py | store.py | py | 2,426 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_smorest.Blueprint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.StoreModel.query.get_or_404",
"line_number": 18,
"usage_type": "call"
},
{
... |
40857498901 | from __future__ import print_function
import numpy as np
import scipy as sp
from healpy import query_disc
from numba import jit
from picca import constants
from picca.utils import print
# npb = number of parallel bins (to avoid collision with numpy np)
npb = None
ntb = None
ntm= None
npm= None
rp_max = None
rp_min = None
z_cut_max = None
z_cut_min = None
rt_max = None
angmax = None
nside = None
counter = None
ndata = None
ndata2 = None
zref = None
alpha= None
alpha2= None
alpha_abs= None
lambda_abs = None
lambda_abs2 = None
data = None
data2 = None
cosmo=None
rej = None
lock = None
x_correlation = None
ang_correlation = None
remove_same_half_plate_close_pairs = False
def fill_neighs(pix):
for ipix in pix:
for d1 in data[ipix]:
npix = query_disc(nside,[d1.xcart,d1.ycart,d1.zcart],angmax,inclusive = True)
npix = [p for p in npix if p in data]
neighs = [d for p in npix for d in data[p] if d1.thid != d.thid]
ang = d1^neighs
w = ang<angmax
neighs = np.array(neighs)[w]
d1.dneighs = [d for d in neighs if d1.ra > d.ra and (d.z[-1]+d1.z[-1])/2.>=z_cut_min and (d.z[-1]+d1.z[-1])/2.<z_cut_max ]
def fill_neighs_x_correlation(pix):
for ipix in pix:
for d1 in data[ipix]:
npix = query_disc(nside,[d1.xcart,d1.ycart,d1.zcart],angmax,inclusive = True)
npix = [p for p in npix if p in data2]
neighs = [d for p in npix for d in data2[p] if d1.thid != d.thid]
ang = d1^neighs
w = (ang<angmax)
neighs = np.array(neighs)[w]
d1.dneighs = [d for d in neighs if (d.z[-1]+d1.z[-1])/2.>=z_cut_min and (d.z[-1]+d1.z[-1])/2.<z_cut_max ]
def cf(pix):
xi = np.zeros(npb*ntb)
we = np.zeros(npb*ntb)
rp = np.zeros(npb*ntb)
rt = np.zeros(npb*ntb)
z = np.zeros(npb*ntb)
nb = np.zeros(npb*ntb,dtype=sp.int64)
for ipix in pix:
for d1 in data[ipix]:
print("\rcomputing xi: {}%".format(round(counter.value*100./ndata,2)),end="")
with lock:
counter.value += 1
for d2 in d1.dneighs:
ang = d1^d2
same_half_plate = (d1.plate == d2.plate) and\
( (d1.fid<=500 and d2.fid<=500) or (d1.fid>500 and d2.fid>500) )
if ang_correlation:
cw,cd,crp,crt,cz,cnb = fast_cf(d1.z,10.**d1.ll,10.**d1.ll,d1.we,d1.de,
d2.z,10.**d2.ll,10.**d2.ll,d2.we,d2.de,ang,same_half_plate)
else:
cw,cd,crp,crt,cz,cnb = fast_cf(d1.z,d1.r_comov,d1.rdm_comov,d1.we,d1.de,
d2.z,d2.r_comov,d2.rdm_comov,d2.we,d2.de,ang,same_half_plate)
xi[:len(cd)]+=cd
we[:len(cw)]+=cw
rp[:len(crp)]+=crp
rt[:len(crp)]+=crt
z[:len(crp)]+=cz
nb[:len(cnb)]+=cnb.astype(int)
setattr(d1,"neighs",None)
w = we>0
xi[w]/=we[w]
rp[w]/=we[w]
rt[w]/=we[w]
z[w]/=we[w]
return we,xi,rp,rt,z,nb
@jit
def fast_cf(z1,r1,rdm1,w1,d1, z2,r2,rdm2,w2,d2, ang,same_half_plate):
wd1 = d1*w1
wd2 = d2*w2
if ang_correlation:
rp = r1/r2[:,None]
if not x_correlation:
rp[(rp<1.)] = 1./rp[(rp<1.)]
rt = ang*np.ones_like(rp)
else:
rp = (r1-r2[:,None])*sp.cos(ang/2)
if not x_correlation :
rp = abs(rp)
rt = (rdm1+rdm2[:,None])*sp.sin(ang/2)
wd12 = wd1*wd2[:,None]
w12 = w1*w2[:,None]
z = (z1+z2[:,None])/2
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
rp = rp[w]
rt = rt[w]
z = z[w]
wd12 = wd12[w]
w12 = w12[w]
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
bins = bt + ntb*bp
if remove_same_half_plate_close_pairs and same_half_plate:
w = abs(rp)<(rp_max-rp_min)/npb
wd12[w] = 0.
w12[w] = 0.
cd = np.bincount(bins,weights=wd12)
cw = np.bincount(bins,weights=w12)
crp = np.bincount(bins,weights=rp*w12)
crt = np.bincount(bins,weights=rt*w12)
cz = np.bincount(bins,weights=z*w12)
cnb = np.bincount(bins,weights=(w12>0.))
return cw,cd,crp,crt,cz,cnb
def dmat(pix):
dm = np.zeros(npb*ntb*ntm*npm)
wdm = np.zeros(npb*ntb)
rpeff = np.zeros(ntm*npm)
rteff = np.zeros(ntm*npm)
zeff = np.zeros(ntm*npm)
weff = np.zeros(ntm*npm)
npairs = 0
npairs_used = 0
for p in pix:
for d1 in data[p]:
print("\rcomputing xi: {}%".format(round(counter.value*100./ndata,3)),end="")
with lock:
counter.value += 1
order1 = d1.order
r1 = d1.r_comov
rdm1 = d1.rdm_comov
w1 = d1.we
l1 = d1.ll
z1 = d1.z
r = sp.random.rand(len(d1.dneighs))
w=r>rej
npairs += len(d1.dneighs)
npairs_used += w.sum()
for d2 in np.array(d1.dneighs)[w]:
same_half_plate = (d1.plate == d2.plate) and\
( (d1.fid<=500 and d2.fid<=500) or (d1.fid>500 and d2.fid>500) )
order2 = d2.order
ang = d1^d2
r2 = d2.r_comov
rdm2 = d2.rdm_comov
w2 = d2.we
l2 = d2.ll
z2 = d2.z
fill_dmat(l1,l2,r1,r2,rdm1,rdm2,z1,z2,w1,w2,ang,wdm,dm,rpeff,rteff,zeff,weff,same_half_plate,order1,order2)
setattr(d1,"neighs",None)
return wdm,dm.reshape(npb*ntb,npm*ntm),rpeff,rteff,zeff,weff,npairs,npairs_used
@jit
def fill_dmat(l1,l2,r1,r2,rdm1,rdm2,z1,z2,w1,w2,ang,wdm,dm,rpeff,rteff,zeff,weff,same_half_plate,order1,order2):
rp = (r1[:,None]-r2)*sp.cos(ang/2)
if not x_correlation:
rp = abs(rp)
rt = (rdm1[:,None]+rdm2)*sp.sin(ang/2)
z = (z1[:,None]+z2)/2.
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
bins = bt + ntb*bp
bins = bins[w]
m_bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npm).astype(int)
m_bt = (rt/rt_max*ntm).astype(int)
m_bins = m_bt + ntm*m_bp
m_bins = m_bins[w]
sw1 = w1.sum()
sw2 = w2.sum()
ml1 = sp.average(l1,weights=w1)
ml2 = sp.average(l2,weights=w2)
dl1 = l1-ml1
dl2 = l2-ml2
slw1 = (w1*dl1**2).sum()
slw2 = (w2*dl2**2).sum()
n1 = len(l1)
n2 = len(l2)
ij = np.arange(n1)[:,None]+n1*np.arange(n2)
ij = ij[w]
we = w1[:,None]*w2
we = we[w]
if remove_same_half_plate_close_pairs and same_half_plate:
wsame = abs(rp[w])<(rp_max-rp_min)/npb
we[wsame] = 0.
c = np.bincount(m_bins,weights=we*rp[w])
rpeff[:c.size] += c
c = np.bincount(m_bins,weights=we*rt[w])
rteff[:c.size] += c
c = np.bincount(m_bins,weights=we*z[w])
zeff[:c.size] += c
c = np.bincount(m_bins,weights=we)
weff[:c.size] += c
c = np.bincount(bins,weights=we)
wdm[:len(c)] += c
eta1 = np.zeros(npm*ntm*n1)
eta2 = np.zeros(npm*ntm*n2)
eta3 = np.zeros(npm*ntm*n1)
eta4 = np.zeros(npm*ntm*n2)
eta5 = np.zeros(npm*ntm)
eta6 = np.zeros(npm*ntm)
eta7 = np.zeros(npm*ntm)
eta8 = np.zeros(npm*ntm)
c = np.bincount(ij%n1+n1*m_bins,weights=(np.ones(n1)[:,None]*w2)[w]/sw2)
eta1[:len(c)]+=c
c = np.bincount((ij-ij%n1)//n1+n2*m_bins,weights = (w1[:,None]*np.ones(n2))[w]/sw1)
eta2[:len(c)]+=c
c = np.bincount(m_bins,weights=(w1[:,None]*w2)[w]/sw1/sw2)
eta5[:len(c)]+=c
if order2==1:
c = np.bincount(ij%n1+n1*m_bins,weights=(np.ones(n1)[:,None]*w2*dl2)[w]/slw2)
eta3[:len(c)]+=c
c = np.bincount(m_bins,weights=(w1[:,None]*(w2*dl2))[w]/sw1/slw2)
eta6[:len(c)]+=c
if order1==1:
c = np.bincount((ij-ij%n1)//n1+n2*m_bins,weights = ((w1*dl1)[:,None]*np.ones(n2))[w]/slw1)
eta4[:len(c)]+=c
c = np.bincount(m_bins,weights=((w1*dl1)[:,None]*w2)[w]/slw1/sw2)
eta7[:len(c)]+=c
if order2==1:
c = np.bincount(m_bins,weights=((w1*dl1)[:,None]*(w2*dl2))[w]/slw1/slw2)
eta8[:len(c)]+=c
ubb = np.unique(m_bins)
for k, (ba,m_ba) in enumerate(zip(bins,m_bins)):
dm[m_ba+npm*ntm*ba]+=we[k]
i = ij[k]%n1
j = (ij[k]-i)//n1
for bb in ubb:
dm[bb+npm*ntm*ba] += we[k]*(eta5[bb]+eta6[bb]*dl2[j]+eta7[bb]*dl1[i]+eta8[bb]*dl1[i]*dl2[j])\
- we[k]*(eta1[i+n1*bb]+eta3[i+n1*bb]*dl2[j]+eta2[j+n2*bb]+eta4[j+n2*bb]*dl1[i])
def metal_dmat(pix,abs_igm1="LYA",abs_igm2="SiIII(1207)"):
dm = np.zeros(npb*ntb*ntm*npm)
wdm = np.zeros(npb*ntb)
rpeff = np.zeros(ntm*npm)
rteff = np.zeros(ntm*npm)
zeff = np.zeros(ntm*npm)
weff = np.zeros(ntm*npm)
npairs = 0
npairs_used = 0
for p in pix:
for d1 in data[p]:
print("\rcomputing metal dmat {} {}: {}%".format(abs_igm1,abs_igm2,round(counter.value*100./ndata,3)),end="")
with lock:
counter.value += 1
r = sp.random.rand(len(d1.dneighs))
w=r>rej
npairs += len(d1.dneighs)
npairs_used += w.sum()
for d2 in np.array(d1.dneighs)[w]:
r1 = d1.r_comov
rdm1 = d1.rdm_comov
z1_abs1 = 10**d1.ll/constants.absorber_IGM[abs_igm1]-1
r1_abs1 = cosmo.r_comoving(z1_abs1)
rdm1_abs1 = cosmo.dm(z1_abs1)
w1 = d1.we
wzcut = z1_abs1<d1.zqso
r1 = r1[wzcut]
rdm1 = rdm1[wzcut]
w1 = w1[wzcut]
r1_abs1 = r1_abs1[wzcut]
rdm1_abs1 = rdm1_abs1[wzcut]
z1_abs1 = z1_abs1[wzcut]
same_half_plate = (d1.plate == d2.plate) and\
( (d1.fid<=500 and d2.fid<=500) or (d1.fid>500 and d2.fid>500) )
ang = d1^d2
r2 = d2.r_comov
rdm2 = d2.rdm_comov
z2_abs2 = 10**d2.ll/constants.absorber_IGM[abs_igm2]-1
r2_abs2 = cosmo.r_comoving(z2_abs2)
rdm2_abs2 = cosmo.dm(z2_abs2)
w2 = d2.we
wzcut = z2_abs2<d2.zqso
r2 = r2[wzcut]
rdm2 = rdm2[wzcut]
w2 = w2[wzcut]
r2_abs2 = r2_abs2[wzcut]
rdm2_abs2 = rdm2_abs2[wzcut]
z2_abs2 = z2_abs2[wzcut]
rp = (r1[:,None]-r2)*sp.cos(ang/2)
if not x_correlation:
rp = abs(rp)
rt = (rdm1[:,None]+rdm2)*sp.sin(ang/2)
w12 = w1[:,None]*w2
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
if remove_same_half_plate_close_pairs and same_half_plate:
wp = abs(rp) < (rp_max-rp_min)/npb
w12[wp] = 0.
bA = bt + ntb*bp
wA = (bp<npb) & (bt<ntb) & (bp >=0)
c = np.bincount(bA[wA],weights=w12[wA])
wdm[:len(c)]+=c
rp_abs1_abs2 = (r1_abs1[:,None]-r2_abs2)*sp.cos(ang/2)
if not x_correlation:
rp_abs1_abs2 = abs(rp_abs1_abs2)
rt_abs1_abs2 = (rdm1_abs1[:,None]+rdm2_abs2)*sp.sin(ang/2)
zwe12 = (1+z1_abs1[:,None])**(alpha_abs[abs_igm1]-1)*(1+z2_abs2)**(alpha_abs[abs_igm2]-1)/(1+zref)**(alpha_abs[abs_igm1]+alpha_abs[abs_igm2]-2)
bp_abs1_abs2 = sp.floor((rp_abs1_abs2-rp_min)/(rp_max-rp_min)*npm).astype(int)
bt_abs1_abs2 = (rt_abs1_abs2/rt_max*ntm).astype(int)
bBma = bt_abs1_abs2 + ntm*bp_abs1_abs2
wBma = (bp_abs1_abs2<npm) & (bt_abs1_abs2<ntm) & (bp_abs1_abs2>=0)
wAB = wA & wBma
c = np.bincount(bBma[wAB]+npm*ntm*bA[wAB],weights=w12[wAB]*zwe12[wAB])
dm[:len(c)]+=c
c = np.bincount(bBma[wAB],weights=rp_abs1_abs2[wAB]*w12[wAB]*zwe12[wAB])
rpeff[:len(c)]+=c
c = np.bincount(bBma[wAB],weights=rt_abs1_abs2[wAB]*w12[wAB]*zwe12[wAB])
rteff[:len(c)]+=c
c = np.bincount(bBma[wAB],weights=(z1_abs1[:,None]+z2_abs2)[wAB]/2*w12[wAB]*zwe12[wAB])
zeff[:len(c)]+=c
c = np.bincount(bBma[wAB],weights=w12[wAB]*zwe12[wAB])
weff[:len(c)]+=c
if ((not x_correlation) and (abs_igm1 != abs_igm2)) or (x_correlation and (lambda_abs == lambda_abs2)):
r1 = d1.r_comov
rdm1 = d1.rdm_comov
w1 = d1.we
z1_abs2 = 10**d1.ll/constants.absorber_IGM[abs_igm2]-1
r1_abs2 = cosmo.r_comoving(z1_abs2)
rdm1_abs2 = cosmo.dm(z1_abs2)
wzcut = z1_abs2<d1.zqso
r1 = r1[wzcut]
rdm1 = rdm1[wzcut]
w1 = w1[wzcut]
z1_abs2 = z1_abs2[wzcut]
r1_abs2 = r1_abs2[wzcut]
rdm1_abs2 = rdm1_abs2[wzcut]
r2 = d2.r_comov
rdm2 = d2.rdm_comov
w2 = d2.we
z2_abs1 = 10**d2.ll/constants.absorber_IGM[abs_igm1]-1
r2_abs1 = cosmo.r_comoving(z2_abs1)
rdm2_abs1 = cosmo.dm(z2_abs1)
wzcut = z2_abs1<d2.zqso
r2 = r2[wzcut]
rdm2 = rdm2[wzcut]
w2 = w2[wzcut]
z2_abs1 = z2_abs1[wzcut]
r2_abs1 = r2_abs1[wzcut]
rdm2_abs1 = rdm2_abs1[wzcut]
rp = (r1[:,None]-r2)*sp.cos(ang/2)
if not x_correlation:
rp = abs(rp)
rt = (rdm1[:,None]+rdm2)*sp.sin(ang/2)
w12 = w1[:,None]*w2
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
if remove_same_half_plate_close_pairs and same_half_plate:
wp = abs(rp) < (rp_max-rp_min)/npb
w12[wp] = 0.
bA = bt + ntb*bp
wA = (bp<npb) & (bt<ntb) & (bp >=0)
c = np.bincount(bA[wA],weights=w12[wA])
wdm[:len(c)]+=c
rp_abs2_abs1 = (r1_abs2[:,None]-r2_abs1)*sp.cos(ang/2)
if not x_correlation:
rp_abs2_abs1 = abs(rp_abs2_abs1)
rt_abs2_abs1 = (rdm1_abs2[:,None]+rdm2_abs1)*sp.sin(ang/2)
zwe21 = (1+z1_abs2[:,None])**(alpha_abs[abs_igm2]-1)*(1+z2_abs1)**(alpha_abs[abs_igm1]-1)/(1+zref)**(alpha_abs[abs_igm1]+alpha_abs[abs_igm2]-2)
bp_abs2_abs1 = sp.floor((rp_abs2_abs1-rp_min)/(rp_max-rp_min)*npm).astype(int)
bt_abs2_abs1 = (rt_abs2_abs1/rt_max*ntm).astype(int)
bBam = bt_abs2_abs1 + ntm*bp_abs2_abs1
wBam = (bp_abs2_abs1<npm) & (bt_abs2_abs1<ntm) & (bp_abs2_abs1>=0)
wAB = wA & wBam
c = np.bincount(bBam[wAB],weights=rp_abs2_abs1[wAB]*w12[wAB]*zwe21[wAB])
rpeff[:len(c)]+=c
c = np.bincount(bBam[wAB],weights=rt_abs2_abs1[wAB]*w12[wAB]*zwe21[wAB])
rteff[:len(c)]+=c
c = np.bincount(bBam[wAB],weights=(z1_abs2[:,None]+z2_abs1)[wAB]/2*w12[wAB]*zwe21[wAB])
zeff[:len(c)]+=c
c = np.bincount(bBam[wAB],weights=w12[wAB]*zwe21[wAB])
weff[:len(c)]+=c
c = np.bincount(bBam[wAB]+npm*ntm*bA[wAB],weights=w12[wAB]*zwe21[wAB])
dm[:len(c)]+=c
setattr(d1,"neighs",None)
return wdm,dm.reshape(npb*ntb,npm*ntm),rpeff,rteff,zeff,weff,npairs,npairs_used
n1d = None
lmin = None
lmax = None
dll = None
def cf1d(pix):
xi1d = np.zeros(n1d**2)
we1d = np.zeros(n1d**2)
nb1d = np.zeros(n1d**2,dtype=sp.int64)
for d in data[pix]:
bins = ((d.ll-lmin)/dll+0.5).astype(int)
bins = bins + n1d*bins[:,None]
wde = d.we*d.de
we = d.we
xi1d[bins] += wde * wde[:,None]
we1d[bins] += we*we[:,None]
nb1d[bins] += (we*we[:,None]>0.).astype(int)
w = we1d>0
xi1d[w]/=we1d[w]
return we1d,xi1d,nb1d
def x_forest_cf1d(pix):
xi1d = np.zeros(n1d**2)
we1d = np.zeros(n1d**2)
nb1d = np.zeros(n1d**2,dtype=sp.int64)
for d1 in data[pix]:
bins1 = ((d1.ll-lmin)/dll+0.5).astype(int)
wde1 = d1.we*d1.de
we1 = d1.we
d2thingid = [d2.thid for d2 in data2[pix]]
neighs = data2[pix][np.in1d(d2thingid,[d1.thid])]
for d2 in neighs:
bins2 = ((d2.ll-lmin)/dll+0.5).astype(int)
bins = bins1 + n1d*bins2[:,None]
wde2 = d2.we*d2.de
we2 = d2.we
xi1d[bins] += wde1 * wde2[:,None]
we1d[bins] += we1*we2[:,None]
nb1d[bins] += (we1*we2[:,None]>0.).astype(int)
w = we1d>0
xi1d[w]/=we1d[w]
return we1d,xi1d,nb1d
v1d = {}
c1d = {}
max_diagram = None
cfWick = {}
## auto
def wickT(pix):
T1 = np.zeros((npb*ntb,npb*ntb))
T2 = np.zeros((npb*ntb,npb*ntb))
T3 = np.zeros((npb*ntb,npb*ntb))
T4 = np.zeros((npb*ntb,npb*ntb))
T5 = np.zeros((npb*ntb,npb*ntb))
T6 = np.zeros((npb*ntb,npb*ntb))
wAll = np.zeros(npb*ntb)
nb = np.zeros(npb*ntb,dtype=sp.int64)
npairs = 0
npairs_used = 0
for ipix in pix:
r = sp.random.rand(len(data[ipix]))
w = r>rej
npairs += len(data[ipix])
npairs_used += w.sum()
if w.sum()==0: continue
for d1 in [ td for ti,td in enumerate(data[ipix]) if w[ti] ]:
print("\rcomputing xi: {}%".format(round(counter.value*100./ndata/(1.-rej),3)),end="")
with lock:
counter.value += 1
if len(d1.dneighs)==0: continue
v1 = v1d[d1.fname](d1.ll)
w1 = d1.we
c1d_1 = (w1*w1[:,None])*c1d[d1.fname](abs(d1.ll-d1.ll[:,None]))*np.sqrt(v1*v1[:,None])
r1 = d1.r_comov
z1 = d1.z
for i2,d2 in enumerate(d1.dneighs):
ang12 = d1^d2
v2 = v1d[d2.fname](d2.ll)
w2 = d2.we
c1d_2 = (w2*w2[:,None])*c1d[d2.fname](abs(d2.ll-d2.ll[:,None]))*np.sqrt(v2*v2[:,None])
r2 = d2.r_comov
z2 = d2.z
fill_wickT123(r1,r2,ang12,w1,d2.we,z1,z2,c1d_1,c1d_2,wAll,nb,T1,T2,T3)
if max_diagram<=3: continue
### d3 and d2 have the same 'fname'
for d3 in d1.dneighs[:i2]:
ang13 = d1^d3
ang23 = d2^d3
v3 = v1d[d3.fname](d3.ll)
w3 = d3.we
c1d_3 = (w3*w3[:,None])*c1d[d3.fname](abs(d3.ll-d3.ll[:,None]))*np.sqrt(v3*v3[:,None])
r3 = d3.r_comov
z3 = d3.z
fill_wickT45(r1,r2,r3, ang12,ang13,ang23, w1,w2,w3,
z1,z2,z3, c1d_1,c1d_2,c1d_3,
d1.fname,d2.fname,d3.fname,
T4,T5)
### TODO: when there is two different catalogs
### d3 and d1 have the same 'fname'
return wAll, nb, npairs, npairs_used, T1, T2, T3, T4, T5, T6
@jit
def fill_wickT123(r1,r2,ang,w1,w2,z1,z2,c1d_1,c1d_2,wAll,nb,T1,T2,T3):
n1 = len(r1)
n2 = len(r2)
i1 = np.arange(n1)
i2 = np.arange(n2)
zw1 = ((1+z1)/(1+zref))**(alpha-1)
zw2 = ((1+z2)/(1+zref))**(alpha2-1)
bins = i1[:,None]+n1*i2
rp = (r1[:,None]-r2)*sp.cos(ang/2)
if not x_correlation:
rp = abs(rp)
rt = (r1[:,None]+r2)*sp.sin(ang/2)
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
ba = bt + ntb*bp
we = w1[:,None]*w2
we1 = w1[:,None]*np.ones(w2.size)
we2 = np.ones(w1.size)[:,None]*w2
zw = zw1[:,None]*zw2
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
if w.sum()==0: return
bins = bins[w]
ba = ba[w]
we = we[w]
we1 = we1[w]
we2 = we2[w]
zw = zw[w]
for k1 in range(ba.size):
p1 = ba[k1]
i1 = bins[k1]%n1
j1 = (bins[k1]-i1)//n1
wAll[p1] += we[k1]
nb[p1] += 1
T1[p1,p1] += we[k1]*zw[k1]
for k2 in range(k1+1,ba.size):
p2 = ba[k2]
i2 = bins[k2]%n1
j2 = (bins[k2]-i2)//n1
if i1==i2:
prod = c1d_2[j1,j2]*we1[k1]*zw1[i1]
T2[p1,p2] += prod
T2[p2,p1] += prod
elif j1==j2:
prod = c1d_1[i1,i2]*we2[k2]*zw2[j1]
T2[p1,p2] += prod
T2[p2,p1] += prod
else:
prod = c1d_1[i1,i2]*c1d_2[j1,j2]
T3[p1,p2] += prod
T3[p2,p1] += prod
return
@jit
def fill_wickT45(r1,r2,r3, ang12,ang13,ang23, w1,w2,w3, z1,z2,z3, c1d_1,c1d_2,c1d_3, fname1,fname2,fname3, T4,T5):
"""
"""
### forest-1 x forest-2
rp = (r1[:,None]-r2)*sp.cos(ang12/2.)
if not x_correlation:
rp = np.absolute(rp)
rt = (r1[:,None]+r2)*sp.sin(ang12/2.)
pix1_12 = (np.arange(r1.size)[:,None]*np.ones(r2.size)).astype(int)
pix2_12 = (np.ones(r1.size)[:,None]*np.arange(r2.size)).astype(int)
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
if w.sum()==0: return
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
ba12 = bt + ntb*bp
ba12[~w] = 0
cf12 = cfWick['{}_{}'.format(fname1,fname2)][ba12]
cf12[~w] = 0.
ba12 = ba12[w]
pix1_12 = pix1_12[w]
pix2_12 = pix2_12[w]
### forest-1 x forest-3
rp = (r1[:,None]-r3)*sp.cos(ang13/2.)
if not x_correlation:
rp = np.absolute(rp)
rt = (r1[:,None]+r3)*sp.sin(ang13/2.)
pix1_13 = (np.arange(r1.size)[:,None]*np.ones(r3.size)).astype(int)
pix3_13 = (np.ones(r1.size)[:,None]*np.arange(r3.size)).astype(int)
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
if w.sum()==0: return
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
ba13 = bt + ntb*bp
ba13[~w] = 0
cf13 = cfWick['{}_{}'.format(fname1,fname3)][ba13]
cf13[~w] = 0.
ba13 = ba13[w]
pix1_13 = pix1_13[w]
pix3_13 = pix3_13[w]
### forest-2 x forest-3
rp = (r2[:,None]-r3)*sp.cos(ang23/2.)
if not x_correlation:
rp = np.absolute(rp)
rt = (r2[:,None]+r3)*sp.sin(ang23/2.)
pix2_23 = (np.arange(r2.size)[:,None]*np.ones(r3.size)).astype(int)
pix3_23 = (np.ones(r2.size)[:,None]*np.arange(r3.size)).astype(int)
w = (rp<rp_max) & (rt<rt_max) & (rp>=rp_min)
if w.sum()==0: return
bp = sp.floor((rp-rp_min)/(rp_max-rp_min)*npb).astype(int)
bt = (rt/rt_max*ntb).astype(int)
ba23 = bt + ntb*bp
ba23[~w] = 0
cf23 = cfWick['{}_{}'.format(fname2,fname3)][ba23]
cf23[~w] = 0.
ba23 = ba23[w]
pix2_23 = pix2_23[w]
pix3_23 = pix3_23[w]
### Wick T4 and T5
for k1,p1 in enumerate(ba12):
tpix1_12 = pix1_12[k1]
tpix2_12 = pix2_12[k1]
for k2,p2 in enumerate(ba13):
tpix1_13 = pix1_13[k2]
tpix3_13 = pix3_13[k2]
tcf23 = cf23[tpix2_12,tpix3_13]
if tpix1_12==tpix1_13:
wcorr = w1[tpix1_12]*tcf23 ### TODO work on the good formula
T4[p1,p2] += wcorr
T4[p2,p1] += wcorr
else:
wcorr = c1d_1[tpix1_12,tpix1_13]*tcf23 ### TODO work on the good formula
T5[p1,p2] += wcorr
T5[p2,p1] += wcorr
return
| vserret/picca | py/picca/cf.py | cf.py | py | 24,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "healpy.query_disc",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "healpy.query_disc",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"li... |
22729212301 | #!/usr/bin/python3
import argparse
import os
import re
parser = argparse.ArgumentParser(description='Creates a directory with rasdaman import-ready files structure by creating symlinks to the cubeR native file structure.')
parser.add_argument('--dataDir', default='/media/GFTP/landsupport/cubeR/tiles/', help='directory containing tiles subdirs')
parser.add_argument('--dateFrom', default='1900-01-01')
parser.add_argument('--dateTo', default='3000-01-01')
parser.add_argument('--tilesFile', help='path to a file storing a list of tiles to be processed (one per line)')
parser.add_argument('--tiles', help='comma-separated list of tiles to be processed')
parser.add_argument('band', help='name of a band to be processed')
parser.add_argument('targetDir', help='directory to write symbolic links to')
args = parser.parse_args()
bandF = '_%s_' % args.band
tiles = []
if args.tilesFile is not None:
with open(args.tilesFile) as f:
tiles = f.readlines()
tiles = [x.strip() for x in tiles]
if args.tiles is not None:
tiles += args.tiles.split(',')
if not os.path.exists(args.targetDir):
os.makedirs(args.targetDir, 0o770)
ext = None
try: from osgeo import gdal
except: ext = '.tif'
utms = os.listdir(args.dataDir)
utms.sort()
if len(tiles) > 0:
utms = [x for x in utms if x in tiles]
targetPaths = []
for utm in utms:
if not os.path.isdir(os.path.join(args.dataDir, utm)):
continue
files = os.listdir(os.path.join(args.dataDir, utm))
files.sort()
for fl in files:
if bandF in fl:
localPath = os.path.join(args.dataDir, utm, fl)
(date, band, tile) = fl[0:-4].split('_')
if len(tiles) > 0 and tile not in tiles:
continue
period = re.sub('^([0-9]+(-[0-9]+)?(-[0-9]+)?)', '', date)
if period == '':
period = 'none'
date = re.sub('^([0-9]+(-[0-9]+)?(-[0-9]+)?).*$', '\\1', date)
if date > args.dateTo[0:len(date)] or date < args.dateFrom[0:len(date)]:
continue
if len(date) == 4:
date += '-01'
if len(date) == 7:
date += '-01'
# assume all files have same format
if ext is None:
ext = '.tif'
if 'JP2' in gdal.Open(localPath).GetDriver().ShortName:
ext = '.jp2'
targetPath = os.path.join(args.targetDir, '_'.join((date, period, band, tile)) + ext)
if os.path.islink(targetPath) or os.path.isfile(targetPath):
os.unlink(targetPath)
os.symlink(localPath, targetPath)
targetPaths.append(os.path.basename(targetPath))
toDel = set(os.listdir(args.targetDir)) - set(targetPaths)
for i in toDel:
try:
os.unlink(os.path.join(args.targetDir, i))
except: pass
| IVFL-BOKU/landsupport | python/rename2rasdaman.py | rename2rasdaman.py | py | 2,855 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
... |
11488764915 | from enum import Enum
from pathlib import Path
from typer import Option, Typer
from .upload import upload
class Track(str, Enum):
internal = "internal"
alpha = "alpha"
beta = "beta"
production = "production"
rollout = "rollout"
app = Typer()
@app.callback()
def callback():
"""
Android Publisher
"""
@app.command(name="upload")
def upload_command(
package_name: str,
aab_file: Path = Option(
"app.aab",
exists=True,
file_okay=True,
dir_okay=False,
writable=False,
readable=True,
resolve_path=True,
),
track: Track = Option(Track.internal),
json_key: Path = Option(
"credential.json",
exists=True,
file_okay=True,
dir_okay=False,
writable=False,
readable=True,
resolve_path=True,
),
):
upload(
package_name=package_name,
aab_file=str(aab_file),
track=track.value,
json_key=str(json_key),
)
| leynier/androidpublisher | androidpublisher/main.py | main.py | py | 1,007 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typer.Typer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 4... |
32341359777 | '''
Created on Jul 19, 2011
@author: rtaylor
'''
from segment import Segment
from shell import Shell
from circle import Circle
from mymath import reflect,calcShellAngle
from math import tan,atan
from numpy.linalg import norm
class Module:
'''
A complete foxsi module. By default, it consists of seven nested shells.
'''
def __init__(self,
base = [0,0,0],
seglen = 30.0,
focal = 200.0,
radii = [5.151,4.9,4.659,4.429,4.21,4.0,3.799],
angles = None
):
'''
Constructor
Parameters:
base: the center point of the wide end of the segment
seglen: the axial length of each segment
focal: the focal length, measured from the center of the module
radii: a list of radii, one for each shell from biggest to smallest
angles: optional parameter to overwrite the shell angles computed by constructor
'''
if angles is None:
angles = calcShellAngle(radii,focal)
elif len(radii) != len(angles):
raise ValueError('number of radii and number of angles do not match')
self.shells = []
for i,r in enumerate(radii):
self.shells.append(Shell(base=base, seglen=seglen, ang=angles[i], r=r))
# inner core (blocks rays going through center of module)
r0 = self.shells[-1].back.r0
r1 = r0 - seglen * tan(4*angles[-1])
ang = atan((r0-r1)/(2*seglen))
self.core = Segment(base=base, seglen=2*seglen, ang=ang, r0=r0)
self.coreFaces = [Circle(center=base,normal=[0,0,1],radius=r0),
Circle(center=[base[0],base[1],base[2]+2*seglen],normal=[0,0,-1],radius=r1)]
def getDims(self):
'''
Returns the module's dimensions:
[radius at wide end, radius at small end, length]
'''
front = self.shells[0].front
back = self.shells[0].back
return [front.r0, back.r1, front.seglen+back.seglen]
def getSurfaces(self):
'''
Returns a list of surfaces
'''
surfaces = []
for shell in self.shells:
surfaces.extend(shell.getSurfaces())
surfaces.append(self.core)
surfaces.extend(self.coreFaces)
return(surfaces)
def passRays(self, rays, robust = False):
'''
Takes an array of rays and passes them through the front end of
the module.
'''
#print 'Module: passing ',len(rays),' rays'
# get all module surfaces
allSurfaces = self.getSurfaces()
allSurfaces.remove(self.coreFaces[0]) # we'll test these seperately
allSurfaces.remove(self.coreFaces[1])
# create regions consisting of adjacent shells
regions = [None for shell in self.shells]
for i,shell in enumerate(self.shells):
# innermost shell
if i == len(self.shells)-1:
regions[i] = shell.getSurfaces()
regions[i].append(self.core)
else:
regions[i] = shell.getSurfaces() # outer shell (reflective side facing region)
regions[i].extend(self.shells[i+1].getSurfaces()) # nested shell (non reflective)
for ray in rays:
# skip rays that hit a core face
if ray.pos[2] < self.coreFaces[0].center[2]:
sol = self.coreFaces[0].rayIntersect(ray)
if sol is not None:
ray.pos = ray.getPoint(sol[2])
ray.bounces += 1
ray.dead = True
continue
else: ray.moveToZ(self.coreFaces[0].center[2])
elif ray.pos[2] > self.coreFaces[1].center[2]:
sol = self.coreFaces[1].rayIntersect(ray)
if sol is not None:
ray.pos = ray.getPoint(sol[2])
ray.bounces += 1
ray.dead = True
continue
else: ray.moveToZ(self.coreFaces[1].center[2])
# reset surfaces
surfaces = [s for s in allSurfaces]
firstBounce = True # used for optimization
# while ray is inside module
while True:
# find nearest ray intersection
bestDist = None
bestSol = None
bestSurf = None
for surface in surfaces:
sol = surface.rayIntersect(ray)
if sol is not None:
dist = norm(ray.getPoint(sol[2])-ray.pos)
if bestDist is None or dist < bestDist:
bestDist = dist
bestSol = sol
bestSurf = surface
# if a closest intersection was found
if bestSol is not None:
# update ray
ray.pos = ray.getPoint(bestSol[2])
ray.bounces += 1
x = reflect(ray.ori,bestSurf.getNormal(bestSol[0],bestSol[1]))
# if reflected
if x is not None:
ray.ori = x / norm(x) # update ori to unit vector reflection
# otherwise, no reflection means ray is dead
else:
ray.dead = True
break
# knowing the surface it has just hit, we can
# narrow down the number of surface to test
# remove shells the ray cannot even 'see'
if firstBounce:
firstBounce = False
for region in regions:
if bestSurf is region[0] or bestSurf is region[1]:
surfaces = [s for s in region]
break
# assuming each segment can be hit no more than once
# eliminate the surface from our list
if not robust: surfaces.remove(bestSurf)
# if no intersection, ray can exit module
else: break
def plot2D(self, axes, color = 'b'):
'''
Plots a 2d cross section of the module
'''
for shell in self.shells:
shell.plot2D(axes, color)
# plot core
self.core.plot2D(axes, color)
base = self.core.base
r0 = self.core.r0
r1 = self.core.r1
seglen = self.core.seglen
axes.plot((base[2],base[2]),(r0,-r0),'-'+color)
axes.plot((base[2]+seglen,base[2]+seglen),(r1,-r1),'-'+color)
def plot3D(self, axes, color = 'b'):
'''
Generates a 3d plot of the module in the given figure
'''
for shell in self.shells:
shell.plot3D(axes,color)
def targetFront(self,a,b):
'''
Takes two list arguments of equal size, the elements of which range from 0 to 1.
Returns an array of points that exist on the circle defined by the wide end of
the module.
'''
#must modify 'a' so that we dont return points from the core
r0 = self.shells[0].front.r0
r1 = self.core.r0
a0 = (r1/r0)**2 # the 'a' value that gives r1=sqrt(a)*r0
adiff = 1 - a0
for i in range(len(a)):
a[i] = a[i]*adiff + a0
return self.shells[0].targetFront(a,b)
def targetBack(self,a,b):
'''
Takes two list arguments of equal size, the elements of which range from 0 to 1.
Returns an array of points that exist on the circle defined by the small end of
the module.
'''
#must modify 'a' so that we dont return points from the core
r0 = self.shells[0].back.r1
r1 = self.core.r1
a0 = (r1/r0)**2 # the 'a' value that gives r1=sqrt(a)*r0
adiff = 1 - a0
for i in range(len(a)):
a[i] = a[i]*adiff + a0
return self.shells[0].targetBack(a,b)
| humatic/foxsi-optics-sim | src/foxsisim/module.py | module.py | py | 8,412 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mymath.calcShellAngle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "shell.Shell",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "math.tan",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_numb... |
7055227899 | #from django.shortcuts import render
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
import redis
# Create your views here.
def home(request): # load page home
return render(request,'task/home.html')
#*************************************************************
def is_redis(request):
sel="1"
gt="1"
ip1 = '192.168.1.1'
ip2 = '192.168.1.2'
ip3 = '192.168.1.3'
ip4 = '192.168.1.4'
if request.method == 'POST':
r = redis.StrictRedis()
sel= request.POST.get('sel','')
#if(sel)
#listip=['192.168.1.1','192.168.1.2','192.168.1.3','192.168.1.4']
#return HttpResponse(sel)
#return HttpResponse(sel)
#r.setex('sel',sel)
r.set('sel', sel)
gt=r.get('sel')
#return HttpResponse(gt)
return render(request,'task/redis.html',context={'sel':sel,'ip1':ip1,'ip2':ip2,'ip3':ip3,'ip4':ip4,'gt':gt})
| rasoolgh71/isredis | task/views.py | views.py | py | 947 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "redis.StrictRedis",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 30,
"usage_type": "call"
}
] |
23012205675 | """
Este código implementa a parte Coletora de um programa de fila de mensagens que coleta,
classifica e distribui tweets de acordo com tópicos selecionados pelo cliente.
Autores:
- Caio Miglioli @caiomiglioli
- Ryan Lazaretti @ryanramos01
Data de Criação: 30 de Maio de 2023
Ultima alteração: 31 de Maio de 2023
"""
import pika
from json import dumps
from csv import reader
from time import sleep
def tw2dict(header, tw):
"""
Recebe um vetor header contendo o nome de cada coluna do csv
Recebe um vetor tw contendo os valores de uma coluna qualquer do csv
Junta ambos em um dicionário chave-valor e retorna
"""
res = dict()
for i, head in enumerate(header):
res[head] = tw[i]
return res
def publishFromCSV(filename, channel, interval):
"""
Função que lê cada linha do CSV, e coordena a publicação chamando as funções necessárias (tw2dict e publish)
"""
with open(filename, newline='') as tws:
_reader = reader(tws)
header = None
for i, tw in enumerate(_reader):
# if i > 1: break #enviar somente 1 tweet xd
if i == 0:
header = tw
continue
t = tw2dict(header, tw)
publish(channel, t)
sleep(interval)
#end for
#end open
def publish(channel, tw):
"""
Recebe um channel de MQ e um dicionario contendo as informações do tweet,
retira somente os valores importantes e publica na fila de mensagens para o classificador
"""
# user = literal_eval(tw["tweet_user"])
body = {
"tweet": tw['text'],
"at": tw['name'],
# "tweet": tw['tweet_full_text'],
# "author": user["name"],
# "at": user['screen_name']
}
channel.basic_publish(exchange='', routing_key='raw-tweets', body=dumps(body))
print(f'tweet by @{body["at"]} published')
#end publish
def main():
# cria a conexão com o RabbitMQ
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', '5672'))
print('connection started')
# declara a fila
channel = connection.channel()
channel.queue_declare(queue='raw-tweets')
print('raw-tweets queue started')
# envia os tweets para a fila
try:
publishFromCSV('tweet_data.csv', channel, .5)
finally:
print('connection closed')
connection.close()
if __name__ == '__main__':
main() | caiomiglioli/sistemas-distribuidos | mq/colector/colector.py | colector.py | py | 2,464 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pika.BlockingConnection",
"line_... |
34684398194 | #!/Users/kalindbl/bin/virtualenv/biopython/bin/python
'''
Take an xml file of MDS-IES annotations and produce a new XML file with these added annotations:
- whether the MDS annotation is "valid," i.e. covers the MAC chromosome completely with only pointer overlap
- which MAC contigs come from multiple MIC loci
- which MAC contigs come from scrambled loci
- if scrambled, the sequence of MDS at the MIC locus
Voted least efficient script of the year by everyone ever.
'''
from lxml import etree
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description='annotate MDS/IES descriptors with scrambling information')
parser.add_argument('input', help='input MDS/IES database, xml format')
parser.add_argument('contig_lengths', help='text file containing the lengths of MAC contigs, sans telomeres')
parser.add_argument('--coverage_cutoff', help='number of BP on the MAC chromosome that can be unaccounted for before it\'s called invalid.', type=int, default=50)
parser.add_argument('--overlap_cutoff', help='percent of an MDS that must be covered by another before it\'s called invalid.', type=float, default=0.8)
parser.add_argument('output', help='file to which to write (xml) output')
args = parser.parse_args()
def validate_contig(mds_map, contig_length):
'''
Takes the MDS structure of a contig and determines whether it is adequately covered, without overlap. Returns true if contig is valid.
'''
# First, we want the MDS annotations to extend all along the chromosome
if mds_map[0][0] > args.coverage_cutoff or contig_length - mds_map[-1][1] > args.coverage_cutoff:
# This does run into the problem that because the
# MDS were sorted with respect to final endpoint, the first
# on the list may not have the lowest coordinate.
# However, if the endpoint of the actual first MDS is
# lower than the endpoint of the second MDS, the two overlap
# and should be discarded anyway.
return False
# Are there gaps greater than the coverage cutoff
# within the sequence? Are there overlaps?
# Move pairwise over the MDS...
for pair in zip(mds_map, mds_map[1:]):
# Determine gap
gap = pair[1][0] - pair[0][1]
if gap > args.coverage_cutoff:
return False
if gap < 0 and abs(gap) > (0.8 * pair[0][1] - pair[0][0]):
return False
# If all that passed, the contig is valid
return True
def get_scrambling(mac_mds_map, mic_mds_map):
'''
Take MDS structure for a MAC contig at a particular MIC locus and determine whether it's out of order. Returns the scrambling pattern if yes, or false otherwise.
'''
# Because neither the MAC nor the MIC is necessarily
# in order in the XML, this is going to be a clumsy
# sorting operation
# Label both sequences with indices
# (We only need the end coordinate for each MDS,
# since that's what we're going to sort on anyway.)
mac = zip([tup[1] for tup in mac_mds_map], xrange(len(mac_mds_map)))
mic = zip([tup[1] for tup in mic_mds_map], xrange(len(mic_mds_map)))
# Now sort both 5'-3'
mac.sort(key=lambda x: x[0])
mic.sort(key=lambda x: x[0])
# If their indices no longer match, the contig is scrambled...
new_mac_idx = [tup[1] for tup in mac]
new_mic_idx = [tup[1] for tup in mic]
if new_mac_idx != new_mic_idx:
# If they're mirrors, that just means the MAC contig is backwards...
if sorted(new_mac_idx) != new_mic_idx:
# ...and the order of scrambling is the order of
# the sorted MIC indices sorted by the sorted MAC indices! Whew!
return ':'.join([str(new_mic_idx[idx]) for idx in new_mac_idx])
return False
mac_contigs = defaultdict(list)
mic_loci = defaultdict(list)
multi_locus_contigs = []
valid_maps = []
scrambled_maps = {}
# Make the length dict
lengths = {line.split('\t')[0]:int(line.split('\t')[1].strip()) for line in open(args.contig_lengths)}
# Pull in the XML annotations
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(args.input, parser)
maps = tree.getiterator('map')
for map in maps:
map_id = map.get('id')
mic = []
mac = []
contig = map[1].text
mds = map.getiterator('mds')
for indv_mds in mds:
mic.append((int(indv_mds[0].get('start')), int(indv_mds[0].get('end'))))
mac.append((int(indv_mds[1].get('start')), int(indv_mds[1].get('end'))))
# Mark this as a scrambled map if appropriate
scrambling = get_scrambling(mac, mic)
if scrambling:
scrambled_maps[map_id] = scrambling
if len(mic_loci[contig]):
# flag this as a multi-locus contig
if contig not in multi_locus_contigs:
multi_locus_contigs.append(contig)
mac_contigs[contig] += mac
mic_loci[contig] += mic
# Now go through and validate all contigs
for contig in mac_contigs:
# Since MDS may be coming from several separate maps, they need to be sorted by coordinate
mac_contigs[contig].sort(cmp=lambda x, y: cmp(x[1], y[1]))
if validate_contig(mac_contigs[contig], lengths[contig]):
valid_maps.append(contig)
# Annnnd go through the tree *again* to add appropriate annotations for output
for map in tree.getiterator('map'):
if map.get('id') in scrambled_maps:
map.insert(2, etree.Element('scrambled', order=scrambled_maps[map.get('id')]))
if map[1].text in valid_maps:
map.insert(2, etree.Element('valid'))
# Finally, write this monstrosity back to a file
outfile = open(args.output, 'w')
outfile.write(etree.tostring(tree.getroot(), xml_declaration=True, pretty_print=True))
outfile.close()
| kelind/scrambling-pfsa | annotate_xml_maps.py | annotate_xml_maps.py | py | 5,720 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 90,
"usage_type": "call"
},
{
"api_name"... |
21144519916 | import json
import logging
import os
import pickle
import random
import multiprocessing
from copy import deepcopy
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ipdb import set_trace
from transformers import AutoTokenizer, BertTokenizer
class MemExamples:
def __init__(self):
pass
class LpBERTDataset(Dataset):
def __init__(self, max_len, triplets, cui2concepts, cui2desc, tokenizer):
"""
将数据集转变为MLM和NSP任务所需要的格式
:param triplets:这里data是原始的三元组
:param cui2concepts:这是cui对应的所有同义词
:param cui2desc:这是给cui的详细解释
"""
self.triplets = triplets
self.cui2desc = cui2desc
self.cui2concepts = cui2concepts
self.nums = len(triplets)
self.max_len = max_len # 这是每句话的最大长度
self.tokenizer = tokenizer
def __len__(self):
return self.nums
def collate_fn(self,data):
return data
def __getitem__(self, index):
"""
每取一个数据,处理一个数据,这个时候需要保证data是一个两列的数据....
这里将数据转变为MLM和NSP所需要的格式....
:param index:
:return:
"""
triple = self.triplets[index].strip().split('\t')
head_cui, rel, tail_cui = triple
rel = rel.replace("_", " ")
head_desc = ""
if head_cui in self.cui2desc:
head_desc = self.cui2desc[head_cui]
tail_desc = ""
if tail_cui in self.cui2desc:
tail_desc = self.cui2desc[tail_cui]
try:
head_ent = random.choice(self.cui2concepts[head_cui])
tail_ent = random.choice(self.cui2concepts[tail_cui])
except:
return None
rel_tokens = self.tokenizer.tokenize(rel)
rel_token_ids = self.tokenizer.convert_tokens_to_ids(rel_tokens)
# rel mrm + mlm
rel_tokens_ids, rel_mrm_label, head_ent_desc_ids, head_ent_desc_label, tail_ent_desc_ids, tail_ent_desc_label = self.rel_mlm(
head_ent, head_desc, tail_ent, tail_desc, rel)
rel_mrm_input_ids = [self.tokenizer.convert_tokens_to_ids('[CLS]')] + head_ent_desc_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')] + rel_tokens_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')] + tail_ent_desc_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')]
rel_mrm_attention_mask = [1] * len(rel_mrm_input_ids)
rel_mrm_token_type_ids = [0] * (2 + len(head_ent_desc_ids))
rel_mrm_token_type_ids += [1] * (1 + len(rel_token_ids))
rel_mrm_token_type_ids += [2] * (1 + len(tail_ent_desc_ids))
rel_mrm_label_ids = [-100] + head_ent_desc_label + [-100] + rel_mrm_label + [-100] + tail_ent_desc_label + [
-100]
assert len(rel_mrm_input_ids) == len(rel_mrm_attention_mask) == len(rel_mrm_token_type_ids) == len(
rel_mrm_label_ids), "mrm 数据生成不一致"
if len(rel_mrm_input_ids) > self.max_len:
# todo:需要从desc进行下手裁剪
# 直接跳过这个数据
return
else:
pad_len = self.max_len - len(rel_mrm_label_ids)
rel_mrm_input_ids += [0] * pad_len
rel_mrm_attention_mask += [0] * pad_len
rel_mrm_token_type_ids += [0] * pad_len
rel_mrm_label_ids += [-100] * pad_len
head_tokens_ids, head_mem_label = self.head_entity_mlm(head_ent)
# 这是head mem任务+mlm任务
head_desc_token_ids = self.tokenizer.encode(head_desc)[1:-1]
head_mem_input_ids = [self.tokenizer.convert_tokens_to_ids('[CLS]')] + head_tokens_ids + head_desc_token_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')] + rel_token_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')] + tail_ent_desc_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')]
head_mem_attention_mask = [1] * len(head_mem_input_ids)
head_mem_token_type_ids = [0] * (2 + len(head_tokens_ids) + len(head_desc_token_ids))
head_mem_token_type_ids += [1] * (1 + len(rel_token_ids))
head_mem_token_type_ids += [2] * (1 + len(tail_ent_desc_ids))
head_mem_label_ids = [-100] + head_mem_label + [-100] * len(head_desc_token_ids) + [-100] + [-100] * len(
rel_token_ids) + [-100] + tail_ent_desc_label + [-100]
# todo: 最长长度进行限制
assert len(head_mem_input_ids) == len(head_mem_attention_mask) == len(head_mem_attention_mask) == len(
head_mem_label_ids)
if len(head_mem_input_ids) > self.max_len:
# todo:需要从desc进行下手裁剪
pass
else:
pad_len = self.max_len - len(head_mem_input_ids)
head_mem_input_ids += [0] * pad_len
head_mem_attention_mask += [0] * pad_len
head_mem_token_type_ids += [0] * pad_len
head_mem_label_ids += [-100] * pad_len
# tail mem + mlm
tail_tokens_ids, tail_mem_label = self.tail_entity_mlm(tail_ent)
tail_desc_token_ids = self.tokenizer.encode(tail_desc)[1:-1]
tail_mem_input_ids = [self.tokenizer.convert_tokens_to_ids('[CLS]')] + head_ent_desc_ids + [
self.tokenizer.convert_tokens_to_ids('[SEP]')] + rel_token_ids + [self.tokenizer.convert_tokens_to_ids(
'[SEP]')] + tail_tokens_ids + tail_desc_token_ids + [self.tokenizer.convert_tokens_to_ids('[SEP]')]
tail_mem_attention_mask = [1] * len(tail_mem_input_ids)
tail_mem_token_type_ids = [0] * (2 + len(head_ent_desc_ids))
tail_mem_token_type_ids += [1] * (1 + len(rel_token_ids))
tail_mem_token_type_ids += [2] * (1 + len(tail_tokens_ids) + len(tail_desc_token_ids))
tail_mem_label_ids = [-100] + head_ent_desc_label + [-100] + [-100] * len(rel_token_ids) + [
-100] + tail_mem_label + [-100] * len(tail_desc_token_ids) + [-100]
assert len(tail_mem_input_ids) == len(tail_mem_attention_mask) == len(tail_mem_token_type_ids) == len(
tail_mem_label_ids), "tail mem 数据生成错误"
if len(tail_mem_input_ids) > self.max_len:
# todo:需要从desc进行下手裁剪
pass
else:
pad_len = self.max_len - len(tail_mem_input_ids)
tail_mem_input_ids += [0] * pad_len
tail_mem_attention_mask += [0] * pad_len
tail_mem_token_type_ids += [0] * pad_len
tail_mem_label_ids += [-100] * pad_len
rel_mrm_input_ids = torch.tensor(rel_mrm_input_ids).long()
rel_mrm_attention_mask = torch.tensor(rel_mrm_attention_mask).bool()
rel_mrm_token_type_ids = torch.tensor(rel_mrm_token_type_ids).long()
rel_mrm_label_ids = torch.tensor(rel_mrm_label_ids).long()
mrm_res = {
'input_ids': rel_mrm_input_ids,
'attention_mask': rel_mrm_attention_mask,
'token_type_ids': rel_mrm_token_type_ids,
'label_ids': rel_mrm_label_ids
}
head_mem_input_ids = torch.tensor(head_mem_input_ids).long()
head_mem_attention_mask = torch.tensor(head_mem_attention_mask).bool()
head_mem_token_type_ids = torch.tensor(head_mem_token_type_ids).long()
head_mem_label_ids = torch.tensor(head_mem_label_ids).long()
head_mem_res = {
'input_ids': head_mem_input_ids,
'attention_mask': head_mem_attention_mask,
'token_type_ids': head_mem_token_type_ids,
'label_ids': head_mem_label_ids
}
tail_mem_input_ids = torch.tensor(tail_mem_input_ids).long()
tail_mem_attention_mask = torch.tensor(tail_mem_attention_mask).bool()
tail_mem_token_type_ids = torch.tensor(tail_mem_token_type_ids).long()
tail_mem_label_ids = torch.tensor(tail_mem_label_ids).long()
tail_mem_res = {
'input_ids': tail_mem_input_ids,
'attention_mask': tail_mem_attention_mask,
'token_type_ids': tail_mem_token_type_ids,
'label_ids': tail_mem_label_ids
}
return head_mem_res, tail_mem_res, mrm_res
def head_entity_mlm(self, head_ent):
"""
这是对head entity的随机mask
对head ent进行完全mask,对tail ent和tail desc进行mlm任务
:param head_ent
:param head_desc
:return:
"""
# head_token_ids = self.tokenizer.encode(head_ent)[1:-1]
# head_desc_ids = self.tokenizer.encode(head_desc)[1:-1]
# rel_ids = self.tokenizer.encode(rel)[1:-1]
# head_labels =deepcopy(head_token_ids)
head_tokens = self.tokenizer.tokenize(head_ent)
head_tokens_ids = self.tokenizer.convert_tokens_to_ids(head_tokens)
# 对head ent进行完全mask
head_mem_label = deepcopy(head_tokens_ids)
head_tokens_ids = [self.tokenizer.convert_tokens_to_ids('[MASK]')] * len(head_tokens)
return head_tokens_ids, head_mem_label
def tail_entity_mlm(self, tail_ent):
"""
这是对head entity的随机mask
对head ent进行完全mask,对tail ent和tail desc进行mlm任务
:param head_ent
:param head_desc
:return:
"""
tail_tokens = self.tokenizer.tokenize(tail_ent)
tail_tokens_ids = self.tokenizer.convert_tokens_to_ids(tail_tokens)
# 对tao; ent进行完全mask
tail_mem_label = deepcopy(tail_tokens_ids)
tail_tokens_ids = [self.tokenizer.convert_tokens_to_ids('[MASK]')] * len(tail_tokens)
return tail_tokens_ids, tail_mem_label
def rel_mlm(self, head_ent, head_desc, tail_ent, tail_desc, rel):
rel_tokens = self.tokenizer.tokenize(rel)
rel_tokens_ids = self.tokenizer.convert_tokens_to_ids(rel_tokens)
# rel 进行完全mask
rel_mrm_label = deepcopy(rel_tokens_ids)
rel_tokens_ids = [self.tokenizer.convert_tokens_to_ids('[MASK]')] * len(rel_tokens_ids)
head_ent_desc = head_ent + " " + head_desc
tokens = self.tokenizer.tokenize(head_ent_desc)
head_ent_desc_ids = self.tokenizer.convert_tokens_to_ids(tokens)
head_ent_mlm_label = [-100] * len(head_ent_desc_ids)
for i, token in enumerate(tokens):
if i == 0 or i == len(tokens) - 1:
continue
prob = random.random()
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.9:
# 这是whole word mask的中间subword
if len(tokens[i]) > 1 and tokens[i][0] == '#':
self.label_mlm(head_ent_desc_ids, head_ent_mlm_label, tokens, i, special_token='[MASK]')
else:
if i + 1 < len(tokens) and len(tokens[i + 1]) > 1 and tokens[i + 1][0] == '#':
# 这个情况是,一个word是由多个subword组成,这里正好选到了word的第一个subword,因此需要对整个mask
self.label_mlm(head_ent_desc_ids, head_ent_mlm_label, tokens, i + 1, special_token='[MASK]')
else:
head_ent_desc_ids[i] = self.tokenizer.convert_tokens_to_ids('[MASK]')
head_ent_mlm_label[i] = self.tokenizer.convert_tokens_to_ids(token)
else: # 85%的概率不修改
pass
# 对tail_ent+tail_desc进行MLM
tail_ent_desc = tail_ent + " " + tail_desc
tokens = self.tokenizer.tokenize(tail_ent_desc)
tail_ent_desc_ids = self.tokenizer.convert_tokens_to_ids(tokens)
tail_mlm_label = [-100] * len(tail_ent_desc_ids)
for i, token in enumerate(tokens):
if i == 0 or i == len(tokens) - 1:
continue
prob = random.random()
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.9:
# 这是whole word mask的中间subword
if len(tokens[i]) > 1 and tokens[i][0] == '#':
self.label_mlm(tail_ent_desc_ids, tail_mlm_label, tokens, i, special_token='[MASK]')
else:
if i + 1 < len(tokens) and len(tokens[i + 1]) > 1 and tokens[i + 1][0] == '#':
# 这个情况是,一个word是由多个subword组成,这里正好选到了word的第一个subword,因此需要对整个mask
self.label_mlm(tail_ent_desc_ids, tail_mlm_label, tokens, i + 1, special_token='[MASK]')
else:
tail_ent_desc_ids[i] = self.tokenizer.convert_tokens_to_ids('[MASK]')
tail_mlm_label[i] = self.tokenizer.convert_tokens_to_ids(token)
else: # 85%的概率不修改
pass
return rel_tokens_ids, rel_mrm_label, head_ent_desc_ids, head_ent_mlm_label, tail_ent_desc_ids, tail_mlm_label
def label_mlm(self, input_ids, mlm_label, tokens, cur_pos, special_token='[MASK]'):
"""
这是开始以cur_pos为中心,向前后进行试探,然后对whole word进行mask
:param mlm_label:
:param tokens:
:param cur_pos:
:return:
"""
# 从当前到后 [cur_pos:]
index_ = cur_pos
lens = len(mlm_label)
if special_token == '[MASK]':
while index_ < lens: # 这是从当前向后查找
if len(tokens[index_]) > 1 and tokens[index_][0] == '#':
input_ids[index_] = self.tokenizer.convert_tokens_to_ids(special_token)
mlm_label[index_] = self.tokenizer.convert_tokens_to_ids(tokens[index_])
else:
break
index_ += 1
index_ = cur_pos - 1
while index_ >= 0: # 这是从当前向前查找
if len(tokens[index_]) > 1 and tokens[index_][0] == '#':
input_ids[index_] = self.tokenizer.convert_tokens_to_ids(special_token)
mlm_label[index_] = self.tokenizer.convert_tokens_to_ids(tokens[index_])
else:
input_ids[index_] = self.tokenizer.convert_tokens_to_ids(special_token)
mlm_label[index_] = self.tokenizer.convert_tokens_to_ids(tokens[index_])
break
index_ -= 1
# 注意这里,在向前查找的时候,第一个单词是没有#,这个时候别忘了处理
else:
raise NotImplementedError
def generate_lp_dataset(tokenizer, max_len=512):
'''
将tokenize之后的数据convert_to_ids
:param tokenizer:
:param file_path:
:param target_file_path:
:param max_len:
:return:
'''
with open("../../umls/umls_triplets.txt", 'r', encoding='utf-8') as f:
triplets = f.readlines()
print("读取三元组完成")
with open("../../umls/cui2desc.json", 'r', encoding='utf-8') as f:
cui2desc = json.load(f)
print("读取cui2desc完成")
with open("../../umls/cui2concept.json", 'r', encoding='utf-8') as f:
cui2concepts = json.load(f)
print("读取cui2concept完成")
train_dataset = LpBERTDataset(max_len, triplets, cui2concepts, cui2desc, tokenizer)
train_loader = DataLoader(dataset=train_dataset, num_workers=0, batch_size=1,collate_fn=train_dataset.collate_fn)
new_line = []
file_idx = 1
for step, batch_data in tqdm(enumerate(train_loader), total=len(train_dataset)):
if batch_data[0] is None:
continue
head_mem_res, tail_mem_res, mrm_res = batch_data[0]
for key in ['input_ids','attention_mask','token_type_ids','label_ids']:
head_mem_res[key] = head_mem_res[key].squeeze().numpy().tolist()
tail_mem_res[key] = tail_mem_res[key].squeeze().numpy().tolist()
mrm_res[key] = mrm_res[key].squeeze().numpy().tolist()
new_line.append((head_mem_res, tail_mem_res, mrm_res))
if len(new_line) >150000:
file_path = '../../Lpcorpus/{}_150000.pk'.format(file_idx)
print("当前保存到文件", file_path)
with open(file_path, 'wb') as f:
pickle.dump(new_line, f)
new_line = []
file_idx += 1
# print("文件存储到:{}".format(target_file_path))
if __name__ == '__main__':
import time
max_len = 512
tokenizer_file_path = '../../../embedding/scibert_scivocab_uncased'
try:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_file_path)
except:
tokenizer = BertTokenizer.from_pretrained(tokenizer_file_path)
generate_lp_dataset(tokenizer, max_len=max_len) | KeDaCoYa/MKG-GC | knowledge_embedding/src/utils/lpbert_dataset.py | lpbert_dataset.py | py | 17,076 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
... |
75260155305 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag, Recipe
from recipes.serializers import TagSerializer
TAG_URL = reverse("recipes:tag-list")
def sample_user(email="sample@gmail.com", password="pass1234test"):
return get_user_model().objects.create_user(email=email, password=password)
class PublicTestTagsApi(TestCase):
def setUp(self):
self.client = APIClient()
def test_require_authentication(self):
res = self.client.get(TAG_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTestTagsApi(TestCase):
def setUp(self):
self.user = sample_user()
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_list_tags(self):
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Dessert")
res = self.client.get(TAG_URL)
tags = Tag.objects.all().order_by("-name")
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_user_tags_listed(self):
# Second user for test (shouldn't return this tag)
second_user_tag = Tag.objects.create(
user=sample_user(email="second@gmail.com"), name="Dinner"
)
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Dessert")
res = self.client.get(TAG_URL)
user_tags = Tag.objects.filter(user=self.user)
serializer = TagSerializer(user_tags, many=True)
# The second tag will be the second_user_tag because it's ordered
self.assertNotEqual(second_user_tag.name, res.data[1]["name"])
self.assertEqual(len(res.data), 2)
self.assertEqual(res.data, serializer.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_tag_endpoint_sucess(self):
payload = {"name": "new tag"}
self.client.post(TAG_URL, payload)
exists = Tag.objects.filter(
user=self.user, name=payload["name"]
).exists()
self.assertTrue(exists)
def test_create_tag_endpoint_invalid(self):
payload = {"name": ""}
res = self.client.post(TAG_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
tag1 = Tag.objects.create(user=self.user, name="Breakfast")
tag2 = Tag.objects.create(user=self.user, name="Lunch")
recipe = Recipe.objects.create(
title="Coriander eggs on toast",
time_minutes=10,
price=5.00,
user=self.user,
)
recipe.tags.add(tag1)
res = self.client.get(TAG_URL, {"assigned_only": 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
tag = Tag.objects.create(user=self.user, name="Breakfast")
Tag.objects.create(user=self.user, name="Lunch")
recipe1 = Recipe.objects.create(
title="Pancakes", time_minutes=5, price=3.00, user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title="Porridge", time_minutes=3, price=2.00, user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAG_URL, {"assigned_only": 1})
self.assertEqual(len(res.data), 1)
| trolliama/recipes-api | app/recipes/tests/test_tags_api.py | test_tags_api.py | py | 3,796 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.reverse",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 19,
"usage_type": "name"
},
{
"api_n... |
11188337575 | import subprocess
import shlex
import re
import os
import time
import platform
import json
import sys
import base64
import random
import datetime
import traceback
import robot_util
import _thread
import copy
import argparse
#import audio_util
import urllib.request
import rtc_signaling
from subprocess import Popen, PIPE
from threading import Thread
from queue import Queue
class DummyProcess:
    """Stand-in for a subprocess.Popen handle when no real process exists."""

    def __init__(self):
        # Obviously fake pid so it is recognisable in logs.
        self.pid = 123456789

    def poll(self):
        # Mimic Popen.poll() for a still-running process.
        return None
parser = argparse.ArgumentParser(description='robot control')
parser.add_argument('camera_id')
parser.add_argument('window_title')
parser.add_argument('dummy_crap')
parser.add_argument('--api-server', help="Server that robot will connect to listen for API update events", default='http://api.robotstreamer.com:8080')
parser.add_argument('--xres', type=int, default=768)
parser.add_argument('--yres', type=int, default=432)
parser.add_argument('--audio-device-number', default=1, type=int)
parser.add_argument('--audio-device-name')
parser.add_argument('--kbps', default=350, type=int)
parser.add_argument('--brightness', type=int, help='camera brightness')
parser.add_argument('--contrast', type=int, help='camera contrast')
parser.add_argument('--saturation', type=int, help='camera saturation')
parser.add_argument('--rotate180', default=False, type=bool, help='rotate image 180 degrees')
parser.add_argument('--env', default="prod")
parser.add_argument('--screen-capture', dest='screen_capture', action='store_true') # tells windows to pull from different camera, this should just be replaced with a video input device option
parser.set_defaults(screen_capture=False)
parser.add_argument('--no-mic', dest='mic_enabled', action='store_false')
parser.set_defaults(mic_enabled=True)
parser.add_argument('--audio-rate', default=44100, type=int, help="this is 44100 or 48000 usually")
parser.add_argument('--no-restart-on-video-fail', dest='restart_on_video_fail', action='store_true')
parser.set_defaults(restart_on_video_fail=True)
parser.add_argument('--no-audio-restart', dest='audio_restart_enabled', action='store_false')
parser.set_defaults(audio_restart_enabled=True)
parser.add_argument('--no-camera', dest='camera_enabled', action='store_false')
parser.set_defaults(camera_enabled=True)
parser.add_argument('--dry-run', dest='dry_run', action='store_true')
parser.add_argument('--mic-channels', type=int, help='microphone channels, typically 1 or 2', default=1)
parser.add_argument('--audio-input-device', default='Microphone (HD Webcam C270)') # currently, this option is only used for windows screen capture
parser.add_argument('--stream-key', default='hellobluecat')
parser.add_argument('--ffmpeg-path', default='/usr/local/bin/ffmpeg')
commandArgs = parser.parse_args()
apiServer = commandArgs.api_server
streamKey = commandArgs.stream_key
lastCharCount = None
robotSettings = None
resolutionChanged = False
currentXres = None
currentYres = None
audioProcess = None
videoProcess = None
def getVideoSFU():
    """Ask the API server for the webrtc SFU endpoint and return it parsed."""
    endpoint_url = '%s/v1/get_endpoint/webrtc_sfu/100' % (apiServer)
    raw_response = robot_util.getWithRetry(endpoint_url)
    return json.loads(raw_response)
def overrideSettings(commandArgs, onlineSettings):
    """Return a deep copy of the parsed args with online settings overlaid.

    Side effects: updates the module-level currentXres/currentYres cache and
    sets resolutionChanged when the requested resolution differs from the
    cached one.
    """
    global resolutionChanged
    global currentXres
    global currentYres
    resolutionChanged = False
    c = copy.deepcopy(commandArgs)
    print("onlineSettings:", onlineSettings)
    if 'mic_enabled' in onlineSettings:
        c.mic_enabled = onlineSettings['mic_enabled']
    if 'xres' in onlineSettings:
        if currentXres != onlineSettings['xres']:
            resolutionChanged = True
        c.xres = onlineSettings['xres']
        currentXres = onlineSettings['xres']
    if 'yres' in onlineSettings:
        if currentYres != onlineSettings['yres']:
            resolutionChanged = True
        c.yres = onlineSettings['yres']
        currentYres = onlineSettings['yres']
    # BUGFIX: 'mic_enabled' may be absent (it is guarded above); direct
    # indexing here raised KeyError. Use .get so the log line never crashes.
    print("onlineSettings['mic_enabled']:", onlineSettings.get('mic_enabled'))
    return c
def refreshFromOnlineSettings():
    """Re-apply settings and kill media processes whose config changed."""
    global robotSettings
    global resolutionChanged
    print("refreshing from online settings")
    # Online settings lookup is currently disabled; command-line args win.
    robotSettings = commandArgs
    if not robotSettings.mic_enabled:
        print("KILLING**********************")
        if audioProcess is not None:
            print("KILLING**********************")
            audioProcess.kill()
    if not resolutionChanged:
        print("NOT KILLING***********************")
    else:
        print("KILLING VIDEO DUE TO RESOLUTION CHANGE**********************")
        if videoProcess is not None:
            print("KILLING**********************")
            videoProcess.kill()
def startVideoRtc(videoEndpoint, SSRC):
    """Launch ffmpeg streaming the OBS virtual camera as RTP video.

    videoEndpoint: dict with 'localIp'/'localPort' from the SFU transport.
    SSRC: RTP synchronisation source id to stamp on the stream.
    Returns the ffmpeg subprocess.Popen handle.
    """
    videoHost = videoEndpoint['localIp']
    videoPort = videoEndpoint['localPort']
    print("startVideoRtc endpoints:", videoHost, videoPort)
    # NOTE(review): this is one single-quoted string continued with
    # backslash-newlines; leading whitespace on continuation lines becomes
    # part of the command string — verify spacing against the original file.
    videoCommandLine = 'ffmpeg -r 30 -f dshow -i video="OBS-Camera" -video_size {xres}x{yres} \
-map 0:v:0 -pix_fmt yuv420p -c:v libx264 -b:v {kbps}k -preset ultrafast -g 50 -f tee \
\"[select=v:f=rtp:ssrc={SSRC}:payload_type=101]rtp://{video_host}:{video_port}\"'\
    .format(kbps=robotSettings.kbps,
            video_host=videoHost,
            video_port=videoPort,
            SSRC=SSRC,
            xres=robotSettings.xres,
            yres=robotSettings.yres)
    print(videoCommandLine)
    return subprocess.Popen(shlex.split(videoCommandLine))
def startAudioRtc(audioEndpoint, SSRC):
    """Launch ffmpeg streaming the OBS virtual audio device as RTP/Opus.

    audioEndpoint: dict with 'localIp'/'localPort' from the SFU transport.
    SSRC: RTP synchronisation source id to stamp on the stream.
    Returns the ffmpeg subprocess.Popen handle.
    """
    audioHost = audioEndpoint['localIp']
    audioPort = audioEndpoint['localPort']
    print("startAudioRtc endpoints:", audioHost, audioPort)
    # Audio rate/channels are hard-coded (48 kHz stereo Opus) for now; the
    # commented-out tuple members show the settings that used to be wired in.
    audioCommandLine = 'ffmpeg -f dshow -i audio="OBS-Audio" -map 0:a:0 -acodec libopus -ab 128k -ac 2 -ar 48000 -f tee \"[select=a:f=rtp:ssrc=%s:payload_type=100]rtp://%s:%s\"'\
    % ( #robotSettings.audio_rate, #locked for now
        #robotSettings.mic_channels,
        str(SSRC),
        audioHost,
        audioPort,
        )
    print(audioCommandLine)
    return subprocess.Popen(shlex.split(audioCommandLine))
def startDualTest(videoEndpoint, SSRCV, audioEndpoint, SSRCA):
    """Launch a single ffmpeg that captures OBS video+audio and tees both
    streams out as RTP to the SFU's plain transports.

    Returns the ffmpeg subprocess.Popen handle.
    """
    audioHost = audioEndpoint['localIp']
    audioPort = audioEndpoint['localPort']
    videoHost = videoEndpoint['localIp']
    videoPort = videoEndpoint['localPort']
    print("startDualTest endpoints:", videoHost, videoPort, audioHost, audioPort)
    # NOTE(review): the command is one continued string literal; whitespace
    # at the start of each continuation line is part of the command. The
    # original indentation was lost here — confirm each fragment is separated
    # by a space (e.g. 'ffmpeg -r 30 ...') against the upstream file.
    videoCommandLine = 'ffmpeg\
 -r 30 -f dshow -i video="OBS-Camera" \
 -f dshow -i audio="OBS-Audio" \
 -pix_fmt yuv420p -c:v libx264 -b:v {kbps}k -maxrate {kbps}k -minrate {kbps}k -bufsize 100k -g 50 -preset ultrafast -map 0:v:0 \
 -c:a libopus -b:a 128k -ac 2 -ar 48000 -map 1:a:0\
 -f tee "[select=a:f=rtp:ssrc={SSRCA}:payload_type=100]rtp://{audio_host}:{audio_port}|[select=v:f=rtp:ssrc={SSRCV}:payload_type=101]rtp://{video_host}:{video_port}"'\
    .format(kbps=robotSettings.kbps,
            audio_host=audioHost,
            audio_port=audioPort,
            video_host=videoHost,
            video_port=videoPort,
            SSRCA=SSRCA,
            SSRCV=SSRCV,
            xres=robotSettings.xres,
            yres=robotSettings.yres)
    print(videoCommandLine)
    return subprocess.Popen(shlex.split(videoCommandLine))
def main():
    """Connect to the SFU signalling server and start the A/V RTP stream.

    Blocks forever in the websocket loop once streaming has started.
    """
    global robotID
    global audioProcess
    global videoProcess
    refreshFromOnlineSettings()
    # overrides command line parameters using config file
    print("args on command line:", commandArgs)
    print("camera id:", commandArgs.camera_id)
    print("args after loading from server:", robotSettings)
    print (streamKey)
    # Random SSRCs/peer id per run; the SFU is told about them during init.
    videoSSRC = int(random.randint(10000,99999))
    audioSSRC = int(random.randint(10000,99999))
    peerID = str(random.randint(100000,999999)) #need to ditch peer ids anyway
    print("videoSSRC: ", videoSSRC)
    print("audioSSRC: ", audioSSRC)
    videoSFU = getVideoSFU()
    print("webrtc SFU: ", videoSFU)
    robotID = str(int(commandArgs.camera_id) - int(100)) #just for temp compatability
    print("robotID: ", robotID)
    ws = rtc_signaling.SFUClient('wss://'+str(videoSFU['host'])+':'+str(videoSFU['port'])\
    +'/?roomId='+robotID+'&peerId=p:robot_'+peerID, protocols=['protoo'])
    ws.init(streamKey, videoSSRC, audioSSRC)
    ws.connect()
    ws.getRouterRtpCapabilities() #not required
    ws.requestPlainTransportVideo() #build transports then producers
    ws.requestPlainTransportAudio() #build transports then producers
    #janky blocking. this is just a test afterall
    # NOTE(review): these spin-waits burn a CPU core until the signalling
    # client fills in the endpoints — consider a small sleep.
    while ws.videoEndpoint == False:
        pass
    while ws.audioEndpoint == False:
        pass
    # startVideoRtc(ws.videoEndpoint, videoSSRC)
    # startAudioRtc(ws.audioEndpoint, audioSSRC)
    startDualTest(ws.videoEndpoint, videoSSRC, ws.audioEndpoint, audioSSRC)
    sys.stdout.flush()
    ws.run_forever()

main()
| robotstreamer/robotstreamer_win_obs | send_video_obs_webrtc.py | send_video_obs_webrtc.py | py | 9,495 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "robot_util.getWithRetry",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "copy.deepc... |
34450879227 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from matplotlib.lines import Line2D
import pandas as pd
#Max R during main sequence and max R in general
z = [0.001, 0.02]
Rgen = np.zeros([70,3])
Rms = np.zeros([70,3])
n = 0
for metal in z:
i = 0.1 #Add a loop here
while(i < 100.02):
df = pd.read_csv(f'evolve_{i:.1f}_{metal}', delim_whitespace=True)
df.columns = ['C1', 'Star Type', 'C3', 'Mass [Sun Mass]', 'C5', 'Radius [Log10]', 'C7', 'C8', 'C9', 'C10', 'C11']
'''
Finding max radius
'''
max_row = df[df['Radius [Log10]'] == df['Radius [Log10]'].max()]
max_radius = max_row['Radius [Log10]'].values[0]
'''
Finding max radius in main sequence
'''
if((i <= 0.6 and metal == 0.001) or (i <= 0.7 and metal == 0.02)):
filtered_df = df[df['Star Type'] == 0]
else:
filtered_df = df[df['Star Type'] == 1]
max_row_ms = filtered_df[filtered_df['Radius [Log10]'] == filtered_df['Radius [Log10]'].max()]
max_radius_ms = max_row_ms['Radius [Log10]'].values[0]
Rgen[n] = (metal, i, max_radius)
Rms[n] = (metal, i, max_radius_ms)
n += 1
if(i < 0.99):
i += 0.1
elif(i < 9.99):
i += 1
elif(i < 24.99):
i += 2.5
elif(i < 49.99):
i += 5
else:
i += 10
# HR Diagram part
def _load_evolve_track(path):
    """Read one evolve_* track file (header line skipped) and return
    (log10 Teff, log10 L, star type) as parallel float arrays.

    Column indices follow the evolve output layout used above:
    1 = star type, 4 = luminosity, 6 = effective temperature.
    """
    # `with` guarantees the file is closed (the old code leaked on error).
    with open(path, "r") as f:
        rows = [line.split() for line in f.readlines()[1:]]
    teff = np.array([row[6] for row in rows], dtype=float)
    lum = np.array([row[4] for row in rows], dtype=float)
    styp = np.array([row[1] for row in rows], dtype=float)
    return teff, lum, styp

# Same loader for both tracks removes the duplicated read/parse loops.
teff1, l1, typ1 = _load_evolve_track("evolve_2.0_0.001")
teff2, l2, typ2 = _load_evolve_track("evolve_6.0_0.02")
#Plot map for HR diagram
type_colors = {
0: 'violet',
1: 'red',
2: 'blue',
3: 'green',
4: 'orange',
5: 'purple',
6: 'cyan',
7: 'magenta',
8: 'yellow',
9: 'brown',
10: 'pink',
11: 'lime',
}
cmap = ListedColormap([type_colors[type_val] for type_val in typ1])
unique_types = np.unique(typ1)
legend_elemts = [Line2D([0], [0], marker='o', color='w', label=f'{i}', markerfacecolor=type_colors[i], markersize=5)
for i in unique_types]
plt.figure('HR diagram')
plt.scatter(teff1,l1, s=2, c=typ1, cmap=cmap)
plt.text(4.09, 1.4, 'M = 2 \n z = 0.001', fontsize=7, c='black', fontweight='bold')
plt.scatter(teff2,l2, s=2, c=typ2, cmap=cmap)
plt.text(4.28, 3, 'M = 6 \n z = 0.02', fontsize=7, c='black', fontweight='bold')
plt.xticks(np.arange(3,5.5,0.25))
plt.yticks(np.arange(-5.5, 5, 0.5))
plt.xlim(5.5,3)
plt.ylim(-5.6, 5)
plt.xlabel('Effective Temperature (log10) [K]')
plt.ylabel('Effective Luminosity (log10) [Sun Luminosity]')
plt.legend(handles=legend_elemts, title='Star Types')
type_colors_2 = {
0.001: 'red',
0.02: 'blue',
}
cmap2 = ListedColormap([type_colors_2[val] for val in Rms[:,0]])
legend_labels_ms = [plt.Line2D([0], [0], marker='o', color='w', label=key, markerfacecolor=value)
for key, value in type_colors_2.items()]
plt.figure('Main Sequence Max Radius-Mass Relation')
plt.scatter(Rms[:,1], Rms[:,2], s=2, c=Rms[:,0], cmap=cmap2)
plt.xlabel('Mass [Sun Mass]')
plt.ylabel('Max Radius [Log10 [Sun Radius]')
plt.title('Max Radius-Mass Relation for Main Sequence')
plt.legend(handles=legend_labels_ms, title='Metalicity')
type_colors_3 = {
0.001: 'red',
0.02: 'blue',
}
cmap3 = ListedColormap([type_colors_3[val] for val in Rgen[:,0]])
legend_labels_gen = [plt.Line2D([0], [0], marker='o', color='w', label=key, markerfacecolor=value)
for key, value in type_colors_3.items()]
plt.figure('Max Radius-Mass Relation - all types')
plt.scatter(Rgen[:,1], Rgen[:,2], s=2, c=Rgen[:,0], cmap=cmap3)
plt.xlabel('Mass [Sun Masses]')
plt.ylabel('Max Radius [Log10] [Sun Radius]')
plt.title('Max Radius-Mass Relation for the entire evolution process')
plt.legend(handles=legend_labels_gen, title='Metalicity')
plt.show()
| kajasiek/Astrophysics5 | Task 2/task2-data.py | task2-data.py | py | 4,441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
36902270914 | from fenics import *
from mshr import *
import scipy.io
import numpy as np
# NOTE(review): the file has an .hdf5 name but is read with scipy's MATLAB
# loader — confirm it really is a .mat container.
data = scipy.io.loadmat('neuron_input_1.hdf5')
if MPI.rank(MPI.comm_world) == 0:
    print(data.keys())
# Neuron geometry coordinates from the input file.
x = data['x']
y = data['y']
z = data['z']
# Bounding-box extents of the geometry.
xlen = x.max() - x.min()
ylen = y.max() - y.min()
zlen = z.max() - z.min()
if MPI.rank(MPI.comm_world) == 0:
    print(xlen, ylen, zlen)
# Pad the bounding box by 10% on every side.
padding_fraction = 0.1
x0_mesh = x.min() - padding_fraction*xlen
x1_mesh = x.max() + padding_fraction*xlen
y0_mesh = y.min() - padding_fraction*ylen
y1_mesh = y.max() + padding_fraction*ylen
z0_mesh = z.min() - padding_fraction*zlen
z1_mesh = z.max() + padding_fraction*zlen
xmid_mesh = (x.min() + x.max())/2.
ymid_mesh = (y.min() + y.max())/2.
zmid_mesh = (z.min() + z.max())/2.
# Cylinder radius: half-diagonal of the x-z extent, then tripled
# (r += 2*r) to leave generous margin around the geometry.
r_mesh = np.sqrt((xlen/2.)**2 + (zlen/2.)**2)
r_mesh += 2*r_mesh
# Cylinder axis runs along y through the box centre.
p_top = Point(xmid_mesh, y0_mesh, zmid_mesh)
p_bottom = Point(xmid_mesh, y1_mesh, zmid_mesh)
domain = Cylinder(p_top, p_bottom, r_mesh, r_mesh)
# domain = Box(Point(x0_mesh, y0_mesh, z0_mesh), Point(x1_mesh, y1_mesh, z1_mesh))
resolution = 40
mesh = generate_mesh(domain, resolution)
if MPI.rank(MPI.comm_world) == 0:
    print(mesh.coordinates().shape)
# Persist the generated mesh for the simulation runs.
f = HDF5File(mesh.mpi_comm(), "mesh_co.hdf5", 'w')
f.write(mesh, 'mesh')
| CINPLA/KNPsim | examples/hay_model/make_mesh.py | make_mesh.py | py | 1,247 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "scipy.io.io.loadmat",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_num... |
17752793066 | import socket
from jsonNetwork import Timeout, sendJSON, receiveJSON, NotAJSONObject, fetch
from threading import Thread, Timer
import importlib
import sys
from championship import Championship, addPlayer, getAllPlayers, getState, changePlayerStatus, updateState, hookRegister
from graphics import ui
def checkClient(address):
    '''
    Ping the client at *address*.

    Returns 'online' when it answers the ping with 'pong', 'lost' on any
    other answer or on any transport/parse failure.
    '''
    print('checking client {}:'.format(address), end=' ')
    try:
        response = fetch(address, {
            'request': 'ping'
        })
        # Any reply other than 'pong' counts as unreachable.
        status = 'online' if response.get('response') == 'pong' else 'lost'
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); any fetch failure means 'lost'.
        status = 'lost'
    print(status)
    return status
def checkAllClient():
    """Re-ping every registered player and record the result in the state."""
    for player in getAllPlayers(getState()):
        current = checkClient(player['address'])
        updateState(changePlayerStatus(player['address'], current))
def finalizeSubscription(address, name, matricules):
    '''
    Register the client as a player, but only if it answers a ping.
    '''
    if checkClient(address) == 'online':
        updateState(addPlayer(name, address, matricules))
def startSubscription(client, address, request):
    '''
    Acknowledge a subscription and schedule its finalization.

    Because the client may be single threaded, it may only start listening
    for requests after sending its subscription, so the confirmation ping is
    delayed by 1 second via a Timer.
    '''
    clientAddress = (address[0], int(request['port']))
    print('Subscription received for {} with address {}'.format(request['name'], clientAddress))
    # Validate before acknowledging so a bad payload surfaces as an error
    # response. Generator (not a list) lets any() short-circuit.
    if any(not isinstance(matricule, str) for matricule in request['matricules']):
        raise TypeError("Matricules must be strings")
    sendJSON(client, {
        'response': 'ok'
    })
    Timer(1, finalizeSubscription, [clientAddress, request['name'], request['matricules']]).start()
def processRequest(client, address):
    '''
    Read one JSON request from *client* and route it to its handler.

    Every failure mode is reported back to the client as a JSON error
    response rather than raised to the caller.
    '''
    print('request from', address)
    try:
        request = receiveJSON(client)
        if request['request'] == 'subscribe':
            startSubscription(client, address, request)
        else:
            # Lands in the generic Exception handler below.
            raise ValueError('Unknown request \'{}\''.format(request['request']))
    # Handler order matters: specific exceptions first, Exception last.
    except Timeout:
        sendJSON(client, {
            'response': 'error',
            'error': 'transmition take too long'
        })
    except NotAJSONObject as e:
        sendJSON(client, {
            'response': 'error',
            'error': str(e)
        })
    except KeyError as e:
        # Raised when a required field is absent from the request dict.
        sendJSON(client, {
            'response': 'error',
            'error': 'Missing key {}'.format(str(e))
        })
    except Exception as e:
        # Catch-all boundary: report anything else (incl. the ValueError
        # above) to the client instead of killing the server loop.
        sendJSON(client, {
            'response': 'error',
            'error': str(e)
        })
def listenForRequests(port):
    '''
    Start a daemon thread accepting client connections on *port*.

    Returns a zero-argument function that stops the thread (may take up to
    ~1 second, the accept timeout).
    '''
    # Closure flag polled by the accept loop; flipped by stop().
    running = True
    def processClients():
        with socket.socket() as s:
            s.bind(('0.0.0.0', port))
            # 1s timeout so the loop regularly re-checks `running`
            # instead of blocking forever in accept().
            s.settimeout(1)
            s.listen()
            print('Listen to', port)
            while running:
                try:
                    client, address = s.accept()
                    # `with` closes the client socket after handling.
                    with client:
                        processRequest(client, address)
                except socket.timeout:
                    pass
    listenThread = Thread(target=processClients, daemon=True)
    listenThread.start()
    def stop():
        nonlocal running
        running = False
        listenThread.join()
    return stop
def formatClient(client):
    """Render a player record as 'name: points' for display."""
    return f"{client['name']}: {client['points']}"
if __name__ == '__main__':
    # CLI: any `-port=N` argument sets the listen port; the first other
    # argument names the game module to load from games/<name>/.
    args = sys.argv[1:]
    port = 3000
    gameName = None
    for arg in args:
        if arg.startswith('-port='):
            port = int(arg[len('-port='):])
        else:
            gameName = arg
    stopSubscriptions = listenForRequests(port)
    # Dynamically load the chosen game's logic and renderer.
    Game = importlib.import_module('games.{}.game'.format(gameName)).Game
    render = importlib.import_module('games.{}.render'.format(gameName)).render
    # Re-ping all clients at the end of every match.
    hookRegister('matchEnd', checkAllClient)
    stopChampionship = Championship(Game)
    # Blocks until the UI window closes, then shut everything down.
    ui(gameName, render)
    stopSubscriptions()
    stopChampionship()
| jmimassi/IA-Abalone | server.py | server.py | py | 3,582 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jsonNetwork.fetch",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "championship.getAllPlayers",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "championship.getState",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ch... |
8912694482 | from config.config_train import dataset_type
model_file = {
"noun" : { "pretrain" : "cnn_noun_pretrain.pt", "train" : "cnn_noun_train.pt" },
"pronoun" : { "pretrain" : "cnn_pronoun_pretrain.pt", "train" : "cnn_pronoun_train.pt" },
"verb" : { "pretrain" : "cnn_verb_pretrain.pt", "train" : "cnn_verb_train.pt" },
"adjective" : { "pretrain" : "cnn_adjective_pretrain.pt", "train" : "cnn_adjective_train.pt" },
"adverb" : { "pretrain" : "cnn_adverb_pretrain.pt", "train" : "cnn_adverb_train.pt" },
"conjunction" : { "pretrain" : "cnn_conjunction_pretrain.pt", "train" : "cnn_conjunction_train.pt" },
"preposition" : { "pretrain" : "cnn_preposition_pretrain.pt", "train" : "cnn_preposition_train.pt" },
"interjection" : { "pretrain" : "cnn_interjection_pretrain.pt", "train" : "cnn_interjection_train.pt" },
"decoder" : { "train" : "rnn_decoder.pt" }
}
dataset_file = {
"pretrain" : "cnn_pretrain_dataset.pkl",
"train" : "cnn_rnn_train_dataset.pkl",
"test" : "cnn_rnn_test_dataset.pkl",
"validation" : "cnn_rnn_validation_dataset.pkl",
"result" : "cnn_rnn_caption_result.pkl"
}
# Map dataset_type codes to the filename suffix used for that dataset's
# pickles (type 1 / anything else keeps the default names above).
_DATASET_SUFFIXES = {2: "_flickr8k", 3: "_flickr30k"}

_suffix = _DATASET_SUFFIXES.get(dataset_type)
if _suffix is not None:
    # Insert the suffix before the extension of every entry, e.g.
    # "cnn_pretrain_dataset.pkl" -> "cnn_pretrain_dataset_flickr8k.pkl".
    for _key, _name in dataset_file.items():
        _stem, _ext = _name.rsplit(".", 1)
        dataset_file[_key] = "{}{}.{}".format(_stem, _suffix, _ext)
dataset_skip_file = {
"train" : "skip_train.csv",
"validation" : "skip_validation.csv",
"noun" : "skip_noun.csv",
"pronoun" : "skip_pronoun.csv",
"verb" : "skip_verb.csv",
"adjective" : "skip_adjective.csv",
"adverb" : "skip_adverb.csv",
"conjunction" : "skip_conjunction.csv",
"preposition" : "skip_preposition.csv",
"interjection" : "skip_interjection.csv"
}
| philgookang/pcr | config/config_file.py | config_file.py | py | 2,527 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "config.config_train.dataset_type",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "config.config_train.dataset_type",
"line_number": 29,
"usage_type": "name"
}
] |
22533622469 | import requests
from config import API_KEY
# from flask import jsonify
def get_data(query):
    """Search the USDA FoodData Central API for *query*.

    Returns the parsed JSON response as a dict. The query and API key are
    passed via `params` so requests URL-encodes them — the old string
    concatenation sent spaces/special characters raw.
    """
    print(query)
    url = "https://api.nal.usda.gov/fdc/v1/foods/search"
    response = request("GET", url, params={"api_key": API_KEY, "query": query})
    foods_list = response.json()
    print(foods_list)
    return foods_list
| reginesgit/Nutritional-Analysis-of-USDA-Foods | get_foods.py | get_foods.py | py | 634 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.API_KEY",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
}
] |
19699158710 | import json
# Keys that must be present in every deployment settings file.
MANDATORY_SETTINGS = ('FunctionName', 'Handler', 'Role', 'Runtime')


def load_settings(filepath):
    """Load the Lambda deployment settings JSON and validate it.

    Raises KeyError naming the first missing mandatory key. (The previous
    implementation used `assert`, which is silently stripped under
    `python -O` and let invalid settings through.)
    """
    with open(filepath, 'r') as f:
        settings = json.load(f)
    for key in MANDATORY_SETTINGS:
        if key not in settings:
            raise KeyError('{} not found in the settings JSON.'.format(key))
    return settings
| InfraPixels/powerlibs-aws-lambda-deployer | powerlibs/aws/λ/deployer/lambda_settings.py | lambda_settings.py | py | 398 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 8,
"usage_type": "call"
}
] |
30609717940 | # Python: v3.9.13, OS: Windows 11
import os
import sys
import json
import random
import geojson
import folium
import webbrowser
import pandas as pd
# GTFS folder location
input_folder = r'D:\dev\github\GTFS_Visualization\01_source\Open_Data_MMTS_Hyd'
# Output folder location to store geojson, html files
output_folder = r'D:\dev\github\GTFS_Visualization\03_out'
# If basemap_on = 'y' OSM will appear as basemap
basemap_on = ''


def bcg_map():
    """Return the folium tile layer name, or None for a blank background."""
    return 'openstreetmap' if basemap_on == 'y' else None
# Create output folder if not exist
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Output file name prefix
output_file_prefix = 'script2_{}'.format(((input_folder.split('\\')[-1]).replace(' ','_')).lower())
# Reading text files
input_path_agencyTxt = os.path.join(input_folder, 'agency.txt')
input_path_routesTxt = os.path.join(input_folder, 'routes.txt')
input_path_tripsTxt = os.path.join(input_folder, 'trips.txt')
input_path_stopTimes = os.path.join(input_folder, 'stop_times.txt')
input_path_stopsTxt = os.path.join(input_folder, 'stops.txt')
# with open(input_path) as input_file:
# for line in input_file:
# temp_line = []
# line = line.rstrip('\n')
# temp_line = line.split(',')
# input_list.append(temp_line)
# Text file to List
def textToList(text_file):
    """Read a comma-separated GTFS text file into a list of row dicts."""
    frame = pd.read_csv(text_file)
    return frame.to_dict('records')
# Conver text files into list
try:
list_agency = textToList(input_path_agencyTxt)
list_routes = textToList(input_path_routesTxt)
list_trips = textToList(input_path_tripsTxt)
list_stop_times = textToList(input_path_stopTimes)
list_stops = textToList(input_path_stopsTxt)
except:
print('''
Error: File missing
Make sure to run this script following files are available in input directory:
1. agency.txt
2. routes.txt
3. trips.txt
4. stops.txt
5. stop_times.txt''')
sys.exit()
# sorting up stop_times.txt based on stop_sequence
def sort_stop_sequence(record):
    """Sort key: the stop_sequence field of a stop_times record."""
    return record['stop_sequence']
list_stop_times.sort(key=sort_stop_sequence)
# Ensure every route has a usable route_color: missing, empty, or NaN values
# (pandas leaves NaN for blank CSV cells) get a random 6-digit hex colour so
# each route is still distinguishable on the map.
for i in list_routes:
    color = i.get('route_color')
    if color is None or str(color) in ('', 'nan'):
        i['route_color'] = "%06x" % random.randint(0, 0xFFFFFF)
# Storing list of Stops per Trips
dct_trip_stop = {}
for items in list_stop_times:
if items['trip_id'] not in dct_trip_stop:
dct_trip_stop[items['trip_id']] = [items['stop_id']]
else:
dct_trip_stop[items['trip_id']].append(items['stop_id'])
# Storing list of Trips per Routes
dct_route_trip = {}
for items in list_trips:
if items['route_id'] not in dct_route_trip:
dct_route_trip[items['route_id']] = [items['trip_id']]
else:
dct_route_trip[items['route_id']].append(items['trip_id'])
# Storing route_id, stop_id, trip_id, route_color in a new list
list_1 = []
for key,value in dct_route_trip.items():
for i in value:
for k,v in dct_trip_stop.items():
dct_1 = {}
if i == k:
dct_1['route_id'] = key
dct_1['stop_id'] = v
dct_1['trip_id'] = k
for r in list_routes:
if key == r['route_id']:
dct_1['route_color'] = r['route_color']
break
list_1.append(dct_1)
break
# Storing Routes with unique Stops in a new list
# Deduplicate (route_id, stop list) pairs: a route is appended only if no
# already-kept entry has the same route_id AND the same ordered stop list.
list_unique_route = []
cnt = 0
for i in list_1:
    # Seed the output with the very first record.
    if cnt == 0:
        list_unique_route.append({'route_id': i['route_id'], 'stop_id': i['stop_id'], 'route_color': i['route_color']})
        cnt += 1
    # chck becomes 1 when a duplicate is found among the kept entries.
    # NOTE(review): on the first iteration chck is only defined because the
    # just-seeded entry matches itself — fragile; consider initialising
    # chck = 0 before the inner loop.
    for j in list_unique_route:
        if (str(i['route_id']) == str(j['route_id']) and str(i['stop_id']) == str(j['stop_id'])):
            chck = 1
    if chck == 0:
        list_unique_route.append({'route_id': i['route_id'], 'stop_id': i['stop_id'], 'route_color': i['route_color']})
    # Reset the duplicate flag for the next record.
    chck = 0
# Adding up stop_name, stop_location
list_all = []
for i in list_unique_route:
lst_geo = []
dct_2 = {}
start_stop = i['stop_id'][0]
end_stop = i['stop_id'][-1]
start_stop_name = 'dummy'
end_stop_name = 'dummy'
for j in i['stop_id']:
for k in list_stops:
if (j == start_stop and k['stop_id'] == start_stop):
start_stop_name = k['stop_name']
if (j == end_stop and k['stop_id'] == end_stop):
end_stop_name = k['stop_name']
if str(j) == str(k['stop_id']):
lst_geo.append([k['stop_lon'], k['stop_lat']])
break
dct_2['routeId'] = i['route_id']
dct_2['stops'] = i['stop_id']
dct_2['stop_count'] = len(i['stop_id'])
dct_2['geometry'] = lst_geo
dct_2['from_to'] = '{} TO {}'.format(start_stop_name, end_stop_name)
dct_2['routeColor'] = i['route_color']
list_all.append(dct_2)
# convert to geojson
def shape_to_feature(routeId, fromTo, stopCount, routeColor, geo):
    """Build a GeoJSON LineString Feature for one route.

    geo: list of [lon, lat] pairs; routeColor: 6-digit hex without '#'.
    """
    properties = {
        'route_id': routeId,
        'from_to': fromTo,
        'stop_count': stopCount,
        'route_color': '#{}'.format(routeColor),
    }
    geometry = {'type': 'LineString', 'coordinates': geo}
    return {'type': 'Feature', 'geometry': geometry, 'properties': properties}
def stops_to_feature(stop_lon, stop_lat, stop_name):
    """Build a GeoJSON Point Feature for one stop."""
    point = {'type': 'Point', 'coordinates': [stop_lon, stop_lat]}
    return {'type': 'Feature', 'geometry': point, 'properties': {'stop_name': stop_name}}
shapes_geojson = geojson.FeatureCollection([
shape_to_feature(i['routeId'], i['from_to'], i['stop_count'], i['routeColor'], i['geometry'])
for i in list_all])
stops_geojson = geojson.FeatureCollection([
stops_to_feature(i['stop_lon'], i['stop_lat'], i['stop_name'])
for i in list_stops])
# write geojson
shapes_geojson_path = os.path.join(output_folder, '{}_shapes.geojson'.format(output_file_prefix))
with open(shapes_geojson_path, 'w') as f:
json.dump(shapes_geojson, f)
stops_geojson_path = os.path.join(output_folder, '{}_stops.geojson'.format(output_file_prefix))
with open(stops_geojson_path, 'w') as f:
json.dump(stops_geojson, f)
# initiate map object
m = folium.Map(
#location = [10.0727,76.3336],
#tiles='cartodbpositron',
tiles = bcg_map(),
zoom_start = 16,
control_scale = True)
# Feature group to store layers
shape_Layer = folium.FeatureGroup(name='shapes_geom').add_to(m)
stops_Layer = folium.FeatureGroup(name='stops_geom').add_to(m)
# Adding map heading
map_heading = list_agency[0]['agency_name'].upper()
title_html = '''
<h3 align="center" style="font-size:16px"><b>{}</b></h3>
'''.format(map_heading)
m.get_root().html.add_child(folium.Element(title_html))
# specifying properties from GeoJSON
shapes_style_function = lambda x: {
'color': x['properties']['route_color'],
'opacity': 0.6,
'weight': '4',
#'dashArray': '3,6'
}
shapes_highlight_function = lambda x: {
'color': 'yellow',
'opacity': 1,
'weight': '10',
#'dashArray': '3,6'
}
# Plotting geojson
stops_map = folium.GeoJson(
stops_geojson_path,
name = 'stops',
control = True,
# marker = folium.Marker( # Radius in metres
# icon_size = 0, #outline weight
# icon = folium.Icon(color='darkblue'),
# fill_opacity = 1),
tooltip= folium.GeoJsonTooltip(
fields=['stop_name'],
aliases=['Stop Name: '],
# setting style for popup box
style=("background-color: white; color: #333333; font-family: arial; font-size: 12px; padding: 10px;")
)
)
shapes_map = folium.GeoJson(
shapes_geojson_path,
name = 'shapes',
control = True,
style_function = shapes_style_function,
highlight_function = shapes_highlight_function,
tooltip=folium.GeoJsonTooltip(
# using fields from the geojson file
fields=['from_to', 'stop_count', 'route_id'],
aliases=['Route: ', 'Total Stops: ', 'Route_ID: '],
style=("background-color: white; color: #333333; font-family: arial; font-size: 12px; padding: 10px;")
)
)
shapes_map.add_to(shape_Layer)
stops_map.add_to(stops_Layer)
# m.add_child(stops_map)
# m.add_child(shapes_map)
folium.LayerControl().add_to(m)
# To zoom on data extent
m.fit_bounds(m.get_bounds(), padding=(30, 30))
# saving the map to html file and oppening it in default browser
html_path = os.path.join(output_folder, '{}_map.html'.format(output_file_prefix))
m.save(html_path)
webbrowser.open(html_path) | sahachandan/GTFS_Visualization | 02_script/visualize_routes_without_shapes_txt.py | visualize_routes_without_shapes_txt.py | py | 8,835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numb... |
21654121522 | from requests import request
from json import loads
# (email address, expected API verdict) pairs used as a tiny smoke test.
emails = [
    ("hello.world", "failure"),
    ("hello.world@company.com", "success"),
    ("hello.world@", "failure"),
    ("hello.world@.com", "failure"),
    ("hello.world@company.gov.in", "success"),
    ("hello.world@company.edu", "success")
]

for email in emails:
    url = f"https://api.eva.pingutil.com/email?email={email[0]}"
    # Issue the GET request and get a Response object back.
    response = request("GET", url)
    # Raw JSON body as text.
    json_text = response.text
    # Deserialise the JSON string into a dict.
    rsp = loads(json_text)
    # Verdict reported by the validation API.
    actual_status = rsp['status']
    # Compare against the expected verdict for this address.
    if actual_status == email[1]:
        print("PASS")
    else:
        print("FAIL")
{
"api_name": "requests.request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
}
] |
59323449 | import f90nml
import sys
import os
import shutil
import numpy as np
import subprocess
import integral2d
import math
from scipy.special import *
# CLI: <root_run_dir> <dest_dir (also the numeric sweep coefficient)> <parameter to sweep>
cwd = os.getcwd()
args = sys.argv
root = args[1]
dest = args[2]
opt = args[3]
# NOTE(review): the destination directory name doubles as the sweep
# coefficient, so it must parse as a float — confirm this naming convention.
coef = float(dest)
fdtd_path = "/home/b/b36288/large0/drude/fdtd"
root_path = cwd+"/"+root
dest_path = cwd+"/"+dest
if not os.path.exists(dest_path):
    os.mkdir(dest_path)
# Refresh the symlink to the FDTD solver before recreating it below.
if os.path.exists(dest_path+"/fdtd"):
    os.remove(dest_path+"/fdtd")
os.chdir(dest_path)
shutil.copyfile(root_path+"/job.sh",dest_path+"/job.sh")
os.symlink(fdtd_path,dest_path+"/fdtd")
# Copy the template Fortran namelist from the root run into the new run dir.
with open(root_path+"/param.inp","r") as nml_file:
    params = f90nml.read(nml_file)
dest_nml = params
with open(dest_path+"/param.inp","w") as nml_file:
    f90nml.write(dest_nml,dest_path+"/param.inp",force=True)
params = f90nml.read("param.inp")
# Grid / time / scatterer parameters pulled from the namelist.
dx = params["space"]["dx"]
nx = params["space"]["nxx"]
lx = params["scatt"]["lx"]
dy = dx
c = 2.9979246e8  # speed of light [m/s]
# Time step derived from the Courant number "deltat" on the 2-D grid.
dt = params["time"]["deltat"]/(c*np.sqrt(1.0/(dx*dx)+1.0/(dy*dy)))
nstep= params["time"]["nstep"]
a = params["object"]["radius"]*dx
#lm = params["scatt"]["lambda"]*dx
freq = params["scatt"]["freq"]
phi0 = params["scatt"]["phi0"]
wp = params["plasma"]["wp"]
nu = params["plasma"]["nu"]
prad = params["plasma"]["prad"]*dx
cfreq = freq
lm = c/cfreq
comega=2*np.pi*cfreq
k0a = comega*a/c  # size parameter k0*a of the cylinder
tau0 = params["scatt"]["tau0"]
ntau0=int(tau0/dt)
radi0 = 1.74532925e-2  # degrees -> radians (pi/180)
phi = radi0*phi0
r0x = np.cos(phi)
r0y = np.sin(phi)
vbc = c
wpw = wp/comega  # plasma frequency normalised by the drive frequency
nuw = nu/comega  # collision rate normalised by the drive frequency
def wpg():
    """Print the area-averaged plasma frequency of the graded annular shell.

    Builds a radial profile n(r) = wp * sqrt(J0(2.40 * (r - rad)/(pprad - rad)))
    on the annulus rad <= r < pprad around the grid centre, integrates it with
    the project helper ``integral2d.integ2d``, and prints the average
    normalised by the drive frequency ``comega``.
    Relies on module globals: nx, dx, a, prad, wp, comega.
    """
    px = int(nx/2)
    py = px
    ds = dx*dx
    rad = int(a/dx)
    pprad = int(prad/dx)
    def radi(i,j):
        # Radial distance (in grid cells) from the grid centre (px, py).
        return np.sqrt(((i-px))**2+((j-py))**2)
    count = 0
    nancount = 0
    n = np.zeros([nx,nx])
    # Scan a bounding box 5% larger than the plasma radius.
    lower = int(px-pprad*1.05)
    upper = int(px+pprad*1.05)
    for i in range(lower,upper):
        for j in range(lower,upper):
            if(radi(i,j)>=rad and radi(i,j)<pprad):
                # 2.40 is just below the first zero of J0 (~2.405), so the
                # profile decays to ~0 at r = pprad.
                n[i,j] = wp*np.sqrt(jn(0,2.40*(radi(i,j)-rad)/(pprad-rad)))
                if(math.isnan(n[i,j])):
                    # Defensive: J0 stays positive below its first zero, but
                    # zero out any numerical NaN from the sqrt.
                    n[i,j] = 0.0
                    nancount = nancount + 1
                count = count + 1
    dn = integral2d.integ2d(n,dx)
    nds = ds*count
    print("wp/w={:3f}".format(dn/nds/comega))
# Apply the requested sweep: plasma frequency (wp), collision rate (nu),
# graded-profile plasma frequency (wpg), or plasma shell radius (rp).
if(opt=="wp"):
    wp = comega*coef
    dest_nml["plasma"]["wp"] = wp
if(opt=="nu"):
    nu = comega*coef
    dest_nml["plasma"]["nu"] = nu
if(opt=="wpg"):
    #rad50#wp = comega*coef*1.38555552
    #wp = comega*coef*1.42467882
    #rad100#wp = comega*coef*1.39210827
    # NOTE(review): magic calibration constant (per-radius variants are
    # commented above) — presumably makes the J0-graded profile average
    # to coef*comega; confirm against wpg()'s printed value.
    wp = comega*coef*1.42779182
    dest_nml["plasma"]["wp"] = wp
    dest_nml["plasma"]["pls"] = 2
if(opt=="rp"):
    # New shell radius: coef half-wavelengths beyond the cylinder radius.
    prad = int((lm*coef/2+a)/dx)
    dest_nml["plasma"]["prad"] = prad
    prad = prad*dx
with open(dest_path+"/param.inp","w") as nml_file:
    f90nml.write(dest_nml,dest_path+"/param.inp",force=True)
print("freq:{:3e}".format(cfreq))
print("wp:{:3e}".format(wp))
print("nu:{:3e}".format(nu))
print("nx:",nx)
print("rad:",a)
print("prad:",prad)
print("lambda:",lm)
print("omega:{:3e}".format(comega))
print("ka:{:.3f}".format(k0a))
if(opt=="wpg"):
    wpg()
else:
    print("wp/w:{:.3f}".format(wp/comega))
    print("nu/w:{:.3f}".format(nu/comega))
    print("2(rp-rc)/lm:{:.3f}".format(2*(prad-a)/lm))
# Submit the prepared run directory to the batch queue.
subprocess.run(["qsub","job.sh"])
'''
nml = {
"space":{
"nxx":3000,
"nyy":3000,
"dx":0.00025,
"dy":0.00025,
"abc":1,
"pbc":[0,0,0],
"lpml":[8,8,0]
},
"time":{
"deltat":0.80,
"nstep":12000
},
"output":{
"out":20000,
"ostart":0,
"odom":[0,0,0,0],
"stride":1,
"comp":[0,0,0,0,0,0,0,0,0],
"io":600,
"jo":1008
},
"scatt":{
"mode":3,
"lx":450,
"ly":450,
"gamma0":-90.0,
"thetat0":90.0,
"phi0":180.0,
"amp":1.0,
"lambda":400,
"tau0":1.0e-20
},
"far":{
"isx":5,
"isy":5,
"theta1":90.0,
"phi1":180.0,
},
"object":{
"obj":1,
"med":2,
"ic":508,
"jc":508,
"lx2":0,
"ly2":0,
"epsr":1.0,
"radius":40.0,
},
"feed":{
"lfeed":0,
"ip":0,
"jp":0,
"duration":0.0,
"t0":0.0
},
"wave":{
"kwave":0,
"amps":[0.0,0.0,1.0,0.0,1.0,0.0],
"orgs":[150.0,0.0,0.0],
"angs":[90.0,0.0,0.0],
"pt":5.0,
"pw":10.0
},
"plasma":{
"pls":1,
"prad":60,
"nu":0.0,
"wp":3.3903e10,
}
}
''' | takagi-junya/pyprogs | param.py | param.py | py | 4,849 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
8446087178 | import pickle
import unittest
from cupy.cuda import cutensor
@unittest.skipUnless(cutensor.available, 'cuTensor is unavailable')
class TestExceptionPicklable(unittest.TestCase):
    """Check that CuTensorError survives a pickle round-trip intact."""

    def test(self):
        original = cutensor.CuTensorError(1)
        revived = pickle.loads(pickle.dumps(original))
        assert revived.args == original.args
        assert str(revived) == str(original)
| cupy/cupy | tests/cupy_tests/cuda_tests/test_cutensor.py | test_cutensor.py | py | 353 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cupy.cuda.cutensor.CuTensorError",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cupy.cuda.cutensor",
"line_number": 11,
"usage_type": "name"
},
{
"api_nam... |
23959473202 | import pymongo
from datetime import datetime, timedelta
from db import DBHandler
from tinkof import FetchPrices, quotation2float
import asyncio
import os
# Mongo connection settings come from the environment; db2 is the target DB.
connString = os.getenv('MONGODB_CONNSTRING')
dbName = os.getenv('MONGODB_DATABASE')
client = pymongo.MongoClient(connString)
db2 = client[dbName]
def CalcProfit(bond):
    """Return annualised (full, coupon) profitability fractions for a bond.

    The full figure combines coupon income with the pull-to-par market
    difference (nominal - market price - accrued coupon income); the coupon
    figure covers coupon payments only.  Both are divided by the market
    price and scaled to a 365-day year over the days left to maturity.

    Args:
        bond (dict): Requires 'maturity_date' (datetime), 'nominal',
            'market_price' and 'aci_value'; optionally 'coupons', a list of
            dicts with 'pay_one_bond' and 'coupon_type'.

    Returns:
        tuple(float, float): (full_profitability, coupon_profitability);
        (0, 0) when the price is zero or the bond has matured.
    """
    daysLeft = (bond['maturity_date'] - datetime.now()).days
    # Guard: matured / same-day bonds would otherwise divide by zero below.
    if daysLeft <= 0:
        return (0, 0)
    if bond['market_price'] == 0:
        return (0, 0)
    marketDiff = bond['nominal'] - bond['market_price'] - bond['aci_value']
    # No coupon schedule: only the pull-to-par component contributes.
    if 'coupons' not in bond.keys() or bond['coupons'] == []:
        return (marketDiff / bond['market_price'] / daysLeft * 365, 0)
    if bond['coupons'][-1]['coupon_type'] == 'COUPON_TYPE_CONSTANT':
        c = sum(i['pay_one_bond'] for i in bond['coupons']) / bond['market_price'] / daysLeft * 365
        m = marketDiff / bond['market_price'] / daysLeft * 365
        return (c + m, c)
    elif bond['coupons'][-1]['coupon_type'] == 'COUPON_TYPE_VARIABLE':
        # Variable coupons: assume every remaining coupon pays like the last.
        c = bond['coupons'][-1]['pay_one_bond'] * len(bond['coupons']) / bond['market_price'] / daysLeft * 365
        m = marketDiff / bond['market_price'] / daysLeft * 365
        return (c + m, c)
    else:
        couponSum = sum(i['pay_one_bond'] for i in bond['coupons'])
        fp = (couponSum + marketDiff) / bond['market_price'] / daysLeft * 365
        cp = couponSum / bond['market_price'] / daysLeft * 365
        # BUG FIX: previously returned (cp, fp).  Callers unpack the result
        # as (full, coupon), matching the (c + m, c) order of the branches
        # above, so full profitability must come first.
        return (fp, cp)
async def main():
    """Fetch latest bond prices, recompute profitability, and upsert to Mongo."""
    db = DBHandler()
    bonds = [i for i in (await db.GetAllBonds())['bonds']]
    bond_figis = [i['figi'] for i in bonds]
    # NOTE(review): hard-coded Tinkoff API token committed to source —
    # move it to an environment variable like the Mongo settings above.
    prices = await FetchPrices(bond_figis, 't.CbJu2z3-n0MfU9Dbtbk9kxlGvnml00A7upkA6WvXDjQcpwmtqQyyJ4z00oS17cMfVFO_twNOZ5OcdHvMLyHbwg')
    price_dict = {}
    for i in prices:
        price_dict[i.figi] = i.price
    for bond in bonds:
        # Quotations arrive as percent of nominal; convert to currency units.
        bond['market_price'] = quotation2float(
            price_dict[bond['figi']])*bond['nominal']/100
        fp, cp = CalcProfit(bond)
        bond['profitability'] = fp
        bond['coupon_profitability'] = cp
    # Persist: upsert each enriched bond document by its _id.
    for bond in bonds:
        db2['bonds'].replace_one({"_id": bond["_id"]}, bond, upsert=True)
asyncio.run(main())
| Ne0Ment/nomisma | updateprices.py | updateprices.py | py | 2,162 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"li... |
29290633558 | __author__ = "Hao Qin"
__email__ = "awww797877@gmail.com"
import numpy as np
from data import dummy
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
BATCH_SIZE = 32
def get_y(poses, index_pose1, index_pose2, num_batch_images, max_transform, max_rotation):
    """Similarity labels in (-1, 0] for two aligned batches of (x, y, theta) poses.

    The pose distance (planar translation + absolute heading difference) is
    normalised by the dataset-wide maxima, scaled by 20 and pushed through a
    shifted sigmoid, so identical poses map to 0 and very distant poses
    approach -1.
    """
    batch_a = poses[index_pose1:index_pose1 + num_batch_images]
    batch_b = poses[index_pose2:index_pose2 + num_batch_images]
    # Euclidean distance in the xy-plane between paired poses.
    translation = np.sqrt(np.square(batch_a[:, 0] - batch_b[:, 0]) +
                          np.square(batch_a[:, 1] - batch_b[:, 1]))
    heading_gap = np.absolute(batch_a[:, 2] - batch_b[:, 2])
    scaled = 20 * (translation + heading_gap) / (max_transform + max_rotation)
    return (1 / (1 + np.exp(scaled)) - 0.5) * 2
if __name__ == '__main__':
    poses = dummy.load_poses('/home/hao/others/data/CNN_SLAM/2012-04-06-11-15-29_part1_floor2.gt.laser.poses')
    # Dataset-wide extents used to normalise pose distances in get_y.
    max_pose = np.amax(poses, axis=0)
    min_pose = np.amin(poses, axis=0)
    max_transform = np.sqrt(np.square(max_pose[0] - min_pose[0]) + np.square(max_pose[1] - min_pose[1]))
    max_rotation = max_pose[2] - min_pose[2]
    print(max_transform, max_rotation)
    z = np.array([])
    # NOTE(review): range(0, BATCH_SIZE, BATCH_SIZE) yields only offset1=0,
    # so the outer loop runs a single iteration — confirm this is intended.
    for offset1 in range(0, BATCH_SIZE, BATCH_SIZE):
        temp = np.array([])
        # Compare the first batch against the first 1216 poses, batch by batch.
        for offset2 in range(0, len(poses[0:1216]), BATCH_SIZE):
            t = get_y(poses, offset1, offset2, BATCH_SIZE, max_transform, max_rotation)
            print(t)
            temp = np.append(temp, t)
        z = np.vstack((z, temp)) if z.size else temp
| QinHarry/CNN_SLAM | temp.py | temp.py | py | 1,927 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "numpy.square",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_numbe... |
15985105295 | from inspect import signature
import torch
import torch.nn as nn
from mmcv.runner import force_fp32
from mmdet.core import images_to_levels, multi_apply, unmap, MaxIoUAssigner
from mmrotate.core import (build_assigner, obb2hbb, build_sampler,
rotated_anchor_inside_flags, )
from ..builder import ROTATED_HEADS, build_loss
from .rotated_anchor_head import RotatedAnchorHead
from mmcv.utils.logging import print_log, logger_initialized
from mmcv.cnn import initialize
@ROTATED_HEADS.register_module()
class RotatedEoodHead(RotatedAnchorHead):
    """Rotated Anchor-based head (RotatedRPN, RotatedRetinaNet, etc.).
    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        anchor_generator (dict): Config dict for anchor generator
        bbox_coder (dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied on decoded bounding boxes. Default: False
        assign_by_circumhbbox (str): If None, assigner will assign according to
            the IoU between anchor and GT (OBB), called RetinaNet-OBB.
            If angle definition method, assigner will assign according to the
            IoU between anchor and GT's circumbox (HBB), called RetinaNet-HBB.
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """ # noqa: W605
    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='retina_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 loss_cls=dict(type='FocalLoss',
                               use_sigmoid=True,
                               gamma=2.0,
                               alpha=0.25,
                               loss_weight=1.0),
                 loss_bbox=dict(type='L1Loss', loss_weight=1.0),
                 loss_iou=dict(type='RotatedIoULoss', loss_weight=1.0),
                 train_cfg=None,
                 test_cfg=None,
                 **kwargs):
        super(RotatedEoodHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            init_cfg=init_cfg,
            **kwargs)
        # Build loss modules here (NOTE(review): confirm RotatedAnchorHead
        # does not already build loss_cls/loss_bbox from **kwargs).
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_iou = build_loss(loss_iou)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        assert test_cfg is not None, "eood need test cfg"
        if self.train_cfg:
            # Static IoU assigner (warm-up) and the prediction-aware 'pola'
            # assigner used once self.epoch >= self.init_epoch.
            self.assigner = build_assigner(self.train_cfg.assigner)
            self.pola = build_assigner(self.train_cfg.pola)
            self.init_epoch = self.train_cfg.init_epoch
            if self.sampling and hasattr(self.train_cfg, 'sampler'):
                sampler_cfg = self.train_cfg.sampler
            else:
                sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        # Current epoch; only read here, so it is presumably advanced by an
        # external training hook — verify.
        self.epoch = 0
    def _init_layers(self):
        """Initialize layers of the head."""
        # Single 3x3 conv heads for classification and 5-parameter
        # (cx, cy, w, h, angle) box regression.
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_anchors * 5, 3, padding=1)
    def init_weights(self):
        """Initialize weights according to ``self.init_cfg`` and log the event."""
        module_name = self.__class__.__name__
        logger_names = list(logger_initialized.keys())
        logger_name = logger_names[0] if logger_names else 'mmcv'
        print_log(
            f'initialize {module_name} with init_cfg {self.init_cfg}',
            logger=logger_name)
        initialize(self, self.init_cfg)
    def forward(self, feats):
        """Forward features from the upstream network.
        Args:
            feats (list): Per-level features; each element is a
                ``(cls_feat, reg_feat)`` pair (see :meth:`forward_single`).
        Returns:
            tuple: Per-level classification scores and bbox predictions.
        """
        return multi_apply(self.forward_single, feats)
    def forward_single(self, x):
        """Forward feature of a single scale level.
        Args:
            x (torch.Tensor): Features of a single scale level.
        Returns:
            tuple (torch.Tensor):
                - cls_score (torch.Tensor): Cls scores for a single scale \
                    level the channels number is num_anchors * num_classes.
                - bbox_pred (torch.Tensor): Box energies / deltas for a \
                    single scale level, the channels number is num_anchors * 5.
        """
        # x is a (cls_feat, reg_feat) pair; each branch has its own features.
        cls_feat = x[0]
        reg_feat = x[1]
        cls_score = self.retina_cls(cls_feat)
        bbox_pred = self.retina_reg(reg_feat)
        return cls_score, bbox_pred
    def _get_targets_single(self,
                            cls_scores,
                            bbox_preds,
                            flat_anchors,
                            valid_flags,
                            gt_bboxes,
                            gt_bboxes_ignore,
                            gt_labels,
                            img_meta,
                            label_channels=1,
                            unmap_outputs=True):
        """Compute regression and classification targets for anchors in a
        single image.
        Args:
            flat_anchors (torch.Tensor): Multi-level anchors of the image,
                which are concatenated into a single tensor of shape
                (num_anchors, 5)
            valid_flags (torch.Tensor): Multi level valid flags of the image,
                which are concatenated into a single tensor of
                    shape (num_anchors,).
            gt_bboxes (torch.Tensor): Ground truth bboxes of the image,
                shape (num_gts, 5).
            img_meta (dict): Meta info of the image.
            gt_bboxes_ignore (torch.Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 5).
            img_meta (dict): Meta info of the image.
            gt_labels (torch.Tensor): Ground truth labels of each box,
                shape (num_gts,).
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.
        Returns:
            tuple (list[Tensor]):
                - labels_list (list[Tensor]): Labels of each level
                - label_weights_list (list[Tensor]): Label weights of each \
                    level
                - bbox_targets_list (list[Tensor]): BBox targets of each level
                - bbox_weights_list (list[Tensor]): BBox weights of each level
                - num_total_pos (int): Number of positive samples in all images
                - num_total_neg (int): Number of negative samples in all images
        """
        inside_flags = rotated_anchor_inside_flags(
            flat_anchors, valid_flags, img_meta['img_shape'][:2],
            self.train_cfg.allowed_border)
        if not inside_flags.any():
            # NOTE(review): 7 placeholders here, but the normal path below
            # returns 8 values — multi_apply unpacking would fail if this
            # branch ever triggers.
            return (None, ) * 7
        # assign gt and sample anchors
        anchors = flat_anchors[inside_flags, :]
        if self.assign_by_circumhbbox is not None:
            # RetinaNet-HBB style: assign by IoU against the circumscribed
            # horizontal box of each rotated GT.
            # NOTE(review): max_iou is never set on this branch but is read
            # below — confirm assign_by_circumhbbox is always None here.
            gt_bboxes_assign = obb2hbb(gt_bboxes, self.assign_by_circumhbbox)
            assign_result = self.assigner.assign(
                anchors, gt_bboxes_assign, gt_bboxes_ignore,
                None if self.sampling else gt_labels)
        else:
            if self.epoch < self.init_epoch:
                # Warm-up phase: static IoU-based assignment.
                assign_result = self.assigner.assign(
                    anchors, gt_bboxes, gt_bboxes_ignore,
                    None if self.sampling else gt_labels)
                max_iou = None
            else:
                # After warm-up: prediction-aware assignment driven by the
                # network's current logits and decoded boxes.
                pred_logits = cls_scores[inside_flags, :]
                pred_bbox_delta = bbox_preds[inside_flags, :]
                pred_bbox = self.bbox_coder.decode(anchors, pred_bbox_delta)
                assign_result = self.pola.assign(
                    pred_logits, pred_bbox,
                    gt_labels, gt_bboxes, img_meta)
                max_iou = assign_result.max_overlaps
        sampling_result = self.sampler.sample(assign_result, anchors,
                                              gt_bboxes)
        num_valid_anchors = anchors.shape[0]
        bbox_targets = torch.zeros_like(anchors)
        bbox_weights = torch.zeros_like(anchors)
        # gt_targets holds the raw (un-encoded) GT box for each positive
        # anchor; used later by the IoU loss on decoded predictions.
        gt_targets = torch.zeros_like(anchors)
        labels = anchors.new_full((num_valid_anchors, ),
                                  self.num_classes,
                                  dtype=torch.long)
        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            if not self.reg_decoded_bbox:
                pos_bbox_targets = self.bbox_coder.encode(
                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
            else:
                pos_bbox_targets = sampling_result.pos_gt_bboxes
            bbox_targets[pos_inds, :] = pos_bbox_targets
            bbox_weights[pos_inds, :] = 1.0
            gt_targets[pos_inds, :] = gt_bboxes[sampling_result.pos_assigned_gt_inds]
            if gt_labels is None:
                # Only rpn gives gt_labels as None
                # Foreground is the first class since v2.5.0
                labels[pos_inds] = 0
            else:
                labels[pos_inds] = gt_labels[
                    sampling_result.pos_assigned_gt_inds]
            if self.train_cfg.pos_weight <= 0:
                label_weights[pos_inds] = 1.0
            else:
                label_weights[pos_inds] = self.train_cfg.pos_weight
        if len(neg_inds) > 0:
            if max_iou is not None:
                # Soft negative weighting: scale negatives by the overlap
                # reported by the prediction-aware assigner.
                label_weights[neg_inds] = max_iou[neg_inds]
            else:
                label_weights[neg_inds] = 1.0
        # map up to original set of anchors
        if unmap_outputs:
            num_total_anchors = flat_anchors.size(0)
            labels = unmap(
                labels, num_total_anchors, inside_flags,
                fill=self.num_classes)  # fill bg label
            label_weights = unmap(label_weights, num_total_anchors,
                                  inside_flags)
            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
            gt_targets = unmap(gt_targets, num_total_anchors, inside_flags)
        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
                neg_inds, gt_targets, sampling_result)
    def get_targets(self,
                    cls_scores,
                    bbox_preds,
                    anchor_list,
                    valid_flag_list,
                    gt_bboxes_list,
                    img_metas,
                    gt_bboxes_ignore_list=None,
                    gt_labels_list=None,
                    label_channels=1,
                    unmap_outputs=True,
                    return_sampling_results=False):
        """Compute regression and classification targets for anchors in
        multiple images.
        Args:
            anchor_list (list[list[Tensor]]): Multi level anchors of each
                image. The outer list indicates images, and the inner list
                corresponds to feature levels of the image. Each element of
                the inner list is a tensor of shape (num_anchors, 5).
            valid_flag_list (list[list[Tensor]]): Multi level valid flags of
                each image. The outer list indicates images, and the inner list
                corresponds to feature levels of the image. Each element of
                the inner list is a tensor of shape (num_anchors, )
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
                ignored.
            gt_labels_list (list[Tensor]): Ground truth labels of each box.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.
        Returns:
            tuple: Usually returns a tuple containing learning targets.
                - labels_list (list[Tensor]): Labels of each level.
                - label_weights_list (list[Tensor]): Label weights of each \
                    level.
                - bbox_targets_list (list[Tensor]): BBox targets of each level.
                - bbox_weights_list (list[Tensor]): BBox weights of each level.
                - num_total_pos (int): Number of positive samples in all \
                    images.
                - num_total_neg (int): Number of negative samples in all \
                    images.
            additional_returns: This function enables user-defined returns from
                `self._get_targets_single`. These returns are currently refined
                to properties at each feature map (i.e. having HxW dimension).
                The results will be concatenated after the end
        """
        num_imgs = len(img_metas)
        assert len(anchor_list) == len(valid_flag_list) == num_imgs
        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors to a single tensor
        concat_anchor_list = []
        concat_valid_flag_list = []
        for i in range(num_imgs):
            assert len(anchor_list[i]) == len(valid_flag_list[i])
            concat_anchor_list.append(torch.cat(anchor_list[i]))
            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        if gt_labels_list is None:
            gt_labels_list = [None for _ in range(num_imgs)]
        results = multi_apply(
            self._get_targets_single,
            cls_scores,
            bbox_preds,
            concat_anchor_list,
            concat_valid_flag_list,
            gt_bboxes_list,
            gt_bboxes_ignore_list,
            gt_labels_list,
            img_metas,
            label_channels=label_channels,
            unmap_outputs=unmap_outputs)
        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
         pos_inds_list, neg_inds_list, all_gt_targets, sampling_results_list) = results[:8]
        rest_results = list(results[8:]) # user-added return values
        # no valid anchors
        if any([labels is None for labels in all_labels]):
            return None
        # sampled anchors of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        # split targets to a list w.r.t. multiple levels
        labels_list = images_to_levels(all_labels, num_level_anchors)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_anchors)
        bbox_targets_list = images_to_levels(all_bbox_targets,
                                             num_level_anchors)
        bbox_weights_list = images_to_levels(all_bbox_weights,
                                             num_level_anchors)
        gt_targets_list = images_to_levels(all_gt_targets,
                                           num_level_anchors)
        res = (labels_list, label_weights_list, bbox_targets_list,
               bbox_weights_list, gt_targets_list, num_total_pos, num_total_neg)
        if return_sampling_results:
            res = res + (sampling_results_list, )
        for i, r in enumerate(rest_results):  # user-added return values
            rest_results[i] = images_to_levels(r, num_level_anchors)
        return res + tuple(rest_results)
    def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
                    bbox_targets, bbox_weights, gt_targets, num_total_samples):
        """Compute loss of a single scale level.
        Args:
            cls_score (torch.Tensor): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_pred (torch.Tensor): Box energies / deltas for each scale
                level with shape (N, num_anchors * 5, H, W).
            anchors (torch.Tensor): Box reference for each scale level with
                shape (N, num_total_anchors, 5).
            labels (torch.Tensor): Labels of each anchors with shape
                (N, num_total_anchors).
            label_weights (torch.Tensor): Label weights of each anchor with
                shape (N, num_total_anchors)
            bbox_targets (torch.Tensor): BBox regression targets of each anchor
            weight shape (N, num_total_anchors, 5).
            bbox_weights (torch.Tensor): BBox regression loss weights of each
                anchor with shape (N, num_total_anchors, 5).
            num_total_samples (int): If sampling, num total samples equal to
                the number of total anchors; Otherwise, it is the number of
                positive anchors.
        Returns:
            tuple (torch.Tensor):
                - loss_cls (torch.Tensor): cls. loss for each scale level.
                - loss_bbox (torch.Tensor): reg. loss for each scale level.
        """
        # classification loss
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        loss_cls = self.loss_cls(
            cls_score, labels, label_weights, avg_factor=num_total_samples)
        # regression loss
        bbox_targets = bbox_targets.reshape(-1, 5)
        bbox_weights = bbox_weights.reshape(-1, 5)
        gt_targets = gt_targets.reshape(-1, 5)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 5)
        if self.reg_decoded_bbox:
            anchors = anchors.reshape(-1, 5)
            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
        loss_bbox = self.loss_bbox(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            avg_factor=num_total_samples)
        # IoU loss operates on decoded boxes against the raw GT boxes.
        anchors = anchors.reshape(-1, 5)
        pred_bboxes = self.bbox_coder.decode(anchors, bbox_pred)
        loss_iou = self.loss_iou(
            pred_bboxes,
            gt_targets,
            bbox_weights,
            avg_factor=num_total_samples,
        )
        return loss_cls, loss_bbox, loss_iou
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.
        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 5, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 5) in [cx, cy, w, h, a] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss. Default: None
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        num_imgs = len(img_metas)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        # cls_score_list = [
        #     cls_scores[i][img_id].detach() for i in range(num_levels)
        # ]
        # bbox_pred_list = [
        #     bbox_preds[i][img_id].detach() for i in range(num_levels)
        # ]
        # Flatten per-level predictions into per-image (num_anchors, C)
        # tensors for the prediction-aware assigner in get_targets.
        flatten_cls_scores = torch.cat([
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.cls_out_channels)
            for cls_score in cls_scores
        ], 1)
        flatten_bbox_preds = torch.cat([
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 5)
            for bbox_pred in bbox_preds
        ], 1)
        cls_reg_targets = self.get_targets(
            flatten_cls_scores,
            flatten_bbox_preds,
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, gt_targets_list,
         num_total_pos, num_total_neg) = cls_reg_targets
        num_total_samples = (
            num_total_pos + num_total_neg if self.sampling else num_total_pos)
        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors and flags to a single tensor
        concat_anchor_list = []
        for i, _ in enumerate(anchor_list):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list,
                                           num_level_anchors)
        losses_cls, losses_bbox, losses_iou = multi_apply(
            self.loss_single,
            cls_scores,
            bbox_preds,
            all_anchor_list,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            gt_targets_list,
            num_total_samples=num_total_samples)
        # NOTE(review): losses_bbox is computed in loss_single but excluded
        # from the returned dict — only cls + IoU losses drive training.
        return dict(loss_cls=losses_cls, loss_iou=losses_iou)#loss_bbox=losses_bbox,, loss_cls_o2m=losses_cls1, loss_bbox_o2m = losses_bbox1)
| zhangiguang/EOOD | mmrotate/models/dense_heads/rotated_eood_head.py | rotated_eood_head.py | py | 23,502 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rotated_anchor_head.RotatedAnchorHead",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "builder.build_loss",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "builder.build_loss",
"line_number": 81,
"usage_type": "call"
},
{
"api_n... |
7253701477 | from core.models import Habit, DailyRecord, User
from rest_framework import serializers
class DailyRecordSerializer(serializers.ModelSerializer):
    """Serializes a DailyRecord as its date plus free-form note."""
    class Meta:
        model = DailyRecord
        fields = ("date", "note",)
class HabitSerializer(serializers.ModelSerializer):
    """Serializes a Habit together with its nested daily records."""
    # NOTE(review): read_only=False on a nested serializer normally requires
    # custom create()/update() handling in DRF — confirm writes are intended.
    daily_records = DailyRecordSerializer(many=True, read_only=False)
    class Meta:
        model = Habit
        fields = (
            "pk",
            "title",
            "goal",
            "created_at",
            "daily_records",
        )
class UserSerializer(serializers.ModelSerializer):
    """Exposes basic user fields for the API."""
    # NOTE(review): a 'url' field usually needs HyperlinkedModelSerializer
    # (or an explicit field declaration) — verify this serializes correctly.
    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'is_staff']
# class RecordSerializer(serializers.ModelSerializer):
# habit = HabitSerializer(read_only=)
# class Meta:
# model = DailyRecord
# fields = ("habit", "date", "note",)
| Momentum-Team-9/django-habit-tracker-esparr | api/serializers.py | serializers.py | py | 875 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "core.models.DailyRecord",
"line_number": 7,
"usage_type": "name"... |
36521165263 | import json
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, f1_score
def train_model(features, target, train_params):
    """Fit a classifier selected by ``train_params.model_type``.

    Args:
        features: Training feature matrix.
        target: Training labels.
        train_params: Config object with ``model_type`` and a nested
            ``model_params`` section matching the chosen estimator.

    Returns:
        The fitted sklearn estimator.

    Raises:
        ValueError: If ``model_type`` names an unsupported model.
    """
    if train_params.model_type == 'RandomForestClassifier':
        rf_params = train_params.model_params.RandomForestClassifier
        model = RandomForestClassifier(n_estimators=rf_params.n_estimators,
                                       random_state=rf_params.random_state,
                                       min_samples_leaf=rf_params.min_samples_leaf,
                                       max_depth=rf_params.forest_max_depth)
    elif train_params.model_type == 'DecisionTreeClassifier':
        dt_params = train_params.model_params.DecisionTreeClassifier
        model = DecisionTreeClassifier(random_state=dt_params.random_state,
                                       min_samples_leaf=dt_params.min_samples_leaf,
                                       max_depth=dt_params.max_depth)
    else:
        # Previously an unknown type crashed later with UnboundLocalError on
        # ``model``; fail fast with a clear message instead.
        raise ValueError(f'Unsupported model_type: {train_params.model_type!r}')
    model.fit(features, target)
    return model
def predict_model(model, features):
    """Run inference with a fitted estimator and return its predictions."""
    return model.predict(features)
def evaluate_model(predicts, target):
    """Score predictions against ground truth.

    Returns a dict with 'accuracy' and 'f1_score', each rounded to 3 d.p.
    """
    return {
        'accuracy': round(accuracy_score(target, predicts), 3),
        'f1_score': round(f1_score(target, predicts), 3),
    }
def write_csv_data(data, path):
    """Persist prediction values to ``path`` as a single-column CSV named 'y'."""
    frame = pd.DataFrame(data, columns=['y'])
    frame.to_csv(path, index=False)
def write_json_data(data, path):
    """Serialize ``data`` as JSON to the file at ``path``."""
    with open(path, 'w') as sink:
        json.dump(data, sink)
| made-mlops-2022/made_obarskayats | ml_project/models/model_fit_predict.py | model_fit_predict.py | py | 1,782 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 29,
"usage_type": "... |
74497600425 | #!/usr/bin/env python3
import asyncio
import atexit
import json
import socket
from collections import deque
from datetime import datetime
from logging import getLogger, INFO, StreamHandler, FileHandler
from time import sleep
from subprocess import run
from os import path
from simple_pid import PID
from tinkerforge.ip_connection import IPConnection
from tinkerforge.ip_connection import Error as TFConnectionError
from tinkerforge.bricklet_lcd_128x64 import BrickletLCD128x64
from tinkerforge.bricklet_thermocouple_v2 import BrickletThermocoupleV2
from tinkerforge.bricklet_solid_state_relay_v2 import BrickletSolidStateRelayV2
def last_n_values(n, iterable):
for i in range(n, 0, -1):
yield iterable[-i]
LOGGER = getLogger(__name__)
LOGGER.setLevel(INFO)
STDOUT_HANDLER = StreamHandler()
LOGGER.addHandler(STDOUT_HANDLER)
DATETIME_FMT = "%d/%m/%Y %H:%M:%S"
PID_TUNING_FILE_PATH = "tuning.json"
HOST = "localhost"
PORT = 4223
THERMOCOUPLE_READ_PERIOD = 1000
GUI_READ_PERIOD = 100
PWM_PERIOD = 1000
N_SMOOTHING_POINTS = 5
# fmt: off
CONTROL_ICON = [
0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,0,0,1,0,0,0,1,0,0,1,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,1,0,1,1,1,0,0,1,0,0,1,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,1,1,1,0,1,1,1,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,1,0,0,1,1,1,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
]
GRAPH_ICON = [
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,
1,0,0,0,1,1,0,0,0,1,0,0,1,0,0,0,1,0,0,1,1,1,0,0,1,0,0,0,
1,0,1,1,0,0,0,0,0,0,1,1,0,1,1,1,0,1,1,0,0,0,0,0,0,1,1,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
]
SETTINGS_ICON = [
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
]
# fmt: on
class Heater:
    """PID temperature controller built on Tinkerforge bricklets.

    A thermocouple bricklet supplies temperature readings, a solid state
    relay bricklet switches the heating element (software PWM via relay
    monoflops), and a 128x64 LCD bricklet provides a small tabbed GUI
    (control / graph / settings).  All activity is driven by bricklet
    callbacks registered on a single IP connection to brickd.
    """
    # These are the bricklet objects which are populated
    # as the hardware responds to enumeration request.
    ipcon = None
    lcd = None
    thermocouple = None
    relay = None
    pid = None
    # Current PID setpoint
    setpoint = 0
    # PWM power for output. 0 - 100
    heater_power = 0
    # Current state of output. Boolean
    heater_active = False
    # Current state of thermocouple.
    # False if bricklet reports an error state.
    thermocouple_active = True
    # Current active GUI tab index
    active_tab = 0
    # Number of readings to keep in state. This is set to match graph width
    n_temp_points = 107
    starting_temp = 20
    initial_temp_data = [starting_temp] * N_SMOOTHING_POINTS
    temp_data = deque(initial_temp_data, n_temp_points)
    # Min and max for graph Y axis. Updated automatically with data
    axis_min = starting_temp - 10
    axis_max = starting_temp + 10
    # Set true to read tuning parameters every iteration
    tuning_mode = False
    # Set true to log data to a file
    logging_mode = False
    # Current target tunings
    tunings = {"p": 0, "i": 0, "d": 0, "bias": 0, "proportional_on_measurement": False}
    def __init__(self):
        """Connect to brickd, configure the PID and optional CSV data
        logging, then request bricklet enumeration (retrying forever on
        connection errors)."""
        LOGGER.info("Heater starting...")
        self._init_pid()
        self.ipcon = IPConnection()
        # Retry until brickd is reachable.
        while True:
            try:
                self.ipcon.connect(HOST, PORT)
                break
            except TFConnectionError as error:
                LOGGER.error("Connection Error: " + str(error.description))
                sleep(1)
            except socket.error as error:
                LOGGER.error("Socket Error: " + str(error))
                sleep(1)
        self.ipcon.register_callback(IPConnection.CALLBACK_ENUMERATE, self.cb_enumerate)
        self.ipcon.register_callback(IPConnection.CALLBACK_CONNECTED, self.cb_connected)
        if self.logging_mode:
            logfile_path = f"{datetime.now().timestamp():0f}_heated_data.csv"
            self.data_logger = getLogger("data-logger")
            self.data_logger.setLevel(INFO)
            self.data_logger.addHandler(FileHandler(logfile_path))
            self.data_logger.addHandler(STDOUT_HANDLER)
            # CSV header row for the data log.
            self.data_logger.info(
                f"Timestamp, Temp (°C), Setpoint (°C), Power (%), Kp, Ki, Kd, Cp, Ci, Cd"
            )
        # Trigger enumeration; cb_enumerate initialises each bricklet
        # as the hardware answers.
        while True:
            try:
                self.ipcon.enumerate()
                return
            except TFConnectionError as error:
                LOGGER.error("Enumerate Error: " + str(error.description))
                sleep(1)
    def _init_pid(self):
        """Create the PID controller (output clamped to 0-100 %) and load
        tunings from disk if a tuning file exists."""
        self.pid = PID(setpoint=self.setpoint, output_limits=(0, 100))
        self._read_pid_tunings_from_file()
        self._set_pid_tuning(self.tunings)
    def _read_pid_tunings_from_file(self):
        """Replace self.tunings with the JSON contents of
        PID_TUNING_FILE_PATH, if that file exists."""
        if not path.exists(PID_TUNING_FILE_PATH):
            LOGGER.info(
                f"{PID_TUNING_FILE_PATH} does not exist. Using default tunings."
            )
            return
        with open(PID_TUNING_FILE_PATH, "r") as f:
            self.tunings = json.load(f)
    def _set_pid_tuning(self, tuning_dict):
        """Apply a tunings dict to the PID controller.

        Missing keys default to 0 (gains, bias) or False
        (proportional_on_measurement)."""
        tunings = (tuning_dict.get(parameter, 0) for parameter in ("p", "i", "d"))
        self.pid.tunings = tunings
        self.pid.proportional_on_measurement = tuning_dict.get(
            "proportional_on_measurement", False
        )
        self.pid.bias = tuning_dict.get("bias", 0)
    def _init_lcd(self, uid):
        """Set up the LCD bricklet: clear it, register tab/button
        callbacks and install the three tab icons."""
        try:
            self.lcd = BrickletLCD128x64(uid, self.ipcon)
            self.lcd.clear_display()
            self.lcd.remove_all_gui()
            LOGGER.info("LCD128x64 initialized")
        except TFConnectionError as error:
            LOGGER.error("LCD128x64 init failed: " + str(error.description))
            return
        self.lcd.set_gui_tab_selected_callback_configuration(GUI_READ_PERIOD, True)
        self.lcd.register_callback(
            BrickletLCD128x64.CALLBACK_GUI_TAB_SELECTED, self.cb_tab
        )
        self.lcd.set_gui_tab_configuration(self.lcd.CHANGE_TAB_ON_CLICK_AND_SWIPE, True)
        self.lcd.set_gui_tab_icon(0, CONTROL_ICON)
        self.lcd.set_gui_tab_icon(1, GRAPH_ICON)
        self.lcd.set_gui_tab_icon(2, SETTINGS_ICON)
        self.lcd.set_gui_button_pressed_callback_configuration(GUI_READ_PERIOD, True)
        self.lcd.register_callback(
            BrickletLCD128x64.CALLBACK_GUI_BUTTON_PRESSED, self.cb_button
        )
        # Set initial tab
        self.cb_tab(self.active_tab)
    def cb_tab(self, index):
        """Redraw the display for the newly selected tab.

        Tab 0: temperature/setpoint/power readouts plus +-1/10/100 degree
        buttons.  Tab 1: rolling temperature graph.  Tab 2: settings with
        a shutdown button.  (\\xDF appears to be the degree glyph in the
        LCD's character set — confirm against the bricklet font table.)"""
        self.active_tab = index
        self.lcd.clear_display()
        if index == 0:
            self.write_temp()
            self.write_setpoint()
            self.write_power()
            self.lcd.set_gui_button(0, 2, 18, 61, 11, "-1\xDFC")
            self.lcd.set_gui_button(1, 66, 18, 61, 11, "+1\xDFC")
            self.lcd.set_gui_button(2, 2, 30, 61, 11, "-10\xDFC")
            self.lcd.set_gui_button(3, 66, 30, 61, 11, "+10\xDFC")
            self.lcd.set_gui_button(4, 2, 42, 61, 11, "-100\xDFC")
            self.lcd.set_gui_button(5, 66, 42, 61, 11, "+100\xDFC")
        elif index == 1:
            self.lcd.set_gui_graph_configuration(
                0, BrickletLCD128x64.GRAPH_TYPE_LINE, 20, 0, 107, 52, "", ""
            )
            self.update_graph()
            self.lcd.draw_text(0, 23, BrickletLCD128x64.FONT_6X8, True, "\xDFC")
            self.update_axis()
        elif index == 2:
            self.lcd.draw_text(0, 0, BrickletLCD128x64.FONT_6X8, True, "BV21")
            self.lcd.set_gui_button(6, 0, 10, 80, 20, "Shut Down")
    def _cb_set_button(self, setpoint):
        """Apply a new setpoint (state, PID and display)."""
        self.setpoint = setpoint
        self.pid.setpoint = setpoint
        self.write_setpoint()
    def cb_button(self, index, value):
        """Handle a GUI button event.

        Buttons 0-5 adjust the setpoint by +-1/10/100 degrees, clamped to
        [0, 1500]; button 6 shuts down cleanly and powers off the host.
        `value` is False on button release, which is ignored."""
        if value is False:
            return
        if index == 0:
            self._cb_set_button(max(self.setpoint - 1, 0))
        elif index == 1:
            self._cb_set_button(min(self.setpoint + 1, 1500))
        elif index == 2:
            self._cb_set_button(max(self.setpoint - 10, 0))
        elif index == 3:
            self._cb_set_button(min(self.setpoint + 10, 1500))
        elif index == 4:
            self._cb_set_button(max(self.setpoint - 100, 0))
        elif index == 5:
            self._cb_set_button(min(self.setpoint + 100, 1500))
        elif index == 6:
            self.close()
            self.shutdown_host()
    def _init_thermocouple(self, uid):
        """Set up the thermocouple bricklet (type-K, 16-sample averaging,
        60 Hz filter) and register reading/error callbacks."""
        try:
            self.thermocouple = BrickletThermocoupleV2(uid, self.ipcon)
            LOGGER.info("Thermocouple initialized")
        except TFConnectionError as error:
            LOGGER.error("Thermocouple init failed: " + str(error.description))
            return
        self.thermocouple.set_configuration(
            BrickletThermocoupleV2.AVERAGING_16,
            BrickletThermocoupleV2.TYPE_K,
            BrickletThermocoupleV2.FILTER_OPTION_60HZ,
        )
        self.thermocouple.set_temperature_callback_configuration(
            THERMOCOUPLE_READ_PERIOD, False, "x", 0, 0
        )
        self.thermocouple.register_callback(
            BrickletThermocoupleV2.CALLBACK_ERROR_STATE, self.cb_thermocouple_error
        )
        self.thermocouple.register_callback(
            BrickletThermocoupleV2.CALLBACK_TEMPERATURE, self.cb_thermocouple_reading
        )
    def cb_thermocouple_error(self, over_under, open_circuit):
        """Track the thermocouple health flag reported by the bricklet."""
        if any((over_under, open_circuit)):
            self.thermocouple_active = False
        else:
            self.thermocouple_active = True
        LOGGER.info(
            f"Thermocouple reports: "
            f"over/under voltage {over_under}, open-circuit {open_circuit}"
        )
    def get_pid_value(self):
        """Return the PID output (0-100) for the smoothed current
        temperature, optionally re-reading tunings from disk first."""
        current_temp = (
            sum(last_n_values(N_SMOOTHING_POINTS, self.temp_data)) / N_SMOOTHING_POINTS
        )
        if self.tuning_mode:
            self._read_pid_tunings_from_file()
            self._set_pid_tuning(self.tunings)
        return self.pid(current_temp)
    def cb_thermocouple_reading(self, value):
        """Main control step, called on every temperature reading.

        Converts the raw reading (hundredths of a degree) to degrees,
        feeds the PID and drives the relay: 0 %% and 100 %% are "sticky"
        states (relay held off/on); intermediate powers run through the
        monoflop PWM loop (see cb_relay_flop).
        NOTE(review): assumes self.relay has been enumerated by the time
        readings arrive — confirm ordering is guaranteed."""
        if self.thermocouple_active:
            current_temp = value / 100
            self.temp_data.append(current_temp)
            power = self.get_pid_value()
        else:
            power = 0
            LOGGER.info("Thermocouple in error state. Output deactivated.")
        old_power = self.heater_power
        sticky_state_active = old_power == 100 or old_power == 0
        self.heater_power = power
        if power == 100:
            self.relay.set_state(True)
            self.heater_active = True
        elif power == 0:
            self.relay.set_state(False)
            self.heater_active = False
        elif 0 < power < 100 and sticky_state_active:
            # If we're coming out of a sticky state, kick off the
            # flop loop for PWM.
            self.relay.set_state(False)
            self.heater_active = False
            self.relay.set_monoflop(False, 0)
        self.write_temp()
        self.write_power()
        self.update_graph()
        if self.logging_mode:
            self.log_line()
    def log_line(self):
        """Append one CSV row (timestamp, temp, setpoint, power, PID
        gains and components) to the data log."""
        timestamp = datetime.now().strftime(DATETIME_FMT)
        current_temp = self.temp_data[-1]
        kp, ki, kd = self.pid.tunings
        cp, ci, cd = self.pid.components
        log_line = ", ".join(
            str(value)
            for value in (
                timestamp,
                current_temp,
                self.setpoint,
                self.heater_power,
                kp,
                ki,
                kd,
                cp,
                ci,
                cd,
            )
        )
        self.data_logger.info(log_line)
    def _init_relay(self, uid):
        """Set up the solid state relay bricklet (initially off) and
        register the monoflop-done callback used for PWM."""
        try:
            self.relay = BrickletSolidStateRelayV2(uid, self.ipcon)
            LOGGER.info("Relay initialized")
        except TFConnectionError as error:
            LOGGER.error("Relay init failed: " + str(error.description))
            return
        self.relay.register_callback(
            BrickletSolidStateRelayV2.CALLBACK_MONOFLOP_DONE, self.cb_relay_flop
        )
        self.relay.set_state(False)
    def cb_relay_flop(self, _):
        """PWM step: when a monoflop expires, start the opposite phase.

        On/off durations split PWM_PERIOD according to heater_power."""
        on_time = round((self.heater_power / 100) * PWM_PERIOD)
        off_time = PWM_PERIOD - on_time
        if self.heater_power < 100:
            if self.heater_active:
                self.relay.set_monoflop(False, off_time)
                self.heater_active = False
            else:
                self.relay.set_monoflop(True, on_time)
                self.heater_active = True
        # If power is 0 or 100, we're not using the flop loop
    def write_temp(self):
        """Draw the current temperature (or ERR!) on the control tab."""
        if self.lcd is None:
            return
        if self.active_tab != 0:
            return
        current_temp = self.temp_data[-1]
        temp_string = (
            f"T: {current_temp:2.0f}\xDFC" if self.thermocouple_active else "T: ERR!"
        )
        self.lcd.draw_box(0, 0, 59, 10, True, BrickletLCD128x64.COLOR_WHITE)
        self.lcd.draw_text(0, 0, BrickletLCD128x64.FONT_6X8, True, temp_string)
    def write_power(self):
        """Draw the current output power on the control tab."""
        if self.lcd is None:
            return
        if self.active_tab != 0:
            return
        self.lcd.draw_box(0, 10, 127, 19, True, BrickletLCD128x64.COLOR_WHITE)
        string = f"Power: {self.heater_power:3.1f}%"
        self.lcd.draw_text(0, 10, BrickletLCD128x64.FONT_6X8, True, string)
    def write_setpoint(self):
        """Draw the current setpoint on the control tab."""
        if self.lcd is None:
            return
        if self.active_tab != 0:
            return
        set_string = f"S: {self.setpoint}\xDFC"
        self.lcd.draw_box(60, 0, 127, 10, True, BrickletLCD128x64.COLOR_WHITE)
        self.lcd.draw_text(60, 0, BrickletLCD128x64.FONT_6X8, True, set_string)
    def update_axis(self):
        """Redraw the graph tab's Y-axis min/max labels."""
        self.lcd.draw_box(0, 0, 20, 10, True, BrickletLCD128x64.COLOR_WHITE)
        self.lcd.draw_box(0, 45, 20, 55, True, BrickletLCD128x64.COLOR_WHITE)
        self.lcd.draw_text(
            0, 0, BrickletLCD128x64.FONT_6X8, True, f"{self.axis_max:3.0f}"
        )
        self.lcd.draw_text(
            0, 45, BrickletLCD128x64.FONT_6X8, True, f"{self.axis_min:3.0f}"
        )
        # NOTE(review): draws an empty string — looks like leftover code.
        self.lcd.draw_text(0, 107, BrickletLCD128x64.FONT_6X8, True, f"")
    def update_graph(self):
        """Rescale the temperature history to the graph's 0-255 range and
        push it to the LCD, updating the axis labels when range changes."""
        if self.lcd is None:
            return
        if self.active_tab != 1:
            return
        max_temp = max(self.temp_data)
        min_temp = min(self.temp_data)
        # Pad a little bit for looks
        max_temp *= 1.1
        min_temp *= 0.9
        diff = max_temp - min_temp
        if diff == 0:
            # This probably means we don't have any data yet
            return
        scaled_data = [((value - min_temp) / diff) * 255 for value in self.temp_data]
        # This gets rid of any randomness which apparently sometimes occurs when
        # the thermocouple bricklet is physically bumped.
        scaled_data = map(lambda value: max(min(value, 255), 0), scaled_data)
        if max_temp != self.axis_max or min_temp != self.axis_min:
            self.axis_max = max_temp
            self.axis_min = min_temp
            self.update_axis()
        self.lcd.set_gui_graph_data(0, scaled_data)
    def cb_enumerate(self, uid, _, __, ___, ____, device_identifier, enumeration_type):
        """Dispatch each enumerated bricklet to its initialiser."""
        if (
            enumeration_type == IPConnection.ENUMERATION_TYPE_CONNECTED
            or enumeration_type == IPConnection.ENUMERATION_TYPE_AVAILABLE
        ):
            if device_identifier == BrickletLCD128x64.DEVICE_IDENTIFIER:
                self._init_lcd(uid)
            elif device_identifier == BrickletThermocoupleV2.DEVICE_IDENTIFIER:
                self._init_thermocouple(uid)
            elif device_identifier == BrickletSolidStateRelayV2.DEVICE_IDENTIFIER:
                self._init_relay(uid)
    def cb_connected(self, connected_reason):
        """Re-enumerate after an automatic reconnect to brickd."""
        if connected_reason == IPConnection.CONNECT_REASON_AUTO_RECONNECT:
            LOGGER.info("Auto Reconnect")
            while True:
                try:
                    self.ipcon.enumerate()
                    break
                except TFConnectionError as error:
                    LOGGER.error("Enumerate Error: " + str(error.description))
                    sleep(1)
    def close(self):
        """Leave the hardware in a safe state: clear the LCD, open the
        relay and drop the brickd connection."""
        if self.lcd:
            self.lcd.clear_display()
            self.lcd.remove_all_gui()
        if self.relay:
            self.relay.set_state(False)
        if self.ipcon is not None:
            self.ipcon.disconnect()
        LOGGER.info("Heater shut down")
    def shutdown_host(self):
        """Power off the host machine (requires passwordless sudo)."""
        run("sudo shutdown now", shell=True)
if __name__ == "__main__":
    heater = Heater()
    # Ensure the hardware is left safe (relay off, LCD cleared) on exit.
    atexit.register(heater.close)
    loop = asyncio.get_event_loop()
    try:
        # All work happens inside bricklet callbacks; the event loop just
        # keeps the process alive until interrupted.
        loop.run_forever()
    finally:
        loop.close()
| BenVosper/heated | regulated.py | regulated.py | py | 17,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "logging.StreamHandler",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.... |
38724560306 | # import the packages
from getAnimePics.items import GetanimepicsItem
import datetime
import scrapy
from scrapy.exceptions import CloseSpider
pageNumber = 0
class CoverSpider(scrapy.Spider):
    """Crawl a gelbooru tag-search listing, follow every post thumbnail
    and yield the original-quality image URL for the ImagesPipeline."""
    name = "gelbooruSearch"
    allowed_domains = ['gelbooru.com']
    start_urls = ["https://gelbooru.com/index.php?page=post&s=list&tags=okita_souji_%28fate%29+solo+rating%3asafe"]
    custom_settings = {
        "ITEM_PIPELINES": {'scrapy.pipelines.images.ImagesPipeline': 1},
        "IMAGES_STORE": 'D:\Cute anime girls\Okita'
    }
    def request(self, url, callback):
        """Build a Request carrying the cookies/User-Agent the site expects
        (resize-original=1 asks for full-size images)."""
        request = scrapy.Request(url=url, callback=callback)
        request.cookies['resize-original'] = "1"
        request.cookies['resize-notification'] = "1"
        request.headers['User-Agent'] = ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, '
                                         'like Gecko) Chrome/45.0.2454.85 Safari/537.36')
        return request
    def parse(self, response):
        """Yield one request per post on this listing page, then paginate.

        BUG FIX: the original stored `zip(url)` — a one-shot iterator that
        was exhausted by the for-loop and has no len(), so the pagination
        check `len(urlList) == 42` raised TypeError.  We keep the plain
        list of hrefs instead.
        """
        imagesPerPage = 42
        # Post links live under the span > a thumbnail elements.
        urls = response.css("span a::attr(href)").extract()
        for href in urls:
            imageLink = "https:" + href
            # Each post page is parsed by parse_images.
            yield self.request(imageLink, callback=self.parse_images)
        # NOTE(review): module-level pageNumber makes pagination stateful
        # across the whole process; fine for a single crawl run.
        nextUrl = response.url.split("&pid=")
        global pageNumber
        pageNumber += 1
        nextPage = nextUrl[0] + "&pid=" + str(pageNumber * imagesPerPage)
        # A full listing page has 42 thumbnails; fewer means the last page.
        if len(urls) == imagesPerPage:
            yield self.request(nextPage, callback=self.parse)
    def parse_images(self, response):
        """Resolve the original-quality image URL from a post page."""
        imageUrl = response.css("img::attr(src)").extract_first()
        realUrl = ''.join(imageUrl)
        # A "samples" URL is a downscaled preview; recover the original.
        if "samples" in realUrl:
            # Hard-coded xpath to the inline JS blob; appears to be the
            # same on every gelbooru post page.
            script_parts = response.xpath("/html/body/div[4]/div[5]/script[2]/text()").extract()
            tempString = ''.join(script_parts)
            # Slice out the real image filename from the 'img' JS field.
            tempString = tempString[(tempString.index('\'img\'')):(tempString.index('\', \'base_dir'))]
            image = tempString.split('\'img\':\'')
            image = ''.join(image[1])
            # Swap the sample path/prefix for the full-resolution one.
            realUrl = realUrl.replace("samples", "images")
            realUrl = realUrl.split("sample_")
            realUrl = realUrl[0] + image
        # image_urls is consumed by scrapy's ImagesPipeline.
        yield GetanimepicsItem(image_urls=[realUrl])
| kkc028/Animage-Scraper | getAnimePics/spiders/coverspider.py | coverspider.py | py | 2,990 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "getAnimePics.items.GetanimepicsItem",
"line_number": 65,
"usage_type": "call"
}
] |
38697493522 | # TODO: import 見直し
import numpy as np
from collections import deque
from tqdm import tqdm # progress bar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import gym
from gym import spaces
from gym.spaces.box import Box
import cv2
cv2.ocl.setUseOpenCL(False)
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
import networks
import agents
import memories
"""
Main Environment
"""
class Environment:
    """A2C training driver: builds parallel wrapped Atari environments and
    runs the advantage-actor-critic update loop."""
    def run(self, env_name, num_processes, num_stack_frame, num_advanced_step, num_updates):
        """Train an actor-critic net on *env_name*.

        num_processes: parallel env workers; num_stack_frame: frames
        stacked per observation; num_advanced_step: rollout length per
        update; num_updates: number of gradient updates."""
        def make_env(env_id, seed, rank):
            def _thunk():
                """
                The _thunk closure is apparently required so SubprocVecEnv
                can construct each environment in its own worker process.
                """
                env = gym.make(env_id)
                env = NoOpResetEnvironment(env, no_op_max=30)
                env = MaxAndSkipEnvironment(env, skip=4)
                env.seed(seed + rank)
                env = EpisodicLifeEnvironment(env)
                env = WarpFrame(env)
                env = WrapByTorch(env)
                return env
            return _thunk
        use_cuda = torch.cuda.is_available()
        device = torch.device('cuda' if use_cuda else 'cpu')
        print(device)
        seed_num = 1
        torch.manual_seed(seed_num)
        if use_cuda:
            torch.cuda.manual_seed(seed_num)
        # construct environments
        torch.set_num_threads(seed_num)
        envs = [ make_env(env_name, seed_num, i) for i in range(num_processes) ]
        envs = SubprocVecEnv(envs)  # multi-process execution environment
        # create instance of Brain class shared by all agents
        n_out = envs.action_space.n
        actor_critic = networks.Net(n_out, num_stack_frame).to(device)
        global_brain = agents.Brain(actor_critic)
        # create variables
        observation_shape = envs.observation_space.shape  # (1, 84, 84)
        observation_shape = (observation_shape[0]*num_stack_frame, *observation_shape[1:])  # (4, 84, 84)
        current_observation = torch.zeros(num_processes, *observation_shape).to(device)
        rollouts = memories.RolloutStorage(num_advanced_step, num_processes, observation_shape, device)
        episode_rewards = torch.zeros([num_processes, 1])
        final_rewards = torch.zeros([num_processes, 1])
        # initialize and start the environment
        observation = envs.reset()
        observation = torch.from_numpy(observation).float()  # torch.Size([16, 1, 84, 84])
        current_observation[:, -1:] = observation
        # seed the rollout storage with the initial observation
        rollouts.observations[0].copy_(current_observation)
        # run
        for j in tqdm(range(num_updates)):
            for step in range(num_advanced_step):
                # calculate action
                with torch.no_grad():
                    action = actor_critic.act(rollouts.observations[step])
                cpu_actions = action.squeeze(1).cpu().numpy()  # torch.Tensor -> numpy.array
                observation, reward, done, info = envs.step(cpu_actions)
                # translate reward into torch.Tensor
                # change size (16,) -> (16, 1)
                reward = np.expand_dims(np.stack(reward), 1)
                reward = torch.from_numpy(reward).float()
                episode_rewards += reward
                # for parallel execution environments, mask is 0(done) or 1(not done)
                masks = torch.FloatTensor([[0.] if done_ else [1.] for done_ in done])
                # update rewards at the last trial
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
                masks = masks.to(device)
                # apply masks to current observation
                current_observation *= masks.unsqueeze(2).unsqueeze(2)
                # stacking the frames: torch.Size([16, 1, 84, 84])
                observation = torch.from_numpy(observation).float()
                current_observation[:, :-1] = current_observation[:, 1:]
                current_observation[:, -1:] = observation
                rollouts.insert(current_observation, action.data, reward, masks)
            # calculating the state value expected from advanced last step
            with torch.no_grad():
                next_value = actor_critic.get_value(rollouts.observations[-1]).detach()
            rollouts.compute_returns(next_value)
            # update network and rollouts
            global_brain.update(rollouts)
            rollouts.after_update()
            # logs
            if j%100 == 0:
                print("finished frames {}, mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}"
                      .format(j*num_processes*num_advanced_step,
                              final_rewards.mean(),
                              final_rewards.median(),
                              final_rewards.min(),
                              final_rewards.max()))
            if j%12500 == 0:
                torch.save(global_brain.actor_critic.state_dict(), 'weight_' + str(j) + '.pth')
        torch.save(global_brain.actor_critic.state_dict(), 'weight_end.pth')
"""
Environments
"""
class NoOpResetEnvironment(gym.Wrapper):
    """Start each episode with a random number of no-op frames.

    After every reset the wrapper performs between 1 and ``no_op_max``
    "do nothing" steps so that training begins from varied states rather
    than always from the same initial frame.
    """
    def __init__(self, env, no_op_max=30):
        """Wrap *env*; action 0 must mean NOOP for this to be valid."""
        gym.Wrapper.__init__(self, env)
        self.no_op_max = no_op_max
        # When set, forces a fixed number of no-ops instead of a random one.
        self.override_num_loops = None
        self.no_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """Reset the env, then advance a random number of no-op steps and
        return the observation reached."""
        self.env.reset(**kwargs)
        if self.override_num_loops is None:
            num_noops = np.random.randint(1, self.no_op_max + 1)
        else:
            num_noops = self.override_num_loops
        assert num_noops > 0
        last_observation = None
        for _ in range(num_noops):
            last_observation, _, episode_done, _ = self.env.step(self.no_action)
            if episode_done:
                # Episode ended during the no-ops; start over.
                self.env.reset(**kwargs)
        return last_observation
    def step(self, action):
        """Delegate stepping unchanged to the wrapped environment."""
        return self.env.step(action)
class EpisodicLifeEnvironment(gym.Wrapper):
    def __init__(self, env):
        """Episodic-Life environment wrapper.

        When the agent has several lives, resetting after each lost life
        would bias training toward the same start states.  Instead, a lost
        life ends the learning episode (done=True) but the underlying game
        is only truly reset when all lives are gone.

        Parameters:
            env (gym environment) -- an OpenAI Gym environment
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True
    def step(self, action):
        """Step the env, flipping `done` to True whenever a life is lost
        (while remembering whether the game really ended)."""
        observation, reward, done, info = self.env.step(action)
        self.was_real_done = done
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and 0 < lives:
            done = True
        self.lives = lives
        return observation, reward, done, info
    def reset(self, **kwargs):
        """Fully reset only when all lives were exhausted; otherwise just
        advance one no-op step to continue from the current game state."""
        if self.was_real_done:
            observation = self.env.reset(**kwargs)
        else:
            observation, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return observation
class MaxAndSkipEnvironment(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Frame-skip wrapper.

        For games running at a high frame rate, repeating each action over
        `skip` frames makes learning easier and cheaper.
        """
        gym.Wrapper.__init__(self, env)
        # Buffer for the last two raw frames (max-pooled in step()).
        self._observation_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat *action* self._skip times.

        Rewards are summed; the returned observation is the element-wise
        max of the final two frames.
        """
        total_reward = 0.
        done = None
        for i in range(self._skip):
            observation, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._observation_buffer[0] = observation
            if i == self._skip - 1:
                self._observation_buffer[1] = observation
            total_reward += reward
            if done:
                break
        max_observation = self._observation_buffer.max(axis=0)
        return max_observation, total_reward, done, info
    def reset(self, **kwargs):
        """Delegate reset unchanged to the wrapped environment."""
        return self.env.reset(**kwargs)
"""
Observations
"""
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env):
        """Convert image observations to 84x84 single-channel grayscale."""
        gym.ObservationWrapper.__init__(self, env)
        self.image_width = 84
        self.image_height = 84
        self.observation_space = spaces.Box(low=0, high=255, shape=(self.image_height, self.image_width, 1), dtype=np.uint8)
    def observation(self, frame):
        """Grayscale and resize one frame; returns shape (H, W, 1)."""
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # BUG FIX: the original passed (self.width, self.height), attributes
        # that were never set (__init__ defines image_width/image_height),
        # so the first observation raised AttributeError.  cv2.resize takes
        # dsize as (width, height).
        frame = cv2.resize(frame, (self.image_width, self.image_height), interpolation=cv2.INTER_AREA)
        return frame[:, :, None]
class WrapByTorch(gym.ObservationWrapper):
    def __init__(self, env=None):
        """Reorder observations to PyTorch's mini-batch index convention:
        [channels, height, width] instead of [height, width, channels]."""
        super(WrapByTorch, self).__init__(env)
        observation_shape = self.observation_space.shape
        # Rebuild the Box space with the transposed shape; low/high are
        # scalar-filled, so a single corner element suffices.
        self.observation_space = Box(self.observation_space.low[0, 0, 0],
                                     self.observation_space.high[0, 0, 0],
                                     [observation_shape[2], observation_shape[1], observation_shape[0]],
                                     dtype=self.observation_space.dtype)
    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return observation.transpose(2, 0, 1)
{
"api_name": "cv2.ocl.setUseOpenCL",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.ocl",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "gym.make",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
... |
27519529329 | import urllib
import urllib.request
from bs4 import BeautifulSoup
import os
def table(url):
    """Fetch *url* and return the page parsed as a BeautifulSoup tree.

    FIX: the original left the urlopen response to the garbage collector;
    the `with` block closes the HTTP connection deterministically
    (BeautifulSoup reads the whole response before the block exits).
    """
    with urllib.request.urlopen(url) as thepage:
        return BeautifulSoup(thepage, "html.parser")
# Exploratory code: list recipe titles from the recipes index page.
# soup=table("https://www.marmiton.org/recettes/")
# page1=soup.findAll('h4',{'class':'recipe-card__title'})
# tab=[]
# for i in page1:
# tab.append(i.text)
# print(tab)
# Recipe pages (relative to /recettes/) to scrape details from.
link=['recette_brochettes-de-canard-aux-peches_30507.aspx','recette_clafoutis-aux-abricots-et-aux-amandes_71156.aspx']
for li in link:
    soup=table("https://www.marmiton.org/recettes/"+ li +" ")
    # Recipe title (page's single h1).
    nom_de_la_recette=soup.find('h1').text
    print(nom_de_la_recette)
    # Number of servings.
    nombre_de_personnes=soup.find('span',{'class':'title-2 recipe-infos__quantity__value'}).text
    print(nombre_de_personnes)
    # Preparation time.
    durée_de_préparation=soup.find('span',{'class':'recipe-infos__timmings__value'}).text.strip()
    print(durée_de_préparation)
    # Cooking time, nested inside the "cooking" timings block.
    durée=soup.find('div',{'class':'recipe-infos__timmings__cooking'})
    durée_de_cuisson=durée.find('span',{'class':'recipe-infos__timmings__value'}).text.strip()
    print(durée_de_cuisson)
    # Utensil names, one per line.
    liste_des_ustensiles=""
    for i in soup.findAll('span',{'class':'recipe-utensil__name'}):
        liste_des_ustensiles=liste_des_ustensiles+i.text.strip() +'\n'
    print(liste_des_ustensiles)
    # Ingredient lists; per-item extraction is still work in progress.
    liste_des_ingrédients=""
    # print(soup.find('ul',{'class':'recipe-ingredients__list'}).text)
    for i in soup.findAll('ul',{'class':'recipe-ingredients__list'}):
        print(i.text+'\n')
        # liste_des_ingrédients= i.find('il').text
        # print(liste_des_ingrédients)
    # NOTE(review): findAll('') matches nothing — preparation steps are
    # not actually extracted yet.
    étapes_de_préparations=soup.findAll('')
    # étapes_de_préparations
| mousaa32/web-scrapping | marmiton.py | marmiton.py | py | 1,671 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
}
] |
6631832723 | import numpy as np
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
from simplenn.layers import *
class Model:
    """A minimal sequential container for simplenn layers.

    NOTE(review): the ``verbose`` constructor argument is accepted but
    never used (call :meth:`set_verbosity` explicitly instead).
    """
    def __init__(self, layers, verbose=False):
        self.layers = layers
    def set_verbosity(self, verbose):
        """Propagate *verbose* to every layer exposing a ``verbose`` flag."""
        for layer in self.layers:
            if hasattr(layer, 'verbose'):
                layer.verbose = verbose
    def __call__(self, x:np.array):
        """Forward pass: feed *x* through all layers in order."""
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
    def backward(self, grad_output):
        """Backward pass: propagate the gradient through layers in
        reverse order (returns None, mutating layer state as it goes)."""
        grad = grad_output
        for layer in self.layers[::-1]:
            grad = layer.backward(grad)
def run_test(layers, epochs, num_data, batch, lr, pbar=True):
    """Train a fresh MLP to imitate a randomly-initialised reference MLP
    of the same architecture, with a live (blitted) loss plot.

    layers: unit counts per layer, e.g. [in, hidden..., out].
    NOTE(review): the tqdm bar is created with total=epochs but updated
    once per *batch*, so it overshoots; ``num_batch`` is computed but
    unused; ``grads`` is concatenated but never used, and the reported
    loss is that of the last batch only — confirm these are intentional.
    """
    def make_model(layers, ref=False):
        # Build a Linear(+Sigmoid) stack; the reference model gets wide
        # random weights so the student has something nontrivial to fit.
        model = []
        for i, num_output in enumerate(layers):
            if i == 0:
                continue
            num_input = layers[i-1]
            layer = Linear(num_input, num_output)
            if ref:
                layer.w = np.random.uniform(-10, 10, size=(num_input, num_output))
                layer.b = np.random.uniform(-20, 20, size=(num_output, ))
            model.append(layer)
            if i + 1 < len(layers):
                model.append(Sigmoid())
        return Model(model)
    ref_model = make_model(layers, True)
    model = make_model(layers, False)
    model.set_verbosity(False)
    num_batch = (num_data + batch - 1) // batch
    # Live loss plot; the background is cached once for fast blitting.
    fig, ax = plt.subplots()
    (ln,) = ax.plot([-1], [0], animated=True)
    ax.set_xlim(0, epochs)
    plt.show(block=False)
    bg = fig.canvas.copy_from_bbox(fig.bbox)
    fig.canvas.blit(fig.bbox)
    # Synthetic dataset: inputs are Gaussian, targets come from ref_model.
    X = [np.random.normal(size=(1, layers[0], )) for _ in range(num_data)]
    Y = [ref_model(x) for x in X]
    data_idx = list(range(len(X)))
    loss_history = []
    with tqdm(total=epochs, disable=not pbar) as t:
        for e in range(epochs):
            random.shuffle(data_idx)
            grads = []
            for i in range(0, len(data_idx), batch):
                x_batch = [X[j] for j in data_idx[i:(i+batch)]]
                y_ref_batch = [Y[j] for j in data_idx[i:(i+batch)]]
                batch_size = len(x_batch)
                x_batch = np.concatenate(x_batch).reshape(batch_size, -1)
                y_ref_batch = np.concatenate(y_ref_batch).reshape(batch_size, -1)
                y_pred_batch = model(x_batch)
                y_pred_batch = y_pred_batch.reshape(y_ref_batch.shape)
                # MSE gradient w.r.t. predictions (up to a constant factor).
                grad = y_pred_batch - y_ref_batch
                grads.append(grad.copy())
                d = lr * grad
                model.backward(d)
                t.update()
            grads = np.concatenate(grads)
            loss = np.mean(grad**2)
            loss_history.append(loss)
            # Blit only the updated loss line for a fast redraw.
            fig.canvas.restore_region(bg)
            ax.set_ylim(0, max(loss_history))
            ln.set_xdata(np.arange(len(loss_history)))
            ln.set_ydata(loss_history)
            ax.draw_artist(ln)
            fig.canvas.blit(fig.bbox)
            fig.canvas.flush_events()
            t.set_description(f'Epoch: {e:6} Loss: {float(loss):8.2f}')
# Demo run: deep MLP, 100 epochs over 10k samples in batches of 100.
run_test([256, 128, 128, 64, 64, 32, 16, 4, 1], 100, 10000, 100, 0.001)
| tlsdmstn56/simple-nn | example/train_linear_model.py | train_linear_model.py | py | 3,129 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.uniform",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.... |
37229458402 | from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf.urls.static import static
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
from filebrowser.sites import site
# Django admin customization: branding for the admin site pages.
admin.site.site_header = "RKeduV Administration Panel"
admin.site.site_title = "Rkeduv Admin Panel"
admin.site.index_title = "Data Administration Portal"
# Root URL configuration; static files are appended below.
urlpatterns = [
    path('admin/filebrowser/', site.urls),  # filebrowser must precede admin/
    path('admin/', admin.site.urls),
    path('',views.home, name='home'),
    path('physics/',include('physics.urls')),
    path('accounts/',include('accounts.urls')),
    path('chemistry/',views.commingsoon, name='commingsoon'),
    # NOTE(review): maths reuses physics.urls — confirm this is intended.
    path('maths/',include('physics.urls')),
    path('finance/',include('finance.urls')),
    path('examinations/', include('questionbank.urls')),
    # Serve the favicon via a redirect into staticfiles storage.
    path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('logos/favicon.ico'))),
    path('editorjs/', include('django_editorjs_fields.urls')),
    path('support/', views.contact, name="support"),
    path('aboutus/', views.aboutus, name="aboutus"),
    path('tinymce/', include('tinymce.urls')),
    # Legal / policy pages.
    path('privacy/', views.privacy, name="privacy"),
    path('disclaimer/', views.disclaimer, name="disclaimer"),
    path('cookies/', views.cookies, name="cookies"),
    path('termsandcondition/', views.terms, name="terms"),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Serve media files from Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
20940467211 | import torch
import torch.nn as nn
from collections import OrderedDict
class OrientedRPN(nn.Module):
    """Oriented Region Proposal Network head.

    For each FPN level an independent head applies a shared 3x3 conv
    followed by two 1x1 branches: a regression branch producing 6 offsets
    per anchor and an objectness branch producing 1 score per anchor.

    cfg keys (all optional):
        fpn_level_num: number of FPN levels (default 5)
        fpn_channels:  channels of each incoming feature map (default 256)
        num_anchors:   anchors per spatial location (default 3)
    """
    def __init__(self, cfg: dict = None):
        """FIX: the original used a mutable default argument (``cfg: dict
        = {}``); ``None`` with an in-body fallback is equivalent for all
        existing callers (cfg is only ever read) and avoids the pitfall."""
        super().__init__()
        if cfg is None:
            cfg = {}
        self.fpn_level_num = cfg.get("fpn_level_num", 5)
        self.fpn_channels = cfg.get("fpn_channels", 256)
        self.num_anchors = cfg.get("num_anchors", 3)
        # One independent head per level, keyed by the stringified index.
        self.conv = nn.ModuleDict(
            {str(i): nn.Conv2d(self.fpn_channels, 256, 3, 1, "same") for i in range(self.fpn_level_num)}
        )
        # 6 regression parameters per anchor (oriented box encoding --
        # exact ordering defined by the downstream decoder).
        self.regression_branch = nn.ModuleDict(
            {str(i): nn.Conv2d(256, 6 * self.num_anchors, 1, 1) for i in range(self.fpn_level_num)}
        )
        self.objectness_branch = nn.ModuleDict(
            {str(i): nn.Conv2d(256, self.num_anchors, 1, 1) for i in range(self.fpn_level_num)}
        )
    def forward_single(self, x: torch.Tensor, fpn_level: str):
        """Run the head for one level; returns a dict with
        'anchor_offsets' (B, 6*A, H, W) and 'objectness_scores' (B, A, H, W)."""
        features = self.conv[fpn_level](x)
        return {
            "anchor_offsets": self.regression_branch[fpn_level](features),
            "objectness_scores": self.objectness_branch[fpn_level](features),
        }
    def forward(self, x):
        """x: OrderedDict of FPN maps (insertion order = level index);
        returns an OrderedDict of per-level output dicts under the same keys."""
        assert isinstance(x, OrderedDict)
        output = OrderedDict()
        for idx, (key, feature_map) in enumerate(x.items()):
            output[key] = self.forward_single(feature_map, str(idx))
        return output
| Simon128/pytorch-ml-models | models/oriented_rcnn/oriented_rpn/oriented_rpn.py | oriented_rpn.py | py | 1,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleDict",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
40311915793 | import backtrader as bt
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,8)
import csv
# Build the backtrader engine without the default observers, then attach
# only the buy/sell markers and the broker value/cash observer.
cerebro = bt.Cerebro(stdstats=False)
cerebro.addobserver(bt.observers.BuySell)
cerebro.addobserver(bt.observers.Broker)
# EURUSD 1-minute bars exported from MT5.  Column indices map
# datetime, open, high, low, close, volume; no open-interest column.
data = bt.feeds.GenericCSVData(
    dataname='dataMT5/EURUSDM1_220301.csv',
    nullvalue=0.0,
    timeframe=bt.TimeFrame.Minutes,
    compression=1,
    fromdate=datetime(2023, 3, 1, 9, 45, 00),
    todate=datetime(2023, 4, 22, 00, 00, 00),
    dtformat=('%Y.%m.%d %H:%M'),
    open=1,
    high=2,
    low=3,
    close=4,
    volume=5,
    openinterest=-1
)
cerebro.adddata(data)
class SeizeVolatilityStrategy(bt.Strategy):
    """Grid-style averaging strategy.

    The price axis is divided into levels of width ``price_unit`` anchored at
    ``price_base``.  For every level the close moves away from the current
    reference price (``self.price_position``), ``diff_units`` grid units of
    ``value_unit`` size are traded in the opposite direction, capped at
    ``max_unit`` total units.  All activity is appended to ``access.log`` and
    ``OandaActivity.csv``.
    """
    # price_base:  anchor price of the grid
    # price_unit:  price distance between two grid levels
    # value_unit:  order size (instrument units) per grid unit
    # max_unit:    maximum absolute number of grid units held at once
    params = (('price_base', 1.0300),
              ('price_unit', 0.0020),
              ('value_unit', 600),
              ('max_unit', 35))
    def __init__(self):
        self.logfile = None        # text log handle, opened in start()
        self.csvfile = None        # CSV activity file handle, opened in start()
        self.csvwriter = None
        self.order = None          # pending order (None when flat of orders)
        self.order_time = None     # wall-clock time the pending order was sent
        self.units = 0             # grid units currently held (signed)
        self.new_units = 0         # target grid units once the order fills
        self.price_position = self.p.price_base - self.p.price_unit * self.units # <0: sell
        self.count = 0             # number of grid trades issued so far
        self.prev_close = None     # last close logged, as a '%.4f' string
    def notify_order(self, order):
        # Dump the full order object, then promote new_units -> units on fill.
        order_info = '\n----------ORDER BEGIN----------\n%s\n----------ORDER END----------' % order
        self.log(order_info)
        if order.status in [order.Submitted, order.Accepted]:
            return
        if order.status in [order.Completed]:
            self.units = self.new_units
            self.price_position = self.p.price_base - self.p.price_unit * self.units # <0: sell
            position = self.broker.getposition(self.data0)
            if order.isbuy():
                self.log(
                    'BUY EXECUTED, Price: %.4f, Cost: %.4f, Position: %.4f' %
                    (order.executed.price,
                     order.executed.value,
                     position.size))
            else: # Sell
                self.log('SELL EXECUTED, Price: %.4f, Cost: %.4f, Position: %.4f' %
                         (order.executed.price,
                          order.executed.value,
                          position.size))
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('Order Canceled/Margin/Rejected')
        # Write down: no pending order
        self.order = None
    def next(self):
        if self.order:
            # NOTE(review): timeout uses wall-clock datetime.now(), not data
            # time, so it only makes sense in live trading - confirm intent.
            if (datetime.now() - self.order_time).total_seconds() > 60: # order time out
                self.log('Order Timeout')
                # position = self.broker.getserverposition(self.data0, update_latest=True)
                position = self.broker.getposition(self.data0)
                self.log('Position size: %d, price: %.4f' % (position.size, position.price))
                self.order = None
                # cheat as order.completed
                self.units = self.new_units
                self.price_position = self.p.price_base - self.p.price_unit * self.units # <0: sell
            return
        self.order_time = datetime.now()
        str_close = '%.4f' % (self.data0.close[0])
        if str_close != self.prev_close:
            self.log('%s --- %d' % (str_close, self.new_units))
            self.prev_close = str_close
        # Number of whole grid levels the close has crossed; trade against the
        # move (negative sign), i.e. sell as price rises, buy as it falls.
        diff_units = -1 * int((self.data0.close[0] - self.price_position)/self.p.price_unit)
        if diff_units != 0 and abs(self.units+diff_units) <= self.p.max_unit:
            self.count += 1
            self.new_units = self.units + diff_units
            position = self.broker.getposition(self.data0)
            value = self.broker.getvalue()
            cash = self.broker.getcash()
            self.log('count: %d, price: %.4f, unit: %d, value: %.2f, cash: %.2f, posi_size: %d, posi_price: %.4f' % \
                (self.count, self.data0.close[0], diff_units, value, cash, position.size, position.price))
            self.csvwriter.writerow([self.datetime.datetime().strftime('%Y-%m-%d %H:%M:%S'), \
                '%d' % self.count, \
                '%.4f' % self.data0.close[0], \
                '%d' % diff_units, \
                '%.2f' % value, \
                '%.2f' % cash, \
                '%d' % position.size, \
                '%.4f' % position.price])
            self.csvfile.flush()
            if diff_units < 0:
                self.order = self.sell(size=self.p.value_unit*abs(diff_units), price=self.data0.close[0])
            else:
                self.order = self.buy(size=self.p.value_unit*abs(diff_units), price=self.data0.close[0])
    def start(self):
        # Open log sinks and derive the starting grid units from any position
        # already held at the broker.
        self.logfile = open('access.log', 'a')
        self.csvfile = open('OandaActivity.csv', 'w')
        self.csvwriter = csv.writer(self.csvfile)
        self.csvwriter.writerow((['datetime', 'count', 'price', 'unit', 'value', 'cash', 'posi_value', 'posi_price']))
        self.done = False  # NOTE(review): set but never read in this class
        position = self.broker.getposition(self.data0)
        self.units = int(position.size / self.p.value_unit)
        self.new_units = self.units
        self.price_position = self.p.price_base - self.p.price_unit * self.units # <0: sell
        self.log('Initialization, Position: %d, %.4f, uints: %d' % (position.size, position.price, self.units),
                 dt=datetime.now())
    def stop(self):
        # Flush and close the log sinks at the end of the run.
        print('Data length: %d' % (len(self.data0)))
        self.logfile.close()
        self.csvfile.close()
    def log(self, txt, dt=None):
        # Timestamp defaults to the current data bar's datetime.
        dt = dt or self.data0.datetime.datetime()
        dtstr = dt.strftime('%Y-%m-%d %H:%M:%S')
        self.logfile.write('%s, %s\n' % (dtstr, txt))
        self.logfile.flush()
# Broker configuration: starting cash, zero commission, 2% margin per unit,
# and 0.5% slippage applied to fills.
cerebro.broker.setcash(1066.0)
cerebro.broker.setcommission(commission=0.0, margin=0.02)
cerebro.broker.set_slippage_perc(perc=0.005)
cerebro.addanalyzer(bt.analyzers.AnnualReturn, _name='_AnnualReturn')
cerebro.addstrategy(SeizeVolatilityStrategy)
result = cerebro.run()
strat = result[0]
print("--------------- AnnualReturn -----------------")
print(strat.analyzers._AnnualReturn.get_analysis())
# Persist the per-year returns as a two-column CSV.
AnReturn = strat.analyzers._AnnualReturn.get_analysis()
df = pd.DataFrame(AnReturn.values(), index=AnReturn.keys()).reset_index()
df.columns=['Year', 'AnnualReturn']
df.to_csv('DemoVolatility.csv', index=False)
print(f'Final Portfolio Value: {cerebro.broker.getvalue():.2f}')
# cerebro.plot returns a list of figure lists; keep the first figure.
figure = cerebro.plot(style='candlestick', volume=False,
          barup = '#ff9896', bardown='#98df8a',
          tickrotation=10, )[0][0]
figure.savefig('DemoVolatility.png') | webclinic017/volatility-strategy | backtest/DemoVolatility.py | DemoVolatility.py | py | 6,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "backtrader.Cerebro",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bac... |
42456044912 | import time
import numpy as np
import scipy.io as sio
from datetime import datetime
import cPickle as pickle
import sys
# local modules
import wtahash as wh
import cluster
import utils
class Evaluation:
    ''' Class for evaluating the speed and storing the rankings of a dataset
    using WTAHash.
    '''
    def __init__(self, dataset_path):
        # dataset_path: root directory of the descriptor dataset to evaluate.
        self.dataset = dataset_path
        self.log = ""  # accumulated human-readable run log, written by run()
    def run(self):
        """Interactive driver: read options from stdin, build/load the hash,
        rank the test set, store all artifacts and compute precision metrics.

        Side effects: writes rankings/labels/products under results/ and a
        run log to results/log_<n_classes>.txt.
        NOTE(review): input() here behaves like Py2 input (the file imports
        cPickle, so this is Python 2) and eval()s whatever the user types.
        """
        # Get the options
        s = "Enter the number of classes from the dataset to be used "\
            "(0 for all).\n"
        self.n_classes = input(s)
        s = "Choose an option:\n"\
            "- [0] Calculate WTA Hash.\n"\
            "- [1] Load a stored WTA Hash.\n"\
            "- [2] Calculate WTA Hash and store it.\n"
        opt_load = input(s)
        # WTAHash hyper-parameters: k window size, w permutation width,
        # n number of permutations (values fixed for this experiment).
        k = 16
        w = 2
        n = 1200
        LOAD_HASH = 1
        STORE_HASH = 2
        s = "Enter the ranking size you want to use (from 1 to 100).\n"
        ranking_size = input(s)
        if ranking_size > 100:
            ranking_size = 100
        # Percentage of the data that will be used for training, the rest is
        # testing
        train_perc = 80
        self.log = ""
        starting_time = datetime.now()
        self.log += "Starting time {0}\n".format(starting_time)
        self.log += "Using the first {0} classes of the dataset\n".format(
            self.n_classes
        )
        # Training
        #-----------------------------------------------------------------------
        train_data, train_labels = self.read_descriptors(train_perc, "training")
        if opt_load == LOAD_HASH:
            hash_filename = "results/wtahash_{0}.obj".format(self.n_classes)
            print("Loading hash from file {0}...".format(hash_filename))
            wta_hash = pickle.load(open(hash_filename, "rb"))
        else:
            wta_hash = self.create_hash(train_data, n, k, w)
            if opt_load == STORE_HASH:
                self.store_hash(wta_hash)
        # Testing
        #-----------------------------------------------------------------------
        test_data, test_labels = self.read_descriptors(train_perc, "testing")
        # Ranking cannot be longer than the number of training objects.
        ranking_size = min((len(train_data), ranking_size))
        rankings = self.get_rankings(test_data, wta_hash, ranking_size)
        self.store_rankings(rankings)
        self.store_labels(train_labels, test_labels)
        # Dot products
        #-----------------------------------------------------------------------
        sorted_prods, prods = self.dot_products(
            train_data, test_data, rankings
        )
        self.store_products(sorted_prods, prods)
        # Precision metrics
        #-----------------------------------------------------------------------
        # Generate relevance rankings
        self.metrics(rankings, train_labels, test_labels, sorted_prods)
        end_time = datetime.now()
        self.log += "Ending time {0}\n".format(end_time)
        # Write times in a text file
        log_filename = "results/log_{0}.txt".format(self.n_classes)
        with open(log_filename, "w") as f:
            f.write(self.log)
    def read_descriptors(self, training_percentage, set_name):
        """Load the descriptor matrix and labels for one split.

        Args:
            training_percentage (int): percent of data used for training.
            set_name (str): "training" or "testing" (passed to cluster and
                used in log messages).
        Returns:
            (data, labels): descriptor matrix and its per-row labels.
        """
        ### Load training information matrix ###
        ###------------------------------------------------------------------###
        print ("Reading {0} instances ...".format(set_name))
        start = time.time()
        data, labels = cluster.load_classes(
            training_percentage, self.dataset, set_name, self.n_classes
        )
        end = time.time()
        self.log += "{0} matrix of shape {1}\n".format(set_name, data.shape)
        elapsed_time = utils.humanize_time(end - start)
        s = "Elapsed time reading the {0} files: {1}".format(
            set_name, elapsed_time
        )
        self.log += s + "\n"
        print (s)
        return data, labels
    def create_hash(self, train_data, n, k, w):
        """Build a WTAHash over the training matrix and log the build time.

        Args:
            train_data: training descriptor matrix.
            n, k, w: WTAHash hyper-parameters (permutation count, window
                size, permutation width).
        Returns:
            wh.WTAHash: the populated hash structure.
        """
        ### Use WTAHash on it ###
        ###------------------------------------------------------------------###
        print ("Starting to generate hash table ...")
        start = time.time()
        wta_hash = wh.WTAHash(train_data, n, k, w)
        end = time.time()
        elapsed_time = utils.humanize_time(end - start)
        s = "Elapsed time on generation of hash table: {0}".format(elapsed_time)
        self.log += s + "\n"
        print (s)
        return wta_hash
    def get_rankings(self, test_data, wta_hash, ranking_size):
        """Rank the best `ranking_size` training objects for each test vector.

        Returns the ranking matrix produced by wta_hash.best_classifiers and
        logs the elapsed time.
        """
        ### Get the rankings for the test set ###
        ###------------------------------------------------------------------###
        print ("Generating ranking matrix for the test set ...")
        start = time.time()
        rankings = wta_hash.best_classifiers(test_data, ranking_size)
        end = time.time()
        elapsed_time = utils.humanize_time(end - start)
        s = "Elapsed time generating ranking matrix: {0}".format(elapsed_time)
        self.log += s + "\n"
        print (s)
        return rankings
def dot_products(self, train_data, test_data, rankings):
''' Calculates the dot product for each element in the test set with
every element of the train set. Returns a matrix with two columns
matrix. The first column is the index of the object in the train set
and the second column is the value of the dot product of that object
with the test object with index equal to the number of the row. Then
the number of rows is the number of objects in the test set.
Args:
train_data (np matrix of floats): Each row is the vector of an
object in the train set.
test_data (np matrix of floats): Each row is the vector of an object
in the test set.
rankings (list of lists int): The ranking created for each object
in the test set.
Returns:
list of list of tuples {
e.g.:
0 ranking_size
| |
0 - [[(21, 0.91), (3, 0.87), ...],
[(10, 0.83), (0, 0.72), ...],
...
len(test_data) - [ ... ]]
int: Index of the object in the train set that should be ranked
in the i-th position where i is the number of the row,
float: The value of the dot product between the object in the
train set and the object in the test set in the i-th
position where i is the number of the row.
},
numpy array of arrays of floats: Dot products where the [i-th, j-th]
element is the product between the i-th object of the testing
set and the j-th object of the training set.
'''
### Calculate dot product on the variables ###
###------------------------------------------------------------------###
print ("Calculating dot products on the rankings ...")
start = time.time()
# products is the matrix that stores the dot product of each testing
# vector with each training vector
sorted_prods = []
products = []
ranking_size = len(rankings[0])
step = (len(test_data) * 5) / 100
train_norm = [utils.normalize(train_vec) for train_vec in train_data]
train_norm = np.array(train_norm)
for i in range(len(test_data)):
# y is the current testing vector
y = test_data[i]
y_norm = utils.normalize(y)
current_tuples = []
products.append([])
for j in range(len(train_data)):
# vector is the training object ranked in the current position
vector_norm = train_norm[j]
prod = np.dot(y_norm, vector_norm)
if j < ranking_size:
products[i].append(prod)
current_tuples.append( (j, prod) )
current_tuples.sort(key=lambda x: x[1], reverse=True)
sorted_prods.append(current_tuples[:ranking_size])
if i % step == 0:
percentage = (i * 100) / len(test_data)
print (
"Vector number {0} of {1} ({2}%) multiplied".format(
i, len(test_data), percentage
)
)
end = time.time()
elapsed_time = utils.humanize_time(end - start)
s = "Elapsed time calculating dot products: {0}".format(elapsed_time)
self.log += s + "\n"
print (s)
return sorted_prods, np.array(products)
    def metrics(self, rankings, train_labels, test_labels, sorted_prods):
        """Compute and store mean average precision metrics.

        Three mAP flavors are produced: classification (label relevance),
        product-set (overlap of top product indices with the hash ranking)
        and product-position (position agreement).  Each per-query precision
        list is written under results/ and the means are logged.
        Assumes `rankings` is non-empty (len(rankings[0]) is read).
        """
        ### Calculates classification and products set and position mAP ###
        ###------------------------------------------------------------------###
        print("Starting to calculate metrics ...")
        start = time.time()
        rel_ranks = []
        for i in range(len(rankings)):
            rel_ranks.append(
                utils.relevance_ranking(
                    rankings[i], train_labels, test_labels[i]
                )
            )
        # Classification mAP
        #-----------------------------------------------------------------------
        class_ap = [utils.class_ap(rel_rk) for rel_rk in rel_ranks]
        class_ap_filename = "results/class_avg_precs_{0}.txt".format(
            self.n_classes
        )
        utils.write_list(class_ap, class_ap_filename)
        class_map = np.mean(class_ap)
        self.log += "ranking size = {0}".format(len(rankings[0])) + "\n"
        s = "classification mean average precision = {0}".format(class_map)
        self.log += s + "\n"
        print(s)
        # Dot products average precision
        #-----------------------------------------------------------------------
        # Set
        set_prec = []
        for i in range(len(rankings)):
            indices = [prods[0] for prods in sorted_prods[i]]
            precision = utils.prod_set_prec(indices, rankings[i])
            set_prec.append(precision)
        set_ap_filename = "results/set_avg_precs_{0}.txt".format(
            self.n_classes
        )
        utils.write_list(set_prec, set_ap_filename)
        set_map = np.mean(set_prec)
        s = "set mean average precision = {0}".format(set_map)
        self.log += s + "\n"
        print(s)
        # Position
        pos_prec = []
        for i in range(len(rankings)):
            indices = [prods[0] for prods in sorted_prods[i]]
            precision = utils.prod_pos_prec(indices, rankings[i])
            pos_prec.append(precision)
        pos_ap_filename = "results/pos_avg_precs_{0}.txt".format(
            self.n_classes
        )
        utils.write_list(pos_prec, pos_ap_filename)
        pos_map = np.mean(pos_prec)
        s = "position mean average precision = {0}".format(pos_map)
        self.log += s + "\n"
        print(s)
        end = time.time()
        elapsed_time = utils.humanize_time(end - start)
        s = "Elapsed time calculating metrics: {0}".format(elapsed_time)
        self.log += s + "\n"
        print (s)
############################################################################
#### Functions for storing values ####
############################################################################
def store_hash(self, wta_hash):
## Store the hash in a binary file
print("Storing the hash in a file ...")
start = time.time()
hash_filename = "results/wtahash_{0}.obj".format(self.n_classes)
pickle.dump(wta_hash, open(hash_filename, "wb"), protocol=2)
end = time.time()
elapsed_time = utils.humanize_time(end - start)
s = "Elapsed time storing the hash {0}".format(elapsed_time)
self.log += s + "\n"
print(s)
    def store_rankings(self, rankings):
        """Save the ranking matrix to results/rankings_<n_classes>.mat
        under the key "stored"."""
        ## Store the rankings in a mat file
        print("Storing rankings in a mat file ...")
        start = time.time()
        rankings_filename = "results/rankings_{0}.mat".format(self.n_classes)
        data = {"stored": rankings}
        sio.savemat(rankings_filename, data)
        end = time.time()
        s = "Elapsed time storing the rankings {0} secs.".format(end - start)
        self.log += s + "\n"
        print(s)
    def store_labels(self, train_labels, test_labels):
        """Save train/test labels as plain text lists under results/."""
        ## Store the labels in a text file
        print("Storing the labels in text files...")
        start = time.time()
        train_labels_fn = "results/train_labels_{0}.txt".format(self.n_classes)
        test_labels_fn = "results/test_labels_{0}.txt".format(self.n_classes)
        utils.write_list(train_labels, train_labels_fn)
        utils.write_list(test_labels, test_labels_fn)
        end = time.time()
        s = "Elapsed time storing the labels {0} secs.".format(end - start)
        self.log += s + "\n"
        print(s)
    def store_products(self, sorted_prods, products):
        """Save the raw dot-product matrix and the sorted product indices
        as two .mat files under results/."""
        # Write products in a mat file
        print("Storing products in a mat file ...")
        start = time.time()
        prods_filename = "results/products_{0}.mat".format(self.n_classes)
        sio.savemat(prods_filename, {"stored": products})
        # Keep only the train-set index of each sorted (index, product) pair.
        # e.g. elem = [(1, 0.94), (12, 0.83), (4, 0.6), ...]
        # indices = [1, 12, 4, ...]
        indices = [ [prod[0] for prod in row] for row in sorted_prods]
        ids_filename = "results/indices_{0}.mat".format(self.n_classes)
        sio.savemat(ids_filename, {"stored": indices})
        end = time.time()
        print("Elapsed time storing the products {0} secs.".format(end - start))
| pombredanne/wtahash | evaluation.py | evaluation.py | py | 13,985 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "cPickle.load",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.datetim... |
7813321006 | import json
from typing import List
import click
import requests
import sqlalchemy as sa
from aspen.config.config import Config
from aspen.database.connection import (
get_db_uri,
init_db,
session_scope,
SqlAlchemyInterface,
)
from aspen.database.models import Pathogen, PathogenLineage
from aspen.util.pathogen_configs import get_lineage_urls
from aspen.workflows.shared_utils.database import (
create_temp_table,
drop_temp_table,
mv_table_contents,
)
def download_lineages(url: str, print_response: bool) -> str:
    """Fetch the raw lineages file at *url* and return its text.

    Raises RuntimeError on any non-200 response; optionally echoes the
    response body to stdout when *print_response* is set.
    """
    resp = requests.get(url)
    if resp.status_code != 200:
        raise RuntimeError(f"Error downloading lineages file: {resp.text}")
    if print_response:
        print(resp.text)
    return resp.text
def get_formatted_lineages(
    url: str,
    format: str,
    list_path: List[str],
    lineage_keys: List[str],
    print_response: bool,
) -> List[str]:
    """Download the lineage endpoint at *url* and reduce it to a flat list.

    Thin composition of download_lineages and format_lineage_data; see
    format_lineage_data for the meaning of *list_path* and *lineage_keys*.
    """
    raw_text = download_lineages(url, print_response)
    return format_lineage_data(raw_text, format, list_path, lineage_keys)
def format_lineage_data(
    source_data: str,
    response_format: str,
    list_path: List[str],
    lineage_keys: List[str],
) -> List[str]:
    """Reformat a lineage endpoint payload into a deduplicated lineage list.

    Currently only ``response_format == "json"`` is handled; any other
    format yields an empty list.

    Inputs:
    - source_data: the text of the upstream lineage response
    - list_path: successive keys to descend before reaching the list of
      lineage dicts, e.g. ["more_info", "lineages"]
    - lineage_keys: keys to pull out of each lineage dict, e.g.
      ["lineage_name"]; missing keys are silently skipped
    """
    collected = set()
    if response_format == "json":
        node = json.loads(source_data)
        for step in list_path:
            node = node[step]
        collected.update(
            entry[key]
            for entry in node
            for key in lineage_keys
            if key in entry
        )
    return list(collected)
def load_lineages_data(pathogen_slug, lineages: list[str]) -> None:
    """Loads all the lineages into DB.
    Approach to this is basically duplicating what's in
    backend/aspen/workflows/import_gisaid/save.py
    Idea is to load all the data into a temp table with same structure,
    then once all loaded, drop the rows from "real" table, move the new data
    over from the temp table, and finally drop the temp table to wrap up.
    The original import script deals with a lot more data, so that has some
    performance-specific bits that are not included in this version.
    """
    LINEAGE_COL_NAME = "lineage"
    interface: SqlAlchemyInterface = init_db(get_db_uri(Config()))
    with session_scope(interface) as session:
        # Resolve the pathogen row; .one() raises if the slug is unknown.
        pathogen = session.execute(sa.select(Pathogen).where(Pathogen.slug == pathogen_slug)).scalars().one()  # type: ignore
        dest_table = PathogenLineage.__table__
        temp_table = create_temp_table(session, dest_table)
        # Load data into temp_table
        lineage_objects = [
            {LINEAGE_COL_NAME: lineage, "pathogen_id": pathogen.id}
            for lineage in lineages
        ]
        session.execute(temp_table.insert(), lineage_objects)
        # Replace previous data with new data from temp_table
        # (only rows belonging to this pathogen are dropped/replaced).
        mv_table_contents(
            session,
            temp_table,
            dest_table,
            [(PathogenLineage.pathogen_id == pathogen.id)],
        )
        drop_temp_table(session, temp_table)
        # Final sanity check before we commit
        count_db_lineages = (
            session.query(dest_table)
            .filter(PathogenLineage.pathogen_id == pathogen.id)
            .count()
        )
        print(f"Imported {count_db_lineages} lineage rows")
        if len(lineages) != count_db_lineages:
            raise RuntimeError("Something went wrong loading DB. Abort!")
            # This exception will bubble up, end session, cause rollback.
        session.commit()
@click.command()
@click.argument(
    "pathogen_slug",
    required=True,
)
@click.option(
    "--print-source-file",
    type=bool,
    is_flag=True,
    # NOTE(review): "linage" is a typo in user-visible help text; left
    # unchanged here since help strings are runtime output.
    help="Print raw text of the upstream linage api endpoint.",
)
@click.option(
    "--parse-without-import",
    type=bool,
    is_flag=True,
    help="Parse lineages file, but only print results instead of write to DB.",
)
@click.option(
    "--test",
    type=bool,
    is_flag=True,
    help="Run very basic smoke test.",
)
def cli(
    pathogen_slug: str,
    print_source_file: bool,
    parse_without_import: bool,
    test: bool,
):
    """Load lineages from a remote file into DB."""
    if test:
        print("Success!")
        return  # Do nothing other than basic smoke test
    # URL + parsing config for this pathogen's upstream lineage endpoint.
    urls = get_lineage_urls(pathogen_slug)
    print("Parsing lineages data from file...")
    lineages = get_formatted_lineages(**urls, print_response=print_source_file)
    print(f"Found {len(lineages)} lineages in file")
    if parse_without_import:
        print("Printing lineages, but NOT importing to DB")
        print(lineages)
        return  # End here to avoid importing to DB
    print("Loading lineages to DB...")
    load_lineages_data(pathogen_slug, lineages)
    print("Loading lineages complete!")
if __name__ == "__main__":
    cli()
| chanzuckerberg/czgenepi | src/backend/aspen/workflows/import_lineages/load_lineages.py | load_lineages.py | py | 5,861 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number":... |
8425228760 | import os
from cv2.cv2 import CascadeClassifier, imread
def detect_face(image_path):
    """Detect the first face in the image at *image_path*.

    Returns a (left, top, right, bottom) box around the first detected face,
    enlarged by a 30% margin on every side, or None when no face is found.
    """
    # Get user supplied values
    casc_path = os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml")
    # Create the haar cascade
    face_cascade = CascadeClassifier(casc_path)
    # Read the image as greyscale
    gray = imread(image_path, 0)
    # Detect faces in the image
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CV_HAAR_SCALE_IMAGE
    )
    # Only the first detected face is used - the loop returns on its first
    # iteration.
    for (x, y, w, h) in faces:
        # Shift the origin up/left by 30% of the size and grow the extent to
        # 160%, i.e. a symmetric 30% margin around the detected box.
        y = y - (h * 0.3)
        h = h * 1.6
        x = x - (w * 0.3)
        w = w * 1.6
        # NOTE(review): the expanded box may be negative or exceed the image
        # bounds; callers presumably clamp - confirm.
        return x, y, x + w, y + h
    return None
| wobeng/zappa_resize_image_on_fly | detect_face.py | detect_face.py | py | 748 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.cv2.CascadeClassifier",
... |
16609451607 | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from datetime import datetime
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import os
def check_for_stock():
    """Open bol.com's PS5 page in Chrome and check availability.

    Prints an IN STOCK / NOT IN STOCK line and, when in stock, hands the
    live driver to place_order() to attempt a purchase.
    """
    now = datetime.now()
    # NOTE(review): this local `time` (a "HH:MM:SS" string) shadows the
    # imported `time` module inside this function.
    time = now.strftime("%H:%M:%S")
    chrome_options = Options()
    chrome_options.add_argument("--window-size=1920,1080")
    chrome_options.add_argument("--start-maximized")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    #chrome_options.add_argument("--headless")
    # Disable image loading and safe-browsing/download prompts for speed.
    chrome_options.add_experimental_option("prefs", {
        "profile.default_content_settings": {"images": 2},
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing_for_trusted_sources_enabled": False,
        "safebrowsing.enabled": False
    })
    driver = webdriver.Chrome(chrome_options=chrome_options)
    wait = WebDriverWait(driver, 20)
    driver.get("https://www.bol.com/nl/p/sony-playstation-5-console/9300000004162282/?ruleRedirect=1&sI=ps5&variants=") # PS5 URL
    driver.find_element_by_xpath("//*[@id='modalWindow']/div[2]/div[2]/wsp-consent-modal/div[2]/div/div[1]/button").click() # Accept cookies
    try:
        # If this element exists, the product is not in stock
        driver.find_element_by_class_name("buy-block__title")
        print(f"{time} - BOL.COM - NOT IN STOCK")
    except NoSuchElementException:
        print(f"{time} - BOL.COM - !! IN STOCK !! - Trying to order one for you, kind sir...")
        place_order(driver, wait)
def place_order(driver, wait):
    """Drive the bol.com checkout: add to basket, log in, confirm payment.

    Credentials are hard-coded placeholders below and must be filled in.
    NOTE(review): the final log line of this function interpolates `{time}`,
    which at that point is the `time` MODULE (the timestamp string in
    check_for_stock is function-local), so it prints a module repr rather
    than a clock time.
    """
    driver.find_element_by_class_name("js_btn_buy").click()
    # Add product to basket
    wait.until(EC.element_to_be_clickable((By.XPATH, "/html/body/div[4]/div[2]/div[3]/div[1]/div/div[2]/div/a")))
    driver.get("https://www.bol.com/nl/order/basket.html")
    wait.until(EC.element_to_be_clickable((By.ID, "continue_ordering_bottom")))
    driver.find_element_by_id("continue_ordering_bottom").click()
    # Login
    driver.execute_script("document.getElementById('login_email').value='.........'") #Bol username here
    driver.execute_script("document.getElementById('login_password').value='........'") #Bol password here
    time.sleep(2)
    driver.find_element_by_id("login_password").send_keys(Keys.ENTER)
    driver.find_element_by_xpath("//*[@id='executepayment']/form/div/button").click()
print(f"{time} - BOL.COM - ORDER SUCCESFULLY PLACED") | kronolith1/ps5-bot | src/selenium_driver.py | selenium_driver.py | py | 2,797 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 18,
"usage_type": "call"
},
{
... |
31414845762 | from lib2to3.pytree import Node
from pythonds import Stack
obj = Stack() # Creating object of stack class
class Prime:
    """Find prime-anagram pairs in [0, 1000] and print them in reverse via a
    stack.

    NOTE(review): this class body cannot run as written - see the inline
    notes.  Presumably `prime` was meant to be an object exposing
    prime/anagram/prime_check helpers, not an empty dict.
    """
    # NOTE(review): `prime` is an empty dict, so `prime.prime(0, 1000)` below
    # raises AttributeError the moment the class body executes.
    prime = {}
    prime_anagram = []  # Creating prime_anagram list
    prime_list = prime.prime(0, 1000)  # Creating list of prime number in given range
    # NOTE(review): `prime_list.remove(number)` mutates the list while it is
    # being iterated, which can silently skip elements.
    for num in prime_list:  # Checking prime number anagram or not
        if num <= 10:
            continue
        number = prime.anagram(num)
        if prime.prime_check(number) and 0 <= number <= 1000:
            prime_anagram.append(number)
            prime_anagram.append(num)
            prime_list.remove(number)
    length = len(prime_anagram)  # finding the length of prime anagram list
    # Push every anagram (wrapped in a lib2to3 Node) onto the module-level
    # stack `obj`, then pop to print in reverse order.
    for number in range(length):  # Adding the prime anagram in to stack
        num = Node(prime_anagram[number])
        obj.push(num)
    for number in range(length):  # Printing in reverse order
        data = obj.pop()
        print(data, end=" ")
    print("------End of program------")
| AkashBG3010/PythonPracticePrograms | DataStructuresPrograms/prime_stack.py | prime_stack.py | py | 977 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pythonds.Stack",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "lib2to3.pytree.Node",
"line_number": 27,
"usage_type": "call"
}
] |
10744162091 | import numpy as np
from pydub import AudioSegment
import librosa
def get_segment(audio, start, end):
    """Slice *audio* between *start* and *end* seconds (pydub indexes in ms)."""
    begin_ms = int(start * 1000)
    finish_ms = int(end * 1000)
    return audio[begin_ms:finish_ms]
def to_librosa(audiosegment):
    """Convert a pydub AudioSegment into a flat float32 array in [-1, 1]."""
    mono_channels = audiosegment.split_to_mono()
    raw_arrays = [channel.get_array_of_samples() for channel in mono_channels]
    # Interleave channels as columns, then scale by the sample type's maximum.
    samples = np.array(raw_arrays).T.astype(np.float32)
    samples /= np.iinfo(raw_arrays[0].typecode).max
    return samples.reshape(-1)
def normalize(librosa_samples, middle=None, max=13231):
    """Fit *librosa_samples* to exactly *max* samples.

    Longer clips are trimmed via cut() (centred on *middle* seconds when
    given); shorter ones are zero-padded on both sides.
    NOTE: the parameter name `max` shadows the builtin but is kept for
    call-site compatibility (callers pass it by keyword).
    """
    if len(librosa_samples) > max:
        return cut(librosa_samples, middle)
    return librosa.util.pad_center(librosa_samples, max)
# cut
def cut(librosa_samples, middle=None, max_l=13231):
    """Trim to *max_l* samples, centred on *middle* (in seconds) when given.

    Without *middle* the clip is taken from the start and length-fixed;
    with it, a max_l-wide window around the corresponding sample index is
    extracted (clamped at 0) and padded back to max_l if short.
    """
    if middle is None:
        return librosa.util.fix_length(librosa_samples[:max_l + 1], max_l)
    centre = librosa.time_to_samples(middle, sr=44100)
    half_width = max_l // 2
    left = max(0, centre - half_width)
    right = left + max_l
    return librosa.util.pad_center(librosa_samples[left: right], max_l)
def prepare_segment(audio, start, end, middle=None, max_l=13231):
    """Slice [start, end] seconds out of *audio*, convert to a librosa array
    and fit it to *max_l* samples (optionally centred on *middle*)."""
    segment = get_segment(audio, start, end)
    return normalize(to_librosa(segment), middle=middle, max=max_l)
def extract_segments(notes, file_name):
    """Cut one normalized audio snippet per note out of *file_name* (wav).

    Notes are sorted by start time; the first note yields the segment
    [start, end], and every following note yields the segment from the
    previous note's start to its own end, centred on its own start time.
    Returns None for an empty note list, otherwise a list of arrays.
    """
    if len(notes) == 0:
        return
    notes.sort(key=lambda note: note.start_time)
    pairs = list(zip(notes, notes[1:]))
    first = notes[0]
    audio = AudioSegment.from_file(file_name, format="wav")
    # BUG FIX: the original ended with `[first_segm].extend(other)`, which
    # always returns None because list.extend mutates in place and returns
    # None.  Build the list explicitly instead.
    segments = [prepare_segment(audio, first.start_time, first.end_time)]
    segments.extend(
        prepare_segment(audio, prev.start_time, cur.end_time, middle=cur.start_time)
        for prev, cur in pairs
    )
    return segments
| SergWh/datasets_processing | model/model.py | model.py | py | 1,776 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.iinfo",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "librosa.util.pad_center",
... |
2808409231 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.3.0"
__author__ = "Abien Fred Agarap"
import argparse
from utils.data import plot_confusion_matrix
def parse_args():
    """Build and evaluate the CLI parser for the confusion-matrix tool.

    Returns an argparse.Namespace with training_results_path and
    validation_results_path (both required).
    """
    parser = argparse.ArgumentParser(
        description="Confusion Matrix for Intrusion Detection"
    )
    group = parser.add_argument_group("Arguments")
    # Both paths share identical option shapes; declare them data-driven.
    for short_flag, long_flag, help_text in (
        ("-t", "--training_results_path",
         "path where the results of model training are stored"),
        ("-v", "--validation_results_path",
         "path where the results of model validation are stored"),
    ):
        group.add_argument(
            short_flag, long_flag, required=True, type=str, help=help_text
        )
    return parser.parse_args()
def main(argv):
    """Plot both confusion matrices and print their cell values.

    Per the prints below, each plot_confusion_matrix result is indexed as
    [0] -> 2x2 confusion matrix ([[TN, FP], [FN, TP]]) and [1] -> accuracy.
    """
    training_confusion_matrix = plot_confusion_matrix(
        phase="Training",
        path=argv.training_results_path,
        class_names=["normal", "under attack"],
    )
    validation_confusion_matrix = plot_confusion_matrix(
        phase="Validation",
        path=argv.validation_results_path,
        class_names=["normal", "under attack"],
    )
    # display the findings from the confusion matrix
    print("True negative : {}".format(training_confusion_matrix[0][0][0]))
    print("False negative : {}".format(training_confusion_matrix[0][1][0]))
    print("True positive : {}".format(training_confusion_matrix[0][1][1]))
    print("False positive : {}".format(training_confusion_matrix[0][0][1]))
    print("training accuracy : {}".format(training_confusion_matrix[1]))
    # display the findings from the confusion matrix
    print("True negative : {}".format(validation_confusion_matrix[0][0][0]))
    print("False negative : {}".format(validation_confusion_matrix[0][1][0]))
    print("True positive : {}".format(validation_confusion_matrix[0][1][1]))
    print("False positive : {}".format(validation_confusion_matrix[0][0][1]))
    print("validation accuracy : {}".format(validation_confusion_matrix[1]))
if __name__ == "__main__":
    args = parse_args()
    main(args)
| AFAgarap/gru-svm | utils/results_summary.py | results_summary.py | py | 2,156 | python | en | code | 136 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.data.plot_confusion_matrix",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.data.plot_confusion_matrix",
"line_number": 41,
"usage_type": "call"
}
] |
73313411625 | # -*- coding: utf-8 -*-
# @Time : 2022 09
# @Author : yicao
import csv
import os
import math
import numpy as np
import torch
from utils import model_utils
class TopKUtil:
def __init__(self, mod_len: int, sparse_rate: float = 0.05, record_top_k_value=False,
record_top_k_value_csv_name=None, a=1):
self.record_top_k_value_csv_name = record_top_k_value_csv_name
self.record_top_k_value = record_top_k_value
self.sparse_rate = sparse_rate
self.mod_len = mod_len
self.top_k_idx = int(sparse_rate * mod_len)
self.top_k_val = 0.005
self.a = a # 增强系数
self.a_k = np.ones(self.mod_len) * self.a # 上一次传输距离本轮传输的轮次
self.rs_global = 0 # 模型实际稀疏度
self.rs_size = 4 # 稀疏度打印周期
self.rs_count = 0 # 稀疏度打印计数
# 循环提取参数索引
self.loop_size = math.ceil(1 / sparse_rate)
self.loop_idx = 0
if self.record_top_k_value & (self.record_top_k_value_csv_name is not None):
with open(os.path.join('results', self.record_top_k_value_csv_name), 'w') as f:
csv_write = csv.writer(f)
csv_write.writerow(['top_k_value'])
def record_rs(self, bitmap: np.ndarray):
self.rs_global += np.count_nonzero(bitmap)
self.rs_count += 1
# 达到记录数,打印各层稀疏率
if self.rs_count == self.rs_size:
self.rs_count = 0
rs = self.rs_global / self.rs_size / self.mod_len
d = rs - self.sparse_rate
self.top_k_val = self.top_k_val * (1 + d)
# print(f"模型稀疏率为:{round(rs, 3)}, 阈值调整后的值为:{round(self.top_k_val, 4)}")
self.rs_global = 0
def get_grads_from_optim_use_k_val(self, optim: torch.optim.Optimizer):
"""
选择大于等于k_val的值进行传输
:param optim:
:return:
"""
grads_numpy = model_utils.get_grads_numpy(optim, self.mod_len)
bitmap = np.where(abs(grads_numpy) >= self.top_k_val, 1, 0)
self.record_rs(bitmap)
return bitmap, grads_numpy * bitmap
def get_grads_from_optim_use_top_k(self, optim: torch.optim.Optimizer):
"""
普通topk
:param optim:
:return:
"""
grads_tensor = model_utils.get_grads_tensor(optim, self.mod_len)
val, idx = abs(grads_tensor).topk(self.top_k_idx)
bitmap = np.zeros(self.mod_len)
bitmap[idx] = 1
if self.record_top_k_value & (self.record_top_k_value_csv_name is not None):
with open(os.path.join('results', self.record_top_k_value_csv_name), 'a+') as f:
csv_write = csv.writer(f)
csv_write.writerow([val[-1].item()])
return bitmap, (grads_tensor.numpy()) * bitmap
def get_grads_from_optim_use_top_k_layers(self, optim: torch.optim.Optimizer):
    """Layer-wise top-k: keep the top ``sparse_rate`` fraction of each
    parameter tensor independently, instead of one global cut.

    :param optim: optimizer; parameters are read from param_groups[0]
    :return: (bitmap, masked gradients), both length ``mod_len``
    """
    flat = torch.empty(self.mod_len)
    mask = np.zeros(self.mod_len)
    offset = 0
    for param in optim.param_groups[0]['params']:
        if param.grad is None:
            continue
        grad = param.grad.data.view(-1).cpu()
        # Per-layer selection: indices are local, so shift by the offset.
        _, kept = abs(grad).topk(int(self.sparse_rate * len(grad)))
        mask[offset + kept] = 1
        flat[offset:offset + len(grad)] = grad
        offset += len(grad)
    return mask, (flat.numpy()) * mask
def get_grads_from_optim_use_top_k_a(self, optim: torch.optim.Optimizer):
    """Staleness-boosted top-k: magnitudes are weighted by ``a_k`` so that
    entries skipped for many rounds become progressively easier to select.

    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked gradients), both length ``mod_len``
    """
    flat = model_utils.get_grads_tensor(optim, self.mod_len)
    boosted = abs(flat) * self.a_k
    _, kept = boosted.topk(self.top_k_idx)
    mask = np.zeros(self.mod_len)
    mask[kept] = 1
    # Age every weight by one round, then reset the transmitted entries.
    self.a_k *= self.a
    self.a_k[kept] = self.a
    if self.record_top_k_value & (self.record_top_k_value_csv_name is not None):
        with open(os.path.join('results', self.record_top_k_value_csv_name), 'a+') as f:
            csv.writer(f).writerow([flat[kept[-1]].item()])
    return mask, (flat.numpy()) * mask
def get_grad_loop(self, optim: torch.optim.Optimizer):
    """Round-robin selection: transmit every ``loop_size``-th entry,
    rotating the starting offset each call so that all entries are
    eventually sent.

    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked gradients), both length ``mod_len``
    """
    flat = model_utils.get_grads_numpy(optim, self.mod_len)
    mask = np.zeros(self.mod_len)
    mask[self.loop_idx::self.loop_size] = 1
    # Advance the offset, wrapping back to 0 after a full cycle.
    self.loop_idx = (self.loop_idx + 1) % self.loop_size
    return mask, flat * mask
def get_grad_random(self, optim: torch.optim.Optimizer):
    """Uniform random sparsification: transmit ``top_k_idx`` entries
    sampled without replacement.

    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked gradients), both length ``mod_len``
    """
    flat = model_utils.get_grads_numpy(optim, self.mod_len)
    mask = np.zeros(self.mod_len)
    chosen = np.random.choice(np.arange(self.mod_len), self.top_k_idx, replace=False)
    mask[chosen] = 1
    return mask, flat * mask
def get_grad_dryden(self, optim: torch.optim.Optimizer):
    """Dryden-style selection: run top-k separately over the positive and
    the negative gradients and transmit the union.

    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked gradients), both length ``mod_len``
    """
    flat = model_utils.get_grads_tensor(optim, self.mod_len)
    mask = np.zeros(self.mod_len)
    zero = torch.tensor(0.0)
    positives = torch.where(flat > zero, flat, zero)
    negatives = torch.where(flat < zero, flat, zero)
    # Same per-sign budget as the original: sparse_rate of the full length.
    mask[positives.topk(int(self.sparse_rate * positives.size(0))).indices] = 1
    mask[abs(negatives).topk(int(self.sparse_rate * negatives.size(0))).indices] = 1
    return mask, (flat.numpy()) * mask
def get_grads_top_k_residual(self, optim: torch.optim.Optimizer, residual: torch.Tensor):
    """Top-k with error accumulation: the running residual absorbs the
    current gradient in place, the largest entries are transmitted and
    zeroed so the remainder carries over to the next round.

    :param residual: accumulated untransmitted gradient (mutated in place)
    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked residual payload), both length ``mod_len``
    """
    grads = model_utils.get_grads_tensor(optim, self.mod_len)
    residual += grads  # in-place: the caller keeps the updated residual
    _, kept = abs(residual).topk(self.top_k_idx)
    mask = np.zeros(self.mod_len)
    mask[kept] = 1
    # Multiplying the numpy view creates a fresh array, so zeroing the
    # transmitted entries afterwards does not corrupt the payload.
    payload = (residual.numpy()) * mask
    residual[kept] = 0
    return mask, payload
def get_grads_top_k_residual_momentum(self, optim: torch.optim.Optimizer, residual: torch.Tensor):
    """Top-k with a momentum-style residual: zero entries restart from the
    raw gradient, others follow an EMA (0.9 * residual + 0.1 * gradient).

    :param residual: accumulated residual tensor
    :param optim: optimizer whose gradients are flattened by model_utils
    :return: (bitmap, masked residual payload), both length ``mod_len``
    """
    grads = model_utils.get_grads_tensor(optim, self.mod_len)
    # NOTE(review): torch.where rebinds the local name, so unlike the plain
    # residual variant the caller's tensor is NOT updated in place — confirm
    # this asymmetry is intended.
    residual = torch.where(residual == 0, grads,
                           torch.tensor(0.9) * residual + torch.tensor(0.1) * grads)
    # residual = torch.tensor(0.9) * residual + torch.tensor(0.1) * grads
    _, kept = abs(residual).topk(self.top_k_idx)
    mask = np.zeros(self.mod_len)
    mask[kept] = 1
    payload = (residual.numpy()) * mask
    residual[kept] = 0
    return mask, payload
@staticmethod
def aggregate_grad(bitmaps: np.ndarray, grads: np.ndarray):
    """Average stacked sparse gradients element-wise, dividing each entry
    by how many workers actually transmitted it. Zero counts are replaced
    by one to avoid division by zero (those sums are zero anyway)."""
    counts = bitmaps.sum(0)
    totals = grads.sum(0)
    safe_counts = np.where(counts == 0, 1, counts)
    return totals / safe_counts
| zhengLabs/FedLSC | utils/top_k_utils.py | top_k_utils.py | py | 7,698 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
... |
72745850025 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from .models import Movies
from .form import MovieForm
# Create your views here.
def Home(request):
    """Render the landing page with every movie in the catalogue."""
    context = {'movie_list': Movies.objects.all()}
    return render(request, "home.html", context)
def details(request, movie_id):
    """Render the detail page for one movie looked up by primary key."""
    return render(request, 'details.html', {'movie': Movies.objects.get(id=movie_id)})
def add_movie(request):
    """Show the add-movie form; on POST create a Movies row from the
    submitted fields and re-render the form."""
    if request.method == 'POST':
        # NOTE(review): request.FILES['movie-image'] raises KeyError when no
        # file is attached — confirm the form enforces the upload.
        movie = Movies(
            movie_name=request.POST.get('movie-name'),
            movie_desc=request.POST.get('movie-desc'),
            movie_year=request.POST.get('movie-year'),
            movie_img=request.FILES['movie-image'],
        )
        movie.save()
    return render(request, 'addMovie.html')
def update_movie(request, id):
    """Edit an existing movie.

    On POST, validate and save the submitted data, then redirect home.
    On GET, show the form pre-filled from the instance.

    Bug fix: the form was unconditionally bound to ``request.POST``, so a
    plain GET produced an empty bound form that was always invalid and
    rendered with validation errors instead of the movie's current values.
    """
    movie = Movies.objects.get(id=id)
    if request.method == 'POST':
        form = MovieForm(request.POST, request.FILES, instance=movie)
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        # Unbound form pre-populated from the existing instance.
        form = MovieForm(instance=movie)
    return render(request, "edit.html", {'form': form, 'movie': movie})
def delete(request, id):
    """Confirm-and-delete view: POST removes the movie and redirects home,
    GET renders the confirmation page."""
    if request.method == 'POST':
        Movies.objects.get(id=id).delete()
        return redirect('/')
    return render(request, 'delete.html')
{
"api_name": "models.Movies.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Movies.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Movies",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "djang... |
29197648237 | import re
from . import Target, Entity
from geotext import GeoText
from spacy.lang.en.stop_words import STOP_WORDS
class LocationParser(Target):
    """Pipeline stage: tag country / nationality mentions found in the
    cleaned text as Entity.COUNTRY entities and strip them from the text."""
    def __init__(self):
        super().__init__()
        # Extend spaCy's stop words with terms GeoText frequently
        # mis-detects as place names ("University of ...", "Central ...").
        self.stop_words = STOP_WORDS
        self.stop_words.add("university")
        self.stop_words.add("central")
    def __call__(self, document):
        assert isinstance(document, dict), f"wrong input of type {type(document)} to location parser"
        geo = GeoText(document["text_cleaned"])
        for mention in geo.countries + geo.nationalities: # geo.cities
            if mention.lower() in self.stop_words:
                continue
            # "[\s-]*".join(mention) interleaves the mention's characters
            # with optional whitespace/hyphens, so line-wrapped or
            # hyphenated occurrences are still matched.
            # NOTE(review): match offsets come from "text_cleaned" but are
            # applied to "text" — this assumes both share offsets; confirm.
            for match in re.finditer("[\s-]*".join(mention), document["text_cleaned"], re.IGNORECASE):
                country = document["text"][match.start():match.end()]
                # non capitalized words result in poor precision
                if country.capitalize() != country:
                    continue
                document["entities"][Entity.COUNTRY].add(country)
        # sort to match longest first
        sorted_countries = sorted(document["entities"][Entity.COUNTRY], key=lambda country: len(country), reverse=True)
        for country in sorted_countries:
            # Very short names (< 4 chars) are too ambiguous to strip.
            if len(country) < 4:
                continue
            # clean_text is inherited from the Target base class (defined
            # outside this file excerpt).
            self.clean_text(document, country, cased=True)
        # low precision -> UK, CH etc.
        # geo = GeoText(document["text"][:document["abstract_start"]])
        # for mention in geo.country_mentions:
        #     if mention.lower() in self.stop_words:
        #         continue
        #     for match in re.finditer(mention, document["text"], re.IGNORECASE):
        #         document["entities"][Entity.COUNTRY].add(document["text"][match.start():match.end()])
        return document
| kherud/native-language-identification | pipeline/pipes/geolocation.py | geolocation.py | py | 1,801 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "spacy.lang.en.stop_words.STOP_WORDS",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "geotext.GeoText",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.finditer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.IG... |
264247579 | """
https://portswigger.net/web-security/csrf/lab-referer-validation-broken
"""
import sys
import requests
from bs4 import BeautifulSoup

site = sys.argv[1]
# Normalise the target to a bare host name.
# Bug fix: the original used str.lstrip('https://'), which strips any
# leading run of the characters h/t/p/s/:/ — it can therefore eat the
# beginning of the host itself (e.g. "shop.example" -> "op.example").
if 'https://' in site:
    site = site.rstrip('/')
    if site.startswith('https://'):
        site = site[len('https://'):]

s = requests.Session()

login_url = f'https://{site}/login'
resp = s.get(login_url)
soup = BeautifulSoup(resp.text, 'html.parser')
# The lab banner links to the personal exploit server.
exploit_url = soup.find('a', {'id': 'exploit-link'}).get('href')

# Auto-submitting CSRF page; history.pushState makes the Referer contain
# the lab's login URL, and the unsafe-url Referrer-Policy keeps the full
# URL (including the path) in the header.
exploit_html = f'''<html>
    <body>
        <form action="https://{site}/change-email" method="POST">
            <input type="hidden" name="email" value="pwned@evil-user.net" />
        </form>
        <script>
            history.pushState("", "", "/?{login_url}")
            document.forms[0].submit();
        </script>
    </body>
</html>'''

formData = {
    'urlIsHttps': 'on',
    'responseFile': '/exploit',
    'responseHead': 'HTTP/1.1 200 OK\nContent-Type: text/html; charset=utf-8\nReferrer-Policy: unsafe-url',
    'responseBody': exploit_html,
    'formAction': 'STORE'
}

resp = s.post(exploit_url, data=formData)
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
}
] |
42046912149 | import gdal2tiles
from osgeo import gdal
# -b selects the band to take; the band order is 1, 2, 3 because sample.tif is RGB.
def sliceToTiles(
        geotiffName,
        geotiffBytes,
        slicesOutputPath,
        optionsTranslate=None,
        optionsSliceToTiles=None
):
    """
    Prepare a GeoTIFF held in memory and cut it into tiles displayable by
    leaflet.js.

    - geotiffName - name used for the in-memory (/vsimem) working files.
    - geotiffBytes - byte array of the GeoTIFF to prepare and slice.
    - optionsTranslate - list of gdal_translate options (default converts a
      16-bit RGB image to an 8-bit VRT).
    - optionsSliceToTiles - dict of keyword options for gdal2tiles.

    Fix: the option defaults were mutable default arguments (a shared list
    and dict); they are now created per call.
    """
    if optionsTranslate is None:
        optionsTranslate = ['-if GTiff', '-ot Byte', '-b 1', '-b 2', '-b 3', '-of vrt', '-scale']
    if optionsSliceToTiles is None:
        optionsSliceToTiles = {"nb_processes": 1}
    # Stage the raw bytes as an in-memory GDAL file.
    gdal.FileFromMemBuffer(f"/vsimem/{geotiffName}.tiff", geotiffBytes)
    image = gdal.Open(f"/vsimem/{geotiffName}.tiff")
    # Convert to an 8-bit VRT ('-b 1/2/3' assumes an RGB source).
    gdal.Translate(f'/vsimem/{geotiffName}.vrt', image, options=" ".join(optionsTranslate))
    gdal2tiles.generate_tiles(f'/vsimem/{geotiffName}.vrt', slicesOutputPath, **optionsSliceToTiles)
    # Release the in-memory working files.
    gdal.Unlink(f'/vsimem/{geotiffName}.vrt')
    gdal.Unlink(f"/vsimem/{geotiffName}.tiff")
| moevm/nosql2h23-ecology | worker/app/image_processing/geotiff_slicer/slice2tiles.py | slice2tiles.py | py | 1,282 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "osgeo.gdal.FileFromMemBuffer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
... |
2405850792 | import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
class FixedRandomPermutation(nn.Module):
    """Layer applying a fixed, seed-determined permutation of the feature
    dimension (and its inverse) in order to mix the data between blocks."""

    def __init__(self, input_dim, seed):
        super(FixedRandomPermutation, self).__init__()
        self.in_channels = input_dim
        # Fix: use a local RandomState instead of seeding the global numpy
        # RNG — same permutation for a given seed, but without clobbering
        # the global random stream as a side effect.
        rng = np.random.RandomState(seed)
        self.permutation = rng.permutation(self.in_channels)
        # Build the inverse mapping so forward(inverse=True) undoes forward.
        self.permutation_inv = np.zeros_like(self.permutation)
        for i, p in enumerate(self.permutation):
            self.permutation_inv[p] = i
        if torch.cuda.is_available():
            self.permutation = torch.cuda.LongTensor(self.permutation)
            self.permutation_inv = torch.cuda.LongTensor(self.permutation_inv)
        else:
            self.permutation = torch.LongTensor(self.permutation)
            self.permutation_inv = torch.LongTensor(self.permutation_inv)

    def forward(self, x, inverse=False):
        """Permute (or un-permute, when ``inverse``) the channel dim of x."""
        if not inverse:
            x = x[:, self.permutation]
        else:
            x = x[:, self.permutation_inv]
        return x
class sub_network(nn.Module):
"""Fully connected subnetwork of a single coupling block"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super(sub_network, self).__init__()
# create list of hidden layers
self.fcs = nn.ModuleList([nn.Linear(in_features=input_dim, out_features=hidden_dim)])
self.fcs.extend([nn.Linear(in_features=hidden_dim, out_features=hidden_dim) for i in range(1, num_layers-1)])
self.fcs.append(nn.Linear(in_features=hidden_dim, out_features=output_dim))
def forward(self, x):
for layer in self.fcs:
x = F.leaky_relu(layer(x))
return x
class AffineCouplingBlock(nn.Module):
    """Coupling layer with affine transformations and 4 single subnetworks for each learned coefficient
    Refer to implementation: https://github.com/VLL-HD/FrEIA/blob/master/FrEIA/
    """
    def __init__(self, input_dim, hidden_dim, num_layers):
        super(AffineCouplingBlock, self).__init__()
        # Split input U into two halves
        self.u1_dim = input_dim // 2
        self.u2_dim = input_dim - input_dim // 2
        self.clamp = 5.0 # important to bound exp
        # Define scale and translation subnetworks for the two complementary affine coupling layers
        # s*/t* map one half of the features to scale/shift coefficients
        # for the other half, which keeps the transform invertible.
        self.s1 = sub_network(self.u1_dim, hidden_dim, self.u2_dim, num_layers)
        self.t1 = sub_network(self.u1_dim, hidden_dim, self.u2_dim, num_layers)
        self.s2 = sub_network(self.u2_dim, hidden_dim, self.u1_dim, num_layers)
        self.t2 = sub_network(self.u2_dim, hidden_dim, self.u1_dim, num_layers)
    def e(self, x):
        """Exponential function with clipped values to avoid too big values
        Args:
            x: input
        Returns: exponential with clipped values
        """
        # atan maps R -> (-pi/2, pi/2) and 0.636 ~= 2/pi, so the exponent is
        # smoothly bounded to (-clamp, clamp) and e(x) to (e^-5, e^5).
        return torch.exp(self.clamp * 0.636 * torch.atan(x))
    def forward(self, x, inverse=False):
        # Split x in two halves
        u1 = torch.narrow(x, 1, 0, self.u1_dim)
        u2 = torch.narrow(x, 1, self.u1_dim, self.u2_dim)
        # Perform forward kinematics
        if not inverse:
            # v1 = u1 dotprod exp(s2(u2)) + t2(u2)
            exp_2 = self.e(self.s2(u2))
            v1 = u1 * exp_2 + self.t2(u2)
            # v2 = u2 dotprod exp(s1(v1)) + t1(v1)
            exp_1 = self.e(self.s1(v1))
            v2 = u2 * exp_1 + self.t1(v1)
        # Perform inverse kinematics (names of u and v are swapped)
        else:
            # The two sub-steps are undone in reverse order with negated
            # scale exponents, exactly inverting the forward pass.
            # u2 = (v2-t1(v1)) dotprod exp(-s1(v1))
            exp_1 = self.e(-self.s1(u1))
            v2 = (u2 - self.t1(u1)) * exp_1
            # u1 = (v1-t2(u2)) dotprod exp(-s2(u2))
            exp_2 = self.e(-self.s2(v2))
            v1 = (u1 - self.t2(v2)) * exp_2
        return torch.cat((v1, v2), 1)
class INN(nn.Module):
    """Invertible Neural Network (INN)
    Paper: Analyzing inverse problems with invertible neural networks [L. Ardizzone et al. 2018]
    Implementation inspired from:
    https://github.com/VLL-HD/FrEIA/blob/master/FrEIA/
    https://github.com/VLL-HD/analyzing_inverse_problems
    """
    def __init__(self, config):
        super(INN, self).__init__()
        # All dimensions come from the config dict; total_dim is the padded
        # width every coupling block operates on.
        self.total_dim = config['total_dim']
        self.input_dim = config['input_dim']
        self.output_dim = config['output_dim']
        self.hidden_dim = config['hidden_dim']
        self.latent_dim = config['latent_dim']
        self.num_layers_subnet = config['num_layers_subnet']
        self.num_coupling_layers = config['num_coupling_layers']
        # create list of hidden layers
        # Alternate coupling blocks with fixed permutations (seeded by the
        # block index i) so successive blocks mix different channel halves;
        # the stack ends with a final coupling block.
        self.fcs = nn.ModuleList()
        for i in range(self.num_coupling_layers-1):
            self.fcs.append(AffineCouplingBlock(self.total_dim, self.hidden_dim, self.num_layers_subnet))
            self.fcs.append(FixedRandomPermutation(self.total_dim, i))
        self.fcs.append(AffineCouplingBlock(self.total_dim, self.hidden_dim, self.num_layers_subnet))
    def forward(self, x, inverse=False):
        # The inverse pass runs the same layers in reverse order, each with
        # its own inverse transform.
        if not inverse:
            for layer in self.fcs:
                x = layer(x, inverse)
        else:
            for layer in reversed(self.fcs):
                x = layer(x, inverse)
        return x
    def predict(self, tcp, device):
        """Predicts joint angles dependent on the tcp + by sampling from N(0, 1)
        Args:
            tcp: (x, y) coordinates of end-effector
            device: 'cpu' or 'gpu'
        Returns: predicted joint angles
        """
        # Sample z from standard normal distribution
        z = torch.randn(tcp.size()[0], self.latent_dim, device=device)
        # Padding in case y_dim + z_dim < total_dim
        Y_PAD = torch.zeros(tcp.size()[0], self.total_dim - self.output_dim - self.latent_dim, device=device)
        # Perform inverse kinematics
        y_inv = torch.cat((z, Y_PAD, tcp), dim=1)
        with torch.no_grad():
            output_inv = self.forward(y_inv, inverse=True)
        return output_inv
    def save_checkpoint(self, epoch, optimizer, loss, PATH):
        # Persist model + optimizer state together with epoch and loss so
        # training can be resumed.
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss,
        }, PATH)
    def load_checkpoint(self, PATH, optimizer=None):
        # Restore a checkpoint saved by save_checkpoint; CUDA-saved tensors
        # are remapped to the CPU when no GPU is available.
        # Returns (optimizer, epoch, loss) when an optimizer is supplied,
        # otherwise (epoch, loss).
        if torch.cuda.is_available():
            checkpoint = torch.load(PATH)
        else:
            checkpoint = torch.load(PATH, map_location=torch.device('cpu'))
        self.load_state_dict(checkpoint['model_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']
        if not optimizer == None:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            return optimizer, epoch, loss
        else:
            return epoch, loss
    def save_weights(self, PATH):
        # Persist only the state_dict (no optimizer/epoch metadata).
        torch.save(self.state_dict(), PATH)
    def load_weights(self, PATH):
        # Load a state_dict saved by save_weights, remapping to CPU if needed.
        if torch.cuda.is_available():
            self.load_state_dict(torch.load(PATH))
        else:
            self.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))
| thomasbbrunner/tum-adlr-ws20-06 | src/models/INN.py | INN.py | py | 7,218 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"lin... |
74588530663 | # coding=utf-8
"""
Encore REST services
REST Documentation : https://www.encodeproject.org/help/rest-api/
# ### Encode REST TEST
# BioREST import Encode
# encode = Encode()
# response = encode.biosample('ENCBS000AAA')
# encode.show_response(response)
"""
__author__ = "Arnaud KOPP"
__copyright__ = "© 2015-2016 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GNU GPL V3.0"
__maintainer__ = "Arnaud KOPP"
__email__ = "kopp.arnaud@gmail.com"
__status__ = "Production"
import json
import logging
from BioREST.Service import REST
log = logging.getLogger(__name__)
class Encode(REST):
    """
    Class for doing REST requests to Encode
    """

    def __init__(self):
        super(Encode, self).__init__(name="Encode", url="https://www.encodeproject.org/")
        # Force return from the server in JSON format
        self.HEADERS = {'accept': 'application/json'}

    def biosample(self, accession_number):
        """
        Get biosample with accession number like ENCBS000AAA
        :param accession_number: ENCODE biosample accession
        :return: json object
        """
        url = "biosample/" + accession_number
        params = {'frame': 'object'}
        response = self.http_get(url, params=params, headers=self.HEADERS)
        return response

    @staticmethod
    def response_keys(response, first_level=True):
        """
        Get all keys from response
        :param response: response object (a dict) from a request
        :param first_level: only first level or with sublevel
        :return: list of keys

        Fix: the non-first-level branch called ``dict.iteritems``, which was
        removed in Python 3 and raised AttributeError.
        """
        if first_level:
            keys = response.keys()
        else:
            keys = [key for key, value in response.items()]
        return keys

    @staticmethod
    def show_response(response):
        """
        Print the response in pretty format
        :param response: decoded JSON response
        :return: None
        """
        print(json.dumps(response, indent=4, separators=(',', ': ')))

    def search(self, searchterm, embedded=False, **kwargs):
        """
        Make a search in Encode database
        :param searchterm: text to search for
        :param embedded: request the 'embedded' frame instead of 'object'
        :param kwargs: extra filters (file_format, experiment, dataset,
            type, md5sum); unknown keys are silently ignored
        :return: json object
        :raise ValueError: when 'type' is not one of the valid search types

        Fix: the 'type' key was compared with ``is`` (identity), which only
        works by accident of CPython string interning; use ``==``.
        """
        __valid_params = ['file_format', 'experiment', 'dataset', 'type', 'md5sum']
        __valid_search_type = ['file', 'replicate', 'biosample']
        if embedded:
            params = {'searchTerm': searchterm, 'frame': 'embedded', 'format': 'json'}
        else:
            params = {'searchTerm': searchterm, 'frame': 'object', 'format': 'json'}
        url = "search/"
        for key, value in kwargs.items():
            if key in __valid_params:
                if key == 'type':
                    if value in __valid_search_type:
                        params[key] = value
                    else:
                        raise ValueError('Not valid type')
                else:
                    params[key] = value
        response = self.http_get(url, params=params, headers=self.HEADERS)
        return response
| ArnaudKOPP/BioREST | BioREST/Encode.py | Encode.py | py | 3,029 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "BioREST.Service.REST",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
}
] |
35815327993 | from collections import defaultdict
from random import randint
import numpy as np
class GridWorldEnv():
    """Deterministic grid-world MDP: cells carry rewards, and there are five
    actions (up, right, down, left, stay). Moving off the board keeps the
    agent in place and pays ``hit_wall_reward``."""
    def __init__(self,
                 height,
                 width,
                 forbidden_grids,
                 target_grids,
                 target_reward = 1,
                 forbidden_reward = -1,
                 normal_reward = 0,
                 hit_wall_reward = -1,
                 discounted_factor = 0.9
                 ) -> None:
        self.height = height
        self.width = width
        self.discounted_factor = discounted_factor # To remove
        self.nS = width * height
        # Initialize the reward grid (normal cells default to 0.0).
        self.grids = np.zeros((height, width), dtype=float)
        # [[normal_reward for _ in range(width)] for _ in range(height)]
        for f_grid in forbidden_grids:
            self.grids[f_grid[0]][f_grid[1]] = forbidden_reward
        for t_grid in target_grids:
            self.grids[t_grid[0]][t_grid[1]] = target_reward
        self.target_grids = target_grids
        self.hit_wall_reward = hit_wall_reward
        # Action setup: (d_row, d_col) deltas for up/right/down/left/stay.
        self._action_space = [(-1,0),(0, 1),(1, 0),(0, -1),(0,0)]
        self.action_mappings = [" ↑ "," → "," ↓ ", " ← "," ↺ "]
        self._state_ind_change = [-width,1,width,-1,0] # state index change based on action
        self.action_n = len(self._action_space)
        # Model-based structures (populated by init_model_based_transitions).
        self.transition_probs = defaultdict(lambda: defaultdict(float))
        self.expected_rewards = defaultdict(lambda: defaultdict(float))
        self.P = defaultdict(lambda: defaultdict(list)) # P[s][a] = (prob, next_state, reward, is_done)
    def init_model_based_transitions(self):
        # Populate P, transition_probs and expected_rewards for planning
        # algorithms; the dynamics are deterministic, so every probability
        # is 1.
        it = np.nditer(self.grids, flags=['multi_index'])
        while not it.finished:
            s = it.iterindex
            i, j = it.multi_index
            for action, move in enumerate(self._action_space):
                y, x = i+move[0], j+move[1]
                next_s = s+self._state_ind_change[action]
                if x >= self.width or x < 0 or y >= self.height or y < 0:
                    # hitwall
                    self.P[s][action] = [(1, s, self.hit_wall_reward, False)]
                else:
                    if (y,x) in self.target_grids:
                        self.P[s][action] = [(1, next_s, self.grids[y][x], True)]
                    else:
                        self.P[s][action] = [(1, next_s, self.grids[y][x], False)]
            it.iternext()
        for i in range(self.height):
            for j in range(self.width):
                for action, move in enumerate(self._action_space):
                    y, x = i+move[0], j+move[1]
                    if x >= self.width or x < 0 or y >= self.height or y < 0:
                        # hitwall
                        self.transition_probs[((i,j), action)][(i,j)] = 1
                    else:
                        self.transition_probs[((i,j), action)][(y,x)] = 1
        for i in range(self.height):
            for j in range(self.width):
                state = (i,j)
                for action, move in enumerate(self._action_space):
                    # Note: in an MDP the reward depends only on the current
                    # state and action, never on the future.
                    y, x = i+move[0], j+move[1]
                    if x >= self.width or x < 0 or y >= self.height or y < 0:
                        # hitwall
                        self.expected_rewards[state][action] = self.hit_wall_reward
                    else:
                        self.expected_rewards[state][action] = self.grids[y][x]
    def state_to_index(self, state):
        # Row-major flattening of a (row, col) pair.
        return state[0] * self.width + state[1]
    def index_to_state(self, index):
        # Inverse of state_to_index.
        return (index // self.width, index % self.width)
    def valid_actions(self, state):
        # Every action is available in every state.
        return self.action_n
    def step(self, state, a):
        # Apply action ``a`` in ``state``; returns (next_state, reward).
        # Off-board moves leave the state unchanged and pay hit_wall_reward.
        i, j = state
        y, x = i + self._action_space[a][0], j + self._action_space[a][1]
        if x >= self.width or x < 0 or y >= self.height or y < 0:
            # hitwall
            return (i, j), self.hit_wall_reward
        else:
            return (y, x), self.grids[y][x]
    def reset(self):
        # get a random start state
        return randint(0, self.height - 1), randint(0, self.width - 1)
    def __str__(self) -> str:
        # NOTE(review): '{:2f}' means width 2 with the default 6-decimal
        # precision; '{:.2f}' was probably intended — confirm.
        to_print = ""
        for i in range(self.height):
            to_print += "[ "
            for j in range(self.width):
                to_print += '{:2f}'.format(self.grids[i][j])
                to_print += " "
                # print(self.grids[i][j], end=" ")
            to_print += "]\n"
        return to_print
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "collectio... |
29007233825 | from encodings import search_function
from selenium import webdriver
import os
from selenium.webdriver.common.keys import Keys
class Home2(webdriver.Edge):
    """Context-manager wrapper around the Edge driver for the Netmeds site."""

    def __init__(self, driver_path=r"C:/Users/vaish/Desktop/Self Learning/Cloud/DEVOPS/SELENIUM", teardown=False):
        self.teardown = teardown
        self.driver_path = driver_path
        # Bug fix: the driver directory was concatenated straight onto PATH
        # with no separator, corrupting the last PATH entry; join with
        # os.pathsep so the directory is actually searchable.
        os.environ['PATH'] += os.pathsep + self.driver_path
        options = webdriver.EdgeOptions()
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        super(Home2, self).__init__(options=options)
        self.implicitly_wait(10)
        self.maximize_window()

    def __exit__(self, *args):
        # NOTE(review): this quits when teardown is False, i.e. passing
        # teardown=True keeps the browser open — confirm the intended
        # polarity of the flag.
        if self.teardown == False:
            self.quit()

    def search(self):
        """Type 'Sanitiser' into the site's search box and submit."""
        # NOTE(review): find_element_by_id was removed in Selenium 4; this
        # code requires Selenium 3 (or a port to find_element(By.ID, ...)).
        searchbox = self.find_element_by_id('search')
        searchbox.click()
        searchbox.send_keys('Sanitiser')
        searchbox.send_keys(Keys.ENTER)
| Donuts252001/Netmeds | ENTERING_VALUES/home2.py | home2.py | py | 929 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Edge",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "seleniu... |
22630594275 | # insert check
# db에 입력할 것인지 안할 것인지 확인
# module
import sys
import json
from modules import Insert_data
def func(items):
    """Interactively confirm and insert scraped franchise data.

    For each franchise, print how many 'plus' and 'sale' records were
    scraped, ask y/n on stdin, and hand confirmed items to
    Insert_data.func together with the franchise index.

    Fix: the index was recovered with ``items.index(i)``, which is O(n) per
    iteration and returns the FIRST matching position — wrong whenever two
    franchises scraped identical data. Use enumerate instead.
    """
    # Load the franchise metadata that parallels `items` by index.
    with open('franchise_list.json') as json_file:
        franchise_list = json.load(json_file)
    for idx, item in enumerate(items):
        name = franchise_list[idx]["name"]
        print(name, "plus:", len(item[0]))
        try:
            print(name, "sale:", len(item[1]))
        except (IndexError, TypeError):
            # Some franchises have no sale data at all.
            print(name, "sale: no data")
        print("Do you want to insert this data?(y or n)")
        ans = ""
        while ans not in ("y", "n", "Y", "N"):
            ans = sys.stdin.readline().replace("\n", "")
        # insert data
        if ans in ("Y", "y"):
            Insert_data.func(item, idx)
| unChae/store_list | dist/modules/Insert_check.py | Insert_check.py | py | 939 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "modules.Insert_data.func"... |
72424386023 | import json
# Path to your file
file_path = 'conv_sample'
def extract_text_from_data(data):
    """Return the 'text' field of the first post in data['post_list'].

    Falls back to descriptive placeholder strings when the list is empty
    or the field is absent.
    """
    try:
        posts = data.get('post_list', [])
        if not posts:
            return 'No posts in the list'
        return posts[0].get('text', 'Text not found')
    except KeyError:
        return 'Text extraction error'
def main():
    """Prompt for a line number, parse that line of the file as JSON and
    print the extracted post text."""
    line_number = int(input("Enter the line number: "))
    with open(file_path, 'r') as file:
        data_lines = file.readlines()
    if not (1 <= line_number <= len(data_lines)):
        print("Invalid line number")
        return
    raw = data_lines[line_number - 1].strip()  # strip trailing newline
    try:
        record = json.loads(raw)
        text = extract_text_from_data(record)
        print(f"Data Point {line_number} Text: {text}")
    except json.JSONDecodeError:
        print(f"Error decoding JSON for Data Point {line_number}")
if __name__ == "__main__":
main()
| charlieaccurso/charlie_research | Emoji/extract_text.py | extract_text.py | py | 1,179 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 33,
"usage_type": "attribute"
}
] |
40097697635 | import base64
import sys
import os
import datetime
import json
import requests
import scrapy
import urllib.parse
from utils import *
def findphone(name):
    """Scrape a French Google search for a phone number tied to ``name``.

    Returns {'name': ..., 'phone': ...} when a +XX X XX XX XX XX pattern is
    found, otherwise None.
    """
    print(f'searching {name}')
    quoted = urllib.parse.quote_plus(name)
    response = requests.get(f'https://www.google.com/search?hl=fr&ie=UTF-8&oe=UTF-8&q={quoted}+t%C3%A9l%C3%A9phone')
    if not response.ok:
        print(f'response ko for {name}')
        return None
    print('response ok')
    selector = scrapy.Selector(response)
    phone = selector.xpath('//div[contains(text(),"+")]/text()').re_first('\+\d\d \d \d\d \d\d \d\d \d\d')
    if phone is None:
        print('phone not found')
        return None
    print('phone found')
    return {'name': name, 'phone': phone}
def process(data, context):
    """Cloud-function entry point: look up the phone number for the parsed
    name and push any hit to BigQuery."""
    name = parse_parameters(data)
    result = findphone(name)
    if result is not None:
        sendtobq(result, 'us', 'phone')
{
"api_name": "urllib.parse.parse.quote_plus",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "re... |
70606557225 | # 使用AKSHARE + mysql 实现动态抓取个股的交易历史数据
# Likewise, wrapping this in one more outer loop downloads the full trade
# history into one table per stock. Next: backfill the history and schedule a
# daily-update script, which settles the historical trade data.
#
# After that, build a backtesting framework.
#
# Then add macro factors, then per-stock micro factors, then backtest the
# historical correlation of those factors.
import time
from datetime import datetime
import pandas as pd
import warnings
from sqlalchemy import create_engine
import akshare as ak
warnings.filterwarnings("ignore")
# 输出显示设置
pd.options.display.max_rows=None
pd.options.display.max_columns=None
pd.options.display.expand_frame_repr=False
pd.set_option('display.unicode.ambiguous_as_wide',True)
pd.set_option('display.unicode.east_asian_width',True)
# myslq url
engine = create_engine('mysql+pymysql://root:123456@hadoop102:3306/stock?charset=utf8')
# 参数设置
# period="daily" # 周期 'daily', 'weekly', 'monthly'
# start_date="20220822" # 数据获取开始日期
# end_date="20220822" # 数据获取结束日期
# adjust="hfq" # 复权类型 qfq": "前复权", "hfq": "后复权", "": "不复权"
def get_stock(period="daily", start_date=None, end_date=None, adjust="hfq", engine=engine):
    """
    Fetch A-share quotes for the given date range and append them to the
    MySQL table ``ods_dc_stock_quotes_di``.

    :param period: bar period: 'daily', 'weekly' or 'monthly'
    :param start_date: start date 'YYYYMMDD'; defaults to today
    :param end_date: end date 'YYYYMMDD'; defaults to start_date
    :param adjust: price adjustment: 'qfq' forward, 'hfq' backward, '' none
    :param engine: SQLAlchemy engine for the target MySQL database
    :return: None

    Fixes:
    - ``if start_date or end_date is None`` parsed as
      ``start_date or (end_date is None)``, so an explicitly passed
      start_date was always overwritten with today's date.
    - ``datetime.date.today()`` raised AttributeError because ``datetime``
      here is the class imported from the module; use ``datetime.today()``.
    - the history DataFrame is reset each iteration so a failed download
      can no longer reuse (or crash on) the previous stock's data.
    """
    if start_date is None or end_date is None:
        # Default to today; run after 15:00 so the trading session is closed.
        start_date = datetime.today().strftime('%Y%m%d')
        end_date = start_date
    # Eastmoney real-time quote board yields the complete stock code list
    # (Beijing is excluded by the real-time interface).
    df = ak.stock_zh_a_spot_em()
    code_list = df[['代码', '名称']].values
    for i in range(len(code_list)):
        ak_code = code_list[i][0]
        ak_name = code_list[i][1]
        stock_zh_a_hist_df = pd.DataFrame()
        try:
            # Eastmoney history has no market cap; join from the 163 feed.
            stock_zh_a_hist_df = ak.stock_zh_a_hist(symbol=ak_code, period=period, start_date=start_date, end_date=end_date, adjust=adjust)
        except Exception as e:
            print(e)
        try:
            # 163 feed: total / floating market cap (TODO: join into result).
            stock_zh_a_hist_163_df = ak.stock_zh_a_hist_163(symbol=ak_code, start_date=start_date, end_date=end_date)
        except Exception as e:
            print(e)
        try:
            # Eastmoney suspended/resumed list.
            # NOTE(review): the date is hardcoded and the result is unused.
            stock_tfp_em_df = ak.stock_tfp_em(date="20220523")
        except Exception as e:
            print(e)
        if stock_zh_a_hist_df.empty:
            continue
        # Attach the exchange suffix expected by downstream consumers.
        if ak_code.startswith('6'):
            stock_zh_a_hist_df['股票代码'] = ak_code + '.SH'
        elif ak_code.startswith('8') or ak_code.startswith('4'):
            stock_zh_a_hist_df['股票代码'] = ak_code + '.BJ'
        else:
            stock_zh_a_hist_df['股票代码'] = ak_code + '.SZ'
        stock_zh_a_hist_df['股票名称'] = ak_name
        stock_zh_a_hist_df.rename(columns={'日期': '交易日期', '开盘': '开盘价', '收盘': '收盘价', '最高': '最高价', '最低': '最低价'},
                                  inplace=True)
        stock_zh_a_hist_df = stock_zh_a_hist_df[
            ['交易日期', '股票代码', '股票名称', '开盘价', '收盘价', '最高价', '最低价', '成交量', '成交额', '振幅', '涨跌幅', '涨跌额', '换手率']]
        # Sort by trade date and renumber the index.
        stock_zh_a_hist_df.sort_values(by=['交易日期'], ascending=True, inplace=True)
        stock_zh_a_hist_df.reset_index(drop=True, inplace=True)
        # Append into MySQL; the table is created automatically on first use.
        stock_zh_a_hist_df.to_sql('ods_dc_stock_quotes_di', engine, chunksize=100000, index=None)
| cgyPension/pythonstudy_space | 04_learn_quantitative/akshare采集/source.py | source.py | py | 4,440 | python | zh | code | 7 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.options",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pandas.options",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "panda... |
75174338985 | import cv2, sys
import numpy as np
def main():
    """Load the image named on the command line, blur it, and write a
    sequence of contour visualisations (contours0..3.png)."""
    if len(sys.argv) < 2:
        print("usage: python edgedetector.py <imagename>")
        exit()
    # Load in color and smooth to suppress noise before contour detection.
    color = cv2.imread(sys.argv[1], 1)
    color = cv2.GaussianBlur(color, (3, 3), 0)
    cv2.imwrite("contours0.png", color)
    # Pass 1: white filled contours; passes 2-3: white then green outlines.
    color = find_contours(color, (255, 255, 255), True)
    cv2.imwrite("contours1.png", color)
    color = find_contours(color, (255, 255, 255))
    cv2.imwrite("contours2.png", color)
    color = find_contours(color, (0, 255, 0))
    cv2.imwrite("contours3.png", color)
def find_edges(img):
    """Write Laplacian and Sobel edge maps of ``img`` to disk.

    BUG FIX: the original referenced ``laplacian`` and ``sobely`` without
    ever computing them, so any call raised NameError. All three filters
    are now computed before being written.
    """
    # Convolve with the standard derivative kernels (64-bit float output
    # so negative gradients are not clipped).
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=13)  # d/dx
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=13)  # d/dy
    cv2.imwrite("laplace.png", laplacian)
    cv2.imwrite("sobel.png", sobelx)
    cv2.imwrite("sobely.png", sobely)
#find_edges(img, colorimg)
def find_contours(img, color, infill=False):
    """Draw the external contours of ``img`` onto it (in place) and return it.

    :param img: BGR image to threshold and contour.
    :param color: BGR tuple used to draw the contours.
    :param infill: if True, fill the contours instead of outlining them.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, gray = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    # FIX: findContours returns 3 values in OpenCV 3.x but only 2 in 2.x/4.x;
    # taking the second-to-last element is version-agnostic (the contours).
    contours = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
    print("found contours:", len(contours))
    if infill:
        print("INFILL")
        cv2.drawContours(img, contours, -1, color, -1)  # thickness -1 => fill
    else:
        cv2.drawContours(img, contours, -1, color, 1)
    return img
if __name__=='__main__':
main()
| squeakus/bitsandbytes | opencv/sobel.py | sobel.py | py | 1,368 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_numb... |
6752261336 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QDialog, QTreeWidgetItem
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from product.controllers.productcontroller import ProductController
from labrecord.controllers.labrecordscontroller import LabrecordsController
from verification.views.selectrecords import Ui_Dialog
class SelectRecordsModule(QDialog, Ui_Dialog):
    """Dialog listing product records (kind == 0) or lab records (kind != 0);
    emits the chosen record autoids through the ``selected`` signal."""

    # Emits (kind, [autoid, ...]) when the user confirms a selection.
    selected = pyqtSignal(int, list)

    def __init__(self, kind, parent=None):
        super(SelectRecordsModule, self).__init__(parent)
        self.kind = kind
        self.detail = []
        self.PC = ProductController()
        self.LC = LabrecordsController()
        self.setupUi(self)
        self.treeWidget_filelist.hideColumn(1)
        # Populate the tree with the records of the requested kind.
        self.get_generalrecords()

    def get_generalrecords(self):
        """Fill the tree widget with records.

        FIX: the original duplicated the whole population loop for the two
        record kinds; only the data source and the id/name dict keys differ,
        so they are selected up front and a single loop is used.
        """
        if self.kind == 0:
            res = self.PC.get_data(3, False, *VALUES_TUPLE_PP)
            id_key, name_key = 'prodid', 'prodname'
        else:
            res = self.LC.get_data(0, False, *VALUES_TUPLE_LR)
            id_key, name_key = 'chkid', 'chkname'
        for item in res:
            qtreeitem = QTreeWidgetItem(self.treeWidget_filelist)
            qtreeitem.setText(0, str(item['autoid']))
            qtreeitem.setText(1, item[id_key])
            qtreeitem.setText(2, item[name_key])
            qtreeitem.setText(3, item['batchno'])
            qtreeitem.setText(4, item['spec'])
            qtreeitem.setText(5, item['package'])
        for i in range(1, 6):
            self.treeWidget_filelist.resizeColumnToContents(i)

    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_filelist_itemDoubleClicked(self, qitem, p_int):
        # Double-click selects exactly the clicked record.
        self.selected.emit(self.kind, [int(qitem.text(0))])
        self.accept()

    @pyqtSlot()
    def on_pushButton_accept_clicked(self):
        # Emit all highlighted rows; do nothing when the selection is empty.
        select_list = [int(item.text(0))
                       for item in self.treeWidget_filelist.selectedItems()]
        if select_list:
            self.selected.emit(self.kind, select_list)
            self.accept()

    @pyqtSlot()
    def on_pushButton_cancel_clicked(self):
        self.close()
VALUES_TUPLE_PP = ('autoid', 'prodid', 'prodname', 'spec', 'package', 'batchno')
VALUES_TUPLE_LR = ('autoid', 'chkid', 'chkname', 'spec', 'package', 'batchno')
| zxcvbnmz0x/gmpsystem | verification/modules/selectrecordsmodule.py | selectrecordsmodule.py | py | 2,672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "verification.views.selectrecords.Ui_Dialog",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 12,
"usage_type": "call"
},... |
6994689610 | from lib.cuckoo.common.abstracts import Signature
class InjectionRunPE(Signature):
    """Works much like InjectionThread from injection_thread.py - so please
    read its comment there to find out about the internal workings of this
    signature."""
    name = "injection_runpe"
    description = "Executed a process and injected code into it, probably while unpacking"
    severity = 5
    categories = ["injection"]
    authors = ["glysbaysb", "Accuvant"]
    minimum = "2.0"

    filter_apinames = [
        "CreateProcessInternalW",
        "NtUnmapViewOfSection",
        "NtAllocateVirtualMemory",
        "NtGetContextThread",
        "WriteProcessMemory",
        "NtWriteVirtualmemory",
        "NtMapViewOfSection",
        "NtSetContextThread",
        "NtResumeThread",
    ]

    # Pseudo-handles that refer to the calling process itself.
    _current_process_handles = "0xffffffff", "0xffffffffffffffff"

    def init(self):
        # Maps pid -> set of monitored API names seen for that process.
        self.functions = {}

    def on_process(self, process):
        self.functions[process["pid"]] = set()

    def on_call(self, call, process):
        handle = call["arguments"].get("process_handle")
        # Calls that target the current process itself are not injection.
        if handle in self._current_process_handles:
            return
        self.functions[process["pid"]].add(call["api"])
        self.mark_call()

    def on_complete(self):
        # Flag the sample once any single process used at least four of the
        # monitored injection-related APIs against another process.
        for api_names in self.functions.values():
            if len(api_names) >= 4:
                return True
| cuckoosandbox/community | modules/signatures/windows/injection_runpe.py | injection_runpe.py | py | 1,453 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 3,
"usage_type": "name"
}
] |
41538262741 | import random
import string
from django.db import transaction
from django.db.models import Q
from django.shortcuts import render,redirect
from django.core.mail import send_mail
from django.http import HttpResponse, response
from django_redis import get_redis_connection
from redis import Redis
from user.captcha.image import ImageCaptcha
# Create your views here.
from user.models import User
from user.utils.hash_code import hash_pwd, hash_email
from user.utils.salt import get_salt
def captcha(request):
    """Generate a 4-character image captcha, remember the answer in the
    session and return the raw image bytes."""
    code = "".join(random.sample(string.ascii_letters + string.digits, 4))
    image_data = ImageCaptcha().generate(code)
    # Store the expected answer so the registration view can verify it.
    request.session['captcha'] = code
    return HttpResponse(image_data, "img/png")
def register(request):
return render(request, "user/register.html")
def user_register(request):
    """Handle the registration form: verify the e-mailed code and the image
    captcha, then persist the new user and redirect to the login page."""
    # 1. Pull the submitted form fields.
    phone = request.POST.get("phone")
    email = request.POST.get("email")
    pwd = request.POST.get("user_pwd")
    sex = request.POST.get("sex")
    email_code = request.POST.get("email_code")
    # 'm' means male; stored as a boolean flag on the User model.
    if sex == 'm':
        sex = True
    else:
        sex = False
    # 2. Compare the e-mail verification code with the one sent earlier.
    # (Earlier revisions kept the code in redis with a TTL; it now lives in
    # the session, so a missing 'code' key means the code has expired.)
    # redis = Redis(host="192.168.92.128", port=7000)
    # code = redis.get(email)
    # redis.setex(email,300,code)
    # redis_connection = get_redis_connection("default")
    # code = redis_connection.get(email)
    code = request.session.get('code')
    if email_code != code:
        return HttpResponse("邮箱验证码不一致")
    # 3. Hash the password with a fresh random salt and save the user.
    salt = get_salt()
    hash_password = hash_pwd(pwd, salt)
    number = request.POST.get('number')
    cc2 = request.session.get('captcha')
    # Captcha comparison is case-insensitive.
    number = number.lower()
    cc2 = cc2.lower()
    with transaction.atomic():
        if (number == cc2):
            user = User.objects.create(email=email, phone=phone, password=hash_password, sex=sex, salt=salt)
            if user:
                return redirect("/user/login")
        else:
            return HttpResponse('验证码错误')
    # 4. Fallback: if create() somehow returned a falsy value, still go to
    # the login page (named-URL variant of the redirect above).
    return redirect("user:login")
def send_email(request):
request.session.flush()
email = request.GET.get("email")
subject = "欢迎您注册百知教育员工系统"
# 随机验证码
code = random.sample(string.digits,6)
code = ''.join(code)
# 在发送验证码时需要将验证码储存起来,方便注册时验证,并设置验证码的有效期
if request.session.get('code'):
del request.session['code']
request.session['code'] = code
# redis = Redis(host="192.168.92.128", port=7000)
# redis.setex(email, 300, code)
# redis_connection = get_redis_connection("default")
# redis_connection.setex(email, 300, code)
# request.session['code'] = code
# 判断email以及email是否合法
if email:
status = send_mail(
subject,
f"这是您的注册码{code},请在5分钟内完成注册",
"3202448109@qq.com",
[email],
)
if status:
return HttpResponse("邮件发送成功")
return HttpResponse("邮件发送失败,请稍等")
def login(request):
try:
name = request.COOKIES.get('name')
pwd = request.COOKIES.get('pwd')
with transaction.atomic():
user = User.objects.filter(Q(email=name, password=pwd) | Q(phone=name, password=pwd)).first()
if user:
request.session['is'] = True
request.session['name'] = name
return redirect('http://127.0.0.1:8000/ems/index/')
else:
return render(request,"user/login.html")
except:
return render(request,"user/login.html")
def login2(request):
try:
name = request.POST['name']
pwd = request.POST['pwd']
check = request.POST.get('check')
with transaction.atomic():
user = User.objects.filter(Q(email=name) | Q(phone=name)).first()
salt = user.salt
# 再次对密码进行加密对比
hash_pwd1 = hash_pwd(pwd, salt)
user = User.objects.filter(Q(email=name, password=hash_pwd1) | Q(phone=name, password=hash_pwd1))
if user:
request.session['is'] = True
request.session['name'] = name
if check == '1':
response = redirect('http://127.0.0.1:8000/ems/index/')
response.set_cookie('name',name,max_age=3600 * 24 * 7)
response.set_cookie('pwd',hash_pwd1 ,max_age=3600 * 24 * 7)
return response
else:
response = redirect('http://127.0.0.1:8000/ems/index/')
return response
else:
return HttpResponse('用户名或密码错误')
except:
return HttpResponse('登录失败')
def checkEmail2(request):
email = request.POST.get('email')
user = User.objects.filter(email=email)
if user:
return HttpResponse('邮箱已存在')
else:
return HttpResponse()
def checkPhone2(request):
phone = request.POST.get('phone')
user = User.objects.filter(phone=phone)
if user:
return HttpResponse('手机号已存在')
else:
return HttpResponse()
def check(request):
    """AJAX endpoint: report whether the submitted name (matched against
    either the email or the phone column) is already taken."""
    candidate = request.GET.get("name")
    taken = User.objects.filter(Q(email=candidate) | Q(phone=candidate))
    # Non-empty queryset => name already registered.
    return HttpResponse("用户名已存在" if taken else "")
{
"api_name": "user.captcha.image.ImageCaptcha",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name"... |
16825022923 | import collections
import numpy as np
import pandas as pd
import nltk, string
from nltk import word_tokenize # Convert paragraph in tokens
from sklearn.feature_extraction.text import TfidfVectorizer
nltk.download('punkt')
text_data = pd.read_csv("Text_Similarity_Dataset.csv")
stemmer = nltk.stem.porter.PorterStemmer()
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
def stem_tokens(tokens):
    """Apply the module-level Porter stemmer to every token."""
    return list(map(stemmer.stem, tokens))
'''removing punctuation, lowercase, stem'''
def normalize(text):
    """Lowercase ``text``, strip punctuation, tokenize and stem the tokens."""
    cleaned = text.lower().translate(remove_punctuation_map)
    return stem_tokens(nltk.word_tokenize(cleaned))
vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
def cosine_sim(text1, text2):
    """Cosine similarity of two documents under the shared TF-IDF vectorizer.

    TfidfVectorizer L2-normalises each row, so the off-diagonal entry of
    ``tfidf * tfidf.T`` is already the cosine similarity of the two texts.
    """
    tfidf = vectorizer.fit_transform([text1, text2])
    # FIX: the ``.A`` shorthand was deprecated and removed from scipy sparse
    # containers; ``toarray()`` is the portable equivalent.
    return (tfidf * tfidf.T).toarray()[0, 1]
similarity = []
for i in text_data.index:
sent1 = text_data['text1'][i]
sent2 = text_data['text2'][i]
similarity.append(cosine_sim(sent1,sent2))
final_score = pd.DataFrame({'Unique_ID':text_data.Unique_ID,
'Similarity_score':similarity})
final_score.to_csv('final_score.csv',index=False) | centipede13/Text_Similarity | STS_Pred.py | STS_Pred.py | py | 1,235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.ste... |
74032141222 | """
Extract loss plots from log file
"""
import matplotlib.pyplot as plt
import numpy as np
import Config
def main():
train_loss_l = np.empty((0, 5))
train_class_l = np.empty((0, 3))
train_metric_l = np.empty((0, 3))
valid_loss_l = np.empty((0, 5))
valid_class_l = np.empty((0, 3))
valid_metric_l = np.empty((0, 3))
test_loss_l = np.empty((0, 5))
test_class_l = np.empty((0, 3))
test_metric_l = np.empty((0, 3))
with open(file_path, 'r') as LOG:
for line in LOG.readlines()[:-1]:
if 'weight updated' in line:
continue
val = line.split('\t')
# epoch = val[0].split(' ')[1]
# epoch_list.append(int(epoch))
train_val = val[2].split(':')[-1]
valid_val = val[3].split(':')[-1]
test_val = val[4].split(':')[-1]
train_val = train_val.split(',')
valid_val = valid_val.split(',')
test_val = test_val.split(',')
train_loss, train_class, train_metric = train_val[0], train_val[1], train_val[2]
valid_loss, valid_class, valid_metric = valid_val[0], valid_val[1], valid_val[2]
test_loss, test_class, test_metric = test_val[0], test_val[1], test_val[2]
train_loss = train_loss.split('[')[-1][:-1]
train_loss = np.fromstring(train_loss, sep=' ')
train_class = train_class.split('[')[-1][:-1]
train_class = np.fromstring(train_class, sep=' ')
train_metric = train_metric.split('[')[-1][:-1]
train_metric = np.fromstring(train_metric, sep=' ')
valid_loss = valid_loss.split('[')[-1][:-1]
valid_loss = np.fromstring(valid_loss, sep=' ')
valid_class = valid_class.split('[')[-1][:-1]
valid_class = np.fromstring(valid_class, sep=' ')
valid_metric = valid_metric.split('[')[-1][:-1]
valid_metric = np.fromstring(valid_metric, sep=' ')
test_loss = test_loss.split('[')[-1][:-1]
test_loss = np.fromstring(test_loss, sep=' ')
test_class = test_class.split('[')[-1][:-1]
test_class = np.fromstring(test_class, sep=' ')
test_metric = test_metric.split('[')[-1][:-1]
test_metric = np.fromstring(test_metric, sep=' ')
train_loss_l = np.concatenate([train_loss_l, np.expand_dims(train_loss, axis=0)], axis=0)
train_class_l = np.concatenate([train_class_l, np.expand_dims(train_class, axis=0)], axis=0)
train_metric_l = np.concatenate([train_metric_l, np.expand_dims(train_metric, axis=0)], axis=0)
valid_loss_l = np.concatenate([valid_loss_l, np.expand_dims(valid_loss, axis=0)], axis=0)
valid_class_l = np.concatenate([valid_class_l, np.expand_dims(valid_class, axis=0)], axis=0)
valid_metric_l = np.concatenate([valid_metric_l, np.expand_dims(valid_metric, axis=0)], axis=0)
test_loss_l = np.concatenate([test_loss_l, np.expand_dims(test_loss, axis=0)], axis=0)
test_class_l = np.concatenate([test_class_l, np.expand_dims(test_class, axis=0)], axis=0)
test_metric_l = np.concatenate([test_metric_l, np.expand_dims(test_metric, axis=0)], axis=0)
# print(val)
# print(epoch)
# print(train_val, valid_val, test_val)
# validation min epoch
min_epoch_valid = np.argmin(valid_loss_l[:, 0])
# performance at min_epoch
print(f"Min valid loss: {valid_loss_l[min_epoch_valid, 0]} at epoch {min_epoch_valid}")
print(f"Valid metrics at epoch {min_epoch_valid}: {valid_metric_l[min_epoch_valid]}")
print(f"Test metrics at epoch {min_epoch_valid}: {test_metric_l[min_epoch_valid]}")
max_epoch_F1 = np.argmax(valid_metric_l[:, 2])
print(f"Min F1 loss: {valid_metric_l[max_epoch_F1, 2]} at epoch {max_epoch_F1}")
print(f"Valid metrics at epoch {max_epoch_F1}: {valid_metric_l[max_epoch_F1]}")
print(f"Test metrics at epoch {max_epoch_F1}: {test_metric_l[max_epoch_F1]}")
if LOG:
with open(file_dir + 'models.log', 'a') as LOG:
LOG.write(f"{model_id}: \t")
LOG.write(f"Min Valid loss: epoch {min_epoch_valid} \t Loss: {valid_loss_l[min_epoch_valid, 0]} \t")
LOG.write(f"Valid metrics: {valid_metric_l[min_epoch_valid]} \t")
LOG.write(f"Test metrics: {test_metric_l[min_epoch_valid]} \t")
LOG.write(f"Max Valid F1: epoch {max_epoch_F1} \t F1: {valid_metric_l[max_epoch_F1, 2]} \t")
LOG.write(f"Valid metrics: {valid_metric_l[max_epoch_F1]} \t")
LOG.write(f"Test metrics: {test_metric_l[max_epoch_F1]} \n")
# PLOTTING #
# 9*9 subplots
train_c, valid_c, test_c = 'b', 'orange', 'r'
fig, ax = plt.subplots(3, 3, figsize=[12, 9])
# [0, 0]: Total Loss
ax[0, 0].plot(train_loss_l[:, 0], color=train_c)
ax[0, 0].plot(valid_loss_l[:, 0], color=valid_c)
ax[0, 0].plot(test_loss_l[:, 0], color=test_c)
ax[0, 0].set_title('Total Loss')
# [0, 1]: R-CNN Loss Classifier
ax[0, 1].plot(train_loss_l[:, 1], color=train_c)
ax[0, 1].plot(valid_loss_l[:, 1], color=valid_c)
ax[0, 1].plot(test_loss_l[:, 1], color=test_c)
ax[0, 1].set_title('R-CNN Loss Classifier')
# [0, 2]: R-CNN Loss Regression
ax[0, 2].plot(train_loss_l[:, 2], color=train_c)
ax[0, 2].plot(valid_loss_l[:, 2], color=valid_c)
ax[0, 2].plot(test_loss_l[:, 2], color=test_c)
ax[0, 2].set_title('R-CNN Loss Regression')
# [1, 1]: RPN Loss Objectness
ax[1, 1].plot(train_loss_l[:, 3], color=train_c)
ax[1, 1].plot(valid_loss_l[:, 3], color=valid_c)
ax[1, 1].plot(test_loss_l[:, 3], color=test_c)
ax[1, 1].set_title('RPN Loss Objectness')
# [1, 2]: RPN Loss Regression
ax[1, 2].plot(train_loss_l[:, 4], color=train_c)
ax[1, 2].plot(valid_loss_l[:, 4], color=valid_c)
ax[1, 2].plot(test_loss_l[:, 4], color=test_c)
ax[1, 2].set_title('RPN Loss Regression')
# [2, 0]: Precision
ax[2, 0].plot(train_metric_l[:, 0], color=train_c)
ax[2, 0].plot(valid_metric_l[:, 0], color=valid_c)
ax[2, 0].plot(test_metric_l[:, 0], color=test_c)
ax[2, 0].set_title('Precision')
ax[2, 0].set_ylim([0, 1.05])
# [2, 1]: Recall
ax[2, 1].plot(train_metric_l[:, 1], color=train_c)
ax[2, 1].plot(valid_metric_l[:, 1], color=valid_c)
ax[2, 1].plot(test_metric_l[:, 1], color=test_c)
ax[2, 1].set_title('Recall')
ax[2, 1].set_ylim([0, 1.05])
# [2, 2]: F Score
ax[2, 2].plot(train_metric_l[:, 2], color=train_c)
ax[2, 2].plot(valid_metric_l[:, 2], color=valid_c)
ax[2, 2].plot(test_metric_l[:, 2], color=test_c)
ax[2, 2].set_title('F Score')
ax[2, 2].set_ylim([0, 1.05])
# plt.setp(ax[:, :], legend=['train', 'valid', 'test'])
plt.setp(ax[:, :], xlabel='Epoch')
plt.setp(ax[:2, :], ylabel='Loss')
plt.setp(ax[2, :], ylabel='%')
plt.subplots_adjust(wspace=0.5, hspace=0.5)
fig.legend(['train', 'valid', 'test'], loc='upper right')
plt.suptitle(model_id, size=16)
if save:
plt.savefig(op_dir + model_id + "_fig.png")
if show:
plt.show()
if __name__ == '__main__':
show = True
save = True
log = True
file_dir = "/home/chentyt/Documents/4tb/Tiana/P100ObjDet/Model/"
model_id = Config.model_id
# model_id = "2022-03-31_Resnet_simple_F1Val_rpnNms0.3_boxBatchSize:64,lr1e-05,clsAlpha('None',),n_channel32,
# kSize5anchorScale(128,256,512)"
op_dir = "/home/chentyt/Documents/4tb/Tiana/P100ObjDet/Model/"
file_path = file_dir + model_id + '_log'
main()
| Tianananana/Angio-Stenosis-Detection | LossPlot.py | LossPlot.py | py | 7,644 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "numpy.empty",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": ... |
23413091064 | # -*- coding: utf-8 -*-
"""
A auto compressed disk cache backed requests maker.
"""
import typing
import requests
from diskcache import Cache
from .decode import decoder
class CachedRequest(object):
"""
Implement a disk cache backed html puller, primarily using ``requests`` library.
Usage:
.. code-block:: python
import pytest
from crawlib import create_cache_here, CachedRequest
from xxx import parse_html
cache = create_cache_here(__file__)
spider = CachedRequest(cache=cache)
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(url) # equivalent to requests.get(url)
# validate your parse html function
result = parse_html(html)
To make post request:
.. code-block:: python
def test_parse_html_function():
url = "https://www.python.org/downloads/"
html = spider.request_for_html(
url,
request_method=requests.post,
request_kwargs={"data": ...},
)
# validate your parse html function
result = parse_html(html)
**中文文档**
在为爬虫程序写测试时, 由于我们要对 针对某一类 URL 所对应的 Html 进行数据抽取的函数
进行测试, 我们希望在一段时间内, 比如1天内, 只爬取一次. 使得在本地机器上反复测试时,
可以不用每次等待爬取. **以加快每次测试的速度**.
"""
def __init__(self,
cache: Cache,
log_cache_miss: bool = False,
expire: int = 24 * 3600):
"""
:type cache: Cache
:param cache:
:type log_cache_miss: bool
:param log_cache_miss: default False
:type expire: int
:param expire: default expire time for cache
"""
if not isinstance(cache, Cache):
raise TypeError
self.cache = cache
self.log_cache_miss = log_cache_miss
self.expire = expire
self.use_which = "requests" # type: str
self.get_html_method = self.get_html_method_for_requests # type: callable
self.use_requests()
def use_requests(self):
self.use_which = "requests"
self.get_html_method = self.get_html_method_for_requests
def get_html_method_for_requests(self,
response: requests.Response,
encoding: str = None,
errors: str = "strict",
**kwargs) -> str:
"""
Get html from ``requests.Response`` object.
:param response: the return of ``requests.request(method, url, **kwargs)``
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: html
"""
return decoder.decode(
binary=response.content,
url=response.url,
encoding=encoding,
errors=errors,
)
def get_binary_method_for_requests(self,
response: requests.Response,
**kwargs) -> bytes:
"""
Get binary data from ``requests.Response`` object.
:param response:
:param kwargs:
:return: binary data
"""
return response.content
# Frequently used method
def request_for_html(self,
url: str,
get_html_method: typing.Callable = None,
get_html_method_kwargs: dict = None,
request_method: typing.Callable = None,
request_kwargs: dict = None,
cache_expire: int = None,
cacheable_callback: typing.Callable = lambda html: True) -> str:
"""
:param url:
:param get_html_method: a callable method takes requests.Response as
first argument, returns html.
:param get_html_method_kwargs:
:param request_method: requests.get or requests.post
:param request_kwargs:
:param cacheable_callback: a method takes html as single argument,
if returns True, then update cache. otherwise do nothing.
**中文文档**
使用 ``requests.request()`` 执行 HTTP request, 返回 HTML.
永远优先尝试使用缓存. 如果缓存未命中, 则执行 HTTP request. 并用
cacheable_callback 检查 html, 如果返回 True, 则更新缓存. 如果返回 False
则不更新缓存.
"""
if get_html_method is None:
get_html_method = self.get_html_method
if get_html_method_kwargs is None:
get_html_method_kwargs = dict()
if request_method is None:
request_method = requests.get
if request_kwargs is None:
request_kwargs = dict()
if cache_expire is None:
cache_expire = self.expire
if self.use_which == "requests":
if "url" not in request_kwargs:
request_kwargs["url"] = url
if url in self.cache:
return self.cache[url]
else:
if self.log_cache_miss:
msg = "{} doesn't hit cache!".format(url)
print(msg)
response = request_method(**request_kwargs)
html = get_html_method(response, **get_html_method_kwargs)
if cacheable_callback(html):
self.cache.set(url, html, cache_expire)
return html
| MacHu-GWU/crawlib-project | crawlib/cached_request.py | cached_request.py | py | 5,737 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "diskcache.Cache",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "diskcache.Cache",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "requests.Response",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "decode.dec... |
14878275140 | import argparse
import csv
import itertools
import os
import subprocess
import sys
import tempfile
from typing import Any, Callable, Dict, Generic, Iterable, List, NamedTuple, TextIO, Tuple, TypeVar, Optional, Union
# The following command line options participate in the combinatorial generation.
# All other arguments have a global effect.
_COMBINATORIAL_OPTIONS=['packages', 'readaheads', 'compiler_filters']
_TRACING_READAHEADS=['mlock', 'fadvise']
_FORWARD_OPTIONS={'loop_count': '--count'}
_RUN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'run_app_with_prefetch')
RunCommandArgs = NamedTuple('RunCommandArgs', [('package', str), ('readahead', str), ('compiler_filter', Optional[str])])
CollectorPackageInfo = NamedTuple('CollectorPackageInfo', [('package', str), ('compiler_filter', str)])
_COLLECTOR_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'collector')
_COLLECTOR_TIMEOUT_MULTIPLIER = 2 # take the regular --timeout and multiply by 2; systrace starts up slowly.
_UNLOCK_SCREEN_SCRIPT=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'unlock_screen')
# This must be the only mutable global variable. All other global variables are constants to avoid magic literals.
_debug = False # See -d/--debug flag.
_DEBUG_FORCE = None # Ignore -d/--debug if this is not none.
# Type hinting names.
T = TypeVar('T')
NamedTupleMeta = Callable[..., T] # approximation of a (S : NamedTuple<T> where S() == T) metatype.
def parse_options(argv: List[str] = None):
"""Parse command line arguments and return an argparse Namespace object."""
parser = argparse.ArgumentParser(description="Run one or more Android applications under various settings in order to measure startup time.")
# argparse considers args starting with - and -- optional in --help, even though required=True.
# by using a named argument group --help will clearly say that it's required instead of optional.
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument('-p', '--package', action='append', dest='packages', help='package of the application', required=True)
required_named.add_argument('-r', '--readahead', action='append', dest='readaheads', help='which readahead mode to use', choices=('warm', 'cold', 'mlock', 'fadvise'), required=True)
# optional arguments
# use a group here to get the required arguments to appear 'above' the optional arguments in help.
optional_named = parser.add_argument_group('optional named arguments')
optional_named.add_argument('-c', '--compiler-filter', action='append', dest='compiler_filters', help='which compiler filter to use. if omitted it does not enforce the app\'s compiler filter', choices=('speed', 'speed-profile', 'quicken'))
optional_named.add_argument('-s', '--simulate', dest='simulate', action='store_true', help='Print which commands will run, but don\'t run the apps')
optional_named.add_argument('-d', '--debug', dest='debug', action='store_true', help='Add extra debugging output')
optional_named.add_argument('-o', '--output', dest='output', action='store', help='Write CSV output to file.')
optional_named.add_argument('-t', '--timeout', dest='timeout', action='store', type=int, help='Timeout after this many seconds when executing a single run.')
optional_named.add_argument('-lc', '--loop-count', dest='loop_count', default=1, type=int, action='store', help='How many times to loop a single run.')
optional_named.add_argument('-in', '--inodes', dest='inodes', type=str, action='store', help='Path to inodes file (system/extras/pagecache/pagecache.py -d inodes)')
return parser.parse_args(argv)
# TODO: refactor this with a common library file with analyze_metrics.py
def _debug_print(*args, **kwargs):
"""Print the args to sys.stderr if the --debug/-d flag was passed in."""
if _debug:
print(*args, **kwargs, file=sys.stderr)
def _expand_gen_repr(args):
"""Like repr but any generator-like object has its iterator consumed
and then called repr on."""
new_args_list = []
for i in args:
# detect iterable objects that do not have their own override of __str__
if hasattr(i, '__iter__'):
to_str = getattr(i, '__str__')
if to_str.__objclass__ == object:
# the repr for a generator is just type+address, expand it out instead.
new_args_list.append([_expand_gen_repr([j])[0] for j in i])
continue
# normal case: uses the built-in to-string
new_args_list.append(i)
return new_args_list
def _debug_print_gen(*args, **kwargs):
"""Like _debug_print but will turn any iterable args into a list."""
if not _debug:
return
new_args_list = _expand_gen_repr(args)
_debug_print(*new_args_list, **kwargs)
def _debug_print_nd(*args, **kwargs):
"""Like _debug_print but will turn any NamedTuple-type args into a string."""
if not _debug:
return
new_args_list = []
for i in args:
if hasattr(i, '_field_types'):
new_args_list.append("%s: %s" %(i.__name__, i._field_types))
else:
new_args_list.append(i)
_debug_print(*new_args_list, **kwargs)
def dict_lookup_any_key(dictionary: dict, *keys: Any):
    """Return the value of the first key in *keys* found in *dictionary*.

    :raises KeyError: if none of the keys are present.

    FIXES: the error path formatted ``"%s" % (keys)`` with ``keys`` being a
    tuple — with two or more keys the tuple is unpacked as format arguments
    and raises TypeError instead of KeyError; ``(keys,)`` pins it as a single
    argument. The per-key annotation was also ``List[Any]`` but each key is
    a single value, so it is now ``Any``.
    """
    for key in keys:
        if key in dictionary:
            return dictionary[key]
    raise KeyError("None of the keys %s were in the dictionary" % (keys,))
def generate_run_combinations(named_tuple: NamedTupleMeta[T], opts_dict: Dict[str, List[Optional[str]]])\
-> Iterable[T]:
"""
Create all possible combinations given the values in opts_dict[named_tuple._fields].
:type T: type annotation for the named_tuple type.
:param named_tuple: named tuple type, whose fields are used to make combinations for
:param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields.
:return: an iterable over named_tuple instances.
"""
combinations_list = []
for k in named_tuple._fields:
# the key can be either singular or plural , e.g. 'package' or 'packages'
val = dict_lookup_any_key(opts_dict, k, k + "s")
# treat {'x': None} key value pairs as if it was [None]
# otherwise itertools.product throws an exception about not being able to iterate None.
combinations_list.append(val or [None])
_debug_print("opts_dict: ", opts_dict)
_debug_print_nd("named_tuple: ", named_tuple)
_debug_print("combinations_list: ", combinations_list)
for combo in itertools.product(*combinations_list):
yield named_tuple(*combo)
def key_to_cmdline_flag(key: str) -> str:
"""Convert key into a command line flag, e.g. 'foo-bars' -> '--foo-bar' """
if key.endswith("s"):
key = key[:-1]
return "--" + key.replace("_", "-")
def as_run_command(tpl: NamedTuple) -> List[Union[str, Any]]:
"""
Convert a named tuple into a command-line compatible arguments list.
Example: ABC(1, 2, 3) -> ['--a', 1, '--b', 2, '--c', 3]
"""
args = []
for key, value in tpl._asdict().items():
if value is None:
continue
args.append(key_to_cmdline_flag(key))
args.append(value)
return args
def generate_group_run_combinations(run_combinations: Iterable[NamedTuple], dst_nt: NamedTupleMeta[T])\
        -> Iterable[Tuple[T, Iterable[NamedTuple]]]:
    """Group consecutive run combinations by their dst_nt-projection.

    Yields (dst_nt instance, iterator over the source tuples in that group).
    """
    def project_onto_dst(src_nt):
        # Keep only the fields dst_nt knows about, then rebuild as dst_nt.
        fields = src_nt._asdict()
        for extraneous in set(fields) - set(dst_nt._fields):
            del fields[extraneous]
        return dst_nt(**fields)

    for group_key, members in itertools.groupby(run_combinations, project_onto_dst):
        yield (group_key, members)
def parse_run_script_csv_file(csv_file: TextIO) -> List[int]:
    """Parse a CSV file full of integers into a flat int list."""
    # Empty cells (e.g. produced by trailing commas) are skipped, not parsed.
    return [int(cell)
            for row in csv.reader(csv_file)
            for cell in row
            if cell]
def make_script_command_with_temp_output(script: str, args: List[str], **kwargs)\
        -> Tuple[List[str], TextIO]:
    """
    Create a command to run a script given the args.

    Appends ``--<key> <value>`` for every kwarg, ``--verbose`` when global
    debugging is on, and ``--output <tmp-file-name>``.

    Returns a tuple (cmd, tmp_file). The caller is responsible for closing
    (and thereby deleting) the temporary file.

    FIX: the return annotation claimed ``Tuple[str, TextIO]`` but the first
    element is the argv list; it is now ``Tuple[List[str], TextIO]``.
    """
    tmp_output_file = tempfile.NamedTemporaryFile(mode='r')
    cmd = [script] + args
    for key, value in kwargs.items():
        cmd += ['--%s' % (key,), "%s" % (value,)]
    if _debug:
        cmd += ['--verbose']
    cmd = cmd + ["--output", tmp_output_file.name]
    return cmd, tmp_output_file
def execute_arbitrary_command(cmd: List[str], simulate: bool, timeout: int) -> Tuple[bool, str]:
    """Run ``cmd`` and return (passed, combined stdout+stderr text).

    In simulate mode the command line is only echoed and reported as passing.
    A command exceeding ``timeout`` seconds is killed; its partial output is
    still collected.
    """
    if simulate:
        print(" ".join(cmd))
        return (True, "")
    _debug_print("[EXECUTE]", cmd)
    proc = subprocess.Popen(cmd,
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE,
                            universal_newlines=True)
    try:
        script_output = proc.communicate(timeout=timeout)[0]
    except subprocess.TimeoutExpired:
        print("[TIMEDOUT]")
        proc.kill()
        # Reap the killed child and grab whatever it managed to print.
        script_output = proc.communicate()[0]
    _debug_print("[STDOUT]", script_output)
    return_code = proc.wait()
    _debug_print("[$?]", return_code)
    passed = return_code == 0
    if not passed:
        print("[FAILED, code:%s]" % (return_code), script_output, file=sys.stderr)
    return (passed, script_output)
def execute_run_combos(grouped_run_combos: Iterable[Tuple[CollectorPackageInfo, Iterable[RunCommandArgs]]], simulate: bool, inodes_path: str, timeout: int, loop_count: int, need_trace: bool):
    """For each (collector config, run combos) group: optionally run the trace
    collector once, then run every app combo against that collected trace.

    Yields one (passed, raw script output, parsed per-iteration timings)
    tuple per run combo.
    """
    # nothing will work if the screen isn't unlocked first.
    execute_arbitrary_command([_UNLOCK_SCREEN_SCRIPT], simulate, timeout)
    for collector_info, run_combos in grouped_run_combos:
        #collector_args = ["--package", package_name]
        collector_args = as_run_command(collector_info)
        # TODO: forward --wait_time for how long systrace runs?
        # TODO: forward --trace_buffer_size for size of systrace buffer size?
        collector_cmd, collector_tmp_output_file = make_script_command_with_temp_output(_COLLECTOR_SCRIPT, collector_args, inodes=inodes_path)
        with collector_tmp_output_file:
            collector_passed = True
            if need_trace:
                # systrace starts up slowly, hence the timeout multiplier
                # (a falsy timeout stays falsy, i.e. "no timeout").
                collector_timeout = timeout and _COLLECTOR_TIMEOUT_MULTIPLIER * timeout
                # NOTE(review): collector_passed / collector_script_output are
                # never consulted afterwards — presumably runs proceed even if
                # collection failed; confirm that is intentional.
                (collector_passed, collector_script_output) = execute_arbitrary_command(collector_cmd, simulate, collector_timeout)
            # TODO: consider to print a ; collector wrote file to <...> into the CSV file so we know it was ran.
            for combos in run_combos:
                args = as_run_command(combos)
                # Each run reads the collector's output file as its input.
                cmd, tmp_output_file = make_script_command_with_temp_output(_RUN_SCRIPT, args, count=loop_count, input=collector_tmp_output_file.name)
                with tmp_output_file:
                    (passed, script_output) = execute_arbitrary_command(cmd, simulate, timeout)
                    # Simulated runs get placeholder timings [1, 2, 3].
                    parsed_output = simulate and [1,2,3] or parse_run_script_csv_file(tmp_output_file)
                    yield (passed, script_output, parsed_output)
def gather_results(commands: Iterable[Tuple[bool, str, List[int]]], key_list: List[str], value_list: List[Tuple[str, ...]]):
_debug_print("gather_results: key_list = ", key_list)
yield key_list + ["time(ms)"]
stringify_none = lambda s: s is None and "<none>" or s
for ((passed, script_output, run_result_list), values) in itertools.zip_longest(commands, value_list):
if not passed:
continue
for result in run_result_list:
yield [stringify_none(i) for i in values] + [result]
yield ["; avg(%s), min(%s), max(%s), count(%s)" %(sum(run_result_list, 0.0) / len(run_result_list), min(run_result_list), max(run_result_list), len(run_result_list)) ]
def eval_and_save_to_csv(output, annotated_result_values):
csv_writer = csv.writer(output)
for row in annotated_result_values:
csv_writer.writerow(row)
output.flush() # see the output live.
def main():
global _debug
opts = parse_options()
_debug = opts.debug
if _DEBUG_FORCE is not None:
_debug = _DEBUG_FORCE
_debug_print("parsed options: ", opts)
need_trace = not not set(opts.readaheads).intersection(set(_TRACING_READAHEADS))
if need_trace and not opts.inodes:
print("Error: Missing -in/--inodes, required when using a readahead of %s" %(_TRACING_READAHEADS), file=sys.stderr)
return 1
output_file = opts.output and open(opts.output, 'w') or sys.stdout
combos = lambda: generate_run_combinations(RunCommandArgs, vars(opts))
_debug_print_gen("run combinations: ", combos())
grouped_combos = lambda: generate_group_run_combinations(combos(), CollectorPackageInfo)
_debug_print_gen("grouped run combinations: ", grouped_combos())
exec = execute_run_combos(grouped_combos(), opts.simulate, opts.inodes, opts.timeout, opts.loop_count, need_trace)
results = gather_results(exec, _COMBINATORIAL_OPTIONS, combos())
eval_and_save_to_csv(output_file, results)
return 0
if __name__ == '__main__':
sys.exit(main())
| AndroidBBQ/android10 | frameworks/base/startop/scripts/app_startup/app_startup_runner.py | app_startup_runner.py | py | 12,868 | python | en | code | 176 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"lin... |
13989593352 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtWidgets import QPushButton
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
btn1 = QPushButton('Button 1', self)
btn1.move(30, 50)
btn2 = QPushButton('Button 2', self)
btn2.move(150, 50)
btn1.clicked.connect(self.buttonClicked)
btn2.clicked.connect(self.buttonClicked)
self.statusBar()
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('sender')
self.show()
def buttonClicked(self):
sender = self.sender() # 获取信号发送者
self.statusBar().showMessage(sender.text() + ' was pressed')
if __name__ == '__main__':
app = QApplication(sys.argv)
win = MainWindow()
sys.exit(app.exec_())
| shellever/Python3Learning | thirdparty/pyqt5/signals/event-sender.py | event-sender.py | py | 952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 19,
"usage_type": "call"
},
{
... |
27766460728 |
import pygame
from pygame.locals import *
from utils import *
os.path.dirname(__file__)
class Mouse(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.load_sprite()
def load_sprite(self):
self.sheet, self.sheet_rect = load_image('CURSORS_SHEET_1.png')
self.frames = []
width = self.sheet.get_width() / 4
height = self.sheet.get_height()
self.state = 0
for i in range(3):
rect = Rect(i * width, 0 , width, height)
self.frames.append(rect)
def set_state(self, state):
self.state = state
def draw(self, screen):
x, y = pygame.mouse.get_pos()
x -= 5
y -= 5
#x -= self.frames[self.state].width /2
#y -= self.frames[self.state].height /2
screen.blit(self.sheet, (x, y), self.frames[self.state])
| aladdin83/airport_control | lib/mouse.py | mouse.py | py | 775 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite.Sprite.__init__",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "py... |
72021503465 | from django.urls import path
from .views import loginPage, loginWithFlutter, logout, logoutFlutter, signupPage, signupWithFlutter
urlpatterns = [
path('signup/', signupPage, name='signup'),
path('login/', loginPage, name='login'),
path('logout/', logout, name='logout'),
path('loginflutter/', loginWithFlutter, name='loginFlutter'),
path('signupflutter/', signupWithFlutter, name='registerFlutter'),
path('logoutflutter/', logoutFlutter, name='logoutFlutter'),
]
| chrisbagas/C08 | login_form/urls.py | urls.py | py | 488 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.signupPage",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.loginPage",... |
6752686356 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QWidget, QTreeWidgetItem
from stuff.controllers.stuffcontroller import StuffController
from product.controllers.productcontroller import ProductController
from workshop.views.productioninstruction import Ui_Form
import datetime
class PorductionInstructionModule(QWidget, Ui_Form):
def __init__(self, autoid, parent=None):
super(PorductionInstructionModule, self).__init__(parent)
self.autoid = autoid
self.setupUi(self)
self.SC = StuffController()
self.PC = ProductController()
self.ori_detail = dict()
self.new_detail = dict()
# 获取物料信息
self.get_stufflist()
# 获取生产指令信息
self.get_detail()
# 获取已经领取了的物料
def get_stufflist(self):
values_tupe = (
"autoid", "lrid", "stuffid", "stuffname", "batchno", "spec",
"package", "presamount", "content", "cunit", "water",
"impurity", "rdensity", "presunit"
)
key_dict = {
'ppid': self.autoid,
'stufftype__in': (0, 1, 2)
}
res = self.SC.get_prodstuff(False, *values_tupe, **key_dict)
if len(res):
for item in res:
qtreeitem = QTreeWidgetItem(self.treeWidget_stufflist)
qtreeitem.setText(0, str(item['autoid'])) # autoid
qtreeitem.setText(1, str(item['lrid'])) # lrid
qtreeitem.setText(2, item['stuffid'] + ' ' + item[
'stuffname']) # 物料
qtreeitem.setText(3, item['batchno']) # 进厂批号
qtreeitem.setText(4, item['spec']) # 含量规格
qtreeitem.setText(5, item['package']) # 包装规格
qtreeitem.setText(6, str(item['presamount']) + item[
'presunit']) # 计划量
qtreeitem.setText(7, str(item['content']) + item[
'cunit']) # 含量/效价
qtreeitem.setText(8, str(item['water']) + '%') # 水分
qtreeitem.setText(9, str(item['impurity'])) # 相对密度
qtreeitem.setText(10, str(item['rdensity'])) # 杂质
self.treeWidget_stufflist.hideColumn(0)
self.treeWidget_stufflist.hideColumn(1)
for i in range(2, 11):
self.treeWidget_stufflist.resizeColumnToContents(i)
def get_detail(self):
values_list = (
'instructorid', 'instructorname', 'warrantorid', 'warrantorname',
'executorid', 'executorname', 'plandate', 'warrantdate',
'executedate'
)
key_dict = {
'autoid': self.autoid
}
res = self.PC.get_producingplan(False, *values_list, **key_dict)
if len(res) == 0:
return
self.ori_detail = res[0]
self.pushButton_instructor.setText(
self.ori_detail['instructorid'] + ' ' +
self.ori_detail['instructorname']
)
self.pushButton_warrantor.setText(
self.ori_detail['warrantorid'] + ' ' +
self.ori_detail['warrantorname']
)
self.pushButton_executor.setText(
self.ori_detail['executorid'] + ' ' +
self.ori_detail['executorname']
)
if type(self.ori_detail['plandate']) is datetime.date:
self.dateEdit_plandate.setDate(self.ori_detail['plandate'])
if type(self.ori_detail['warrantdate']) is datetime.date:
self.dateEdit_warrantdate.setDate(self.ori_detail['warrantdate'])
if type(self.ori_detail['executedate']) is datetime.date:
self.dateEdit_executedate.setDate(self.ori_detail['executedate'])
| zxcvbnmz0x/gmpsystem | workshop/modules/productioninstructionmodule.py | productioninstructionmodule.py | py | 3,775 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "workshop.views.productioninstruction.Ui_Form",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "stuff.controllers.stuffcontroller.StuffController",
"line_number": 18,
... |
980796399 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import sys
import os
sys.path.append("/Users/forute/Documents/Academy/Resaech/Clustering_Worker")
import experiment_syn.worker_num.create_worker_labeling_number_dataset as csd
import not_public.model.Dawid_Skene as ds
import not_public.model.Majority_Voting as mv
import not_public.model.Worker_Clustering as wcv
n = 1000
ms = [100]
K = 2
Ns = [2]
cs = [0.3]
L = 10
epsilon = 1e-2
for m in ms:
for N in Ns:
for c in cs:
xs = np.arange(m // 2 + 1) / m
task_class = csd.create_data(n, K, [c, 1 - c])
task = np.hsplit(task_class, [1])[0].ravel()
true_class = np.hsplit(task_class, [1])[1].ravel()
g = np.array(sorted(task_class, key=lambda pair: pair[0]))[:, 1]
name = "./experiment_syn/worker_num/for_paper/data_" + \
"n" + str(n) + \
"m" + str(m) + \
"K" + str(K) + \
"N" + str(N) + \
"c" + str(c) + \
"L" + str(L) + ".csv"
if os.path.exists(name):
data = pd.read_csv(name)[["MV", "DS", "proposed1(wcv)"]]
else:
data = pd.DataFrame(columns=["MV", "DS", "proposed1(wcv)"])
acc_mv_list = list(data["MV"])
acc_ds_list = list(data["DS"])
acc_wcv_list = list(data["proposed1(wcv)"])
xss = np.arange(len(data.index), m // 2 + 1) / m
LLL = len(data.index)
for i, x in enumerate(xss):
task_worker_class = csd.task_worker_label(m, K, task, true_class, N, [1 - x, 0.0, x])
g_mv = mv.MV(task_worker_class, n, m, K)
acc_mv = accuracy_score(g, g_mv)
print("mv = {}".format(acc_mv))
acc_mv_list.append(acc_mv)
g_ds, [pi_ds, rho_ds] = ds.DS_elbo_debug(task_worker_class, n, m, K, epsilon=epsilon)
acc_ds = accuracy_score(g, g_ds)
print("acc_ds = {}".format(acc_ds))
acc_ds_list.append(acc_ds)
g_wcv, [pi_wcv, rho_wcv], _ = wcv.wcv(task_worker_class, n, m, K, L, epsilon=epsilon)
acc_wcv = accuracy_score(g, g_wcv)
print("acc_wcv = {}".format(acc_wcv))
acc_wcv_list.append(acc_wcv)
# g_wcv_wp, _, _ = wcv_wp.EVI(task_worker_class, n, m, K, L, epsilon=epsilon)
# acc_wcv_wp = accuracy_score(g, g_wcv_wp)
# print(" wcv_wp = ", acc_wcv_wp)
acc = pd.DataFrame([{'MV': acc_mv,
'DS': acc_ds,
'proposed1(wcv)': acc_wcv}])
# 'proposed2(wcv_wp)': acc_wcv_wp})
data = data.append(acc, ignore_index=True)
data.to_csv("./experiment_syn/worker_num/for_paper/data_" +
"n" + str(n) +
"m" + str(m) +
"K" + str(K) +
"N" + str(N) +
"c" + str(c) +
"L" + str(L) + ".csv")
print(len(acc_mv_list))
print(i + LLL)
plt.scatter(xs[0:i + 1 + LLL], acc_mv_list, label="MV", color="blue")
plt.scatter(xs[0:i + 1 + LLL], acc_ds_list, label="DS", color="red")
plt.scatter(xs[0:i + 1 + LLL], acc_wcv_list, label="proposed", color="green")
# plt.scatter(xs[0:i + 1], acc_wcv_wp_list, label="proposed2", color="yellow")
plt.xlabel("proportion of adversary")
plt.ylabel("Accuracy")
plt.legend(loc="upper right")
plt.savefig("./experiment_syn/worker_num/for_paper/graph_" +
"n" + str(n) +
"m" + str(m) +
"K" + str(K) +
"N" + str(N) +
"c" + str(c) +
"L" + str(L) + ".pdf")
plt.clf()
| HideakiImamura/MinimaxErrorRate | experiment1/main.py | main.py | py | 4,253 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "experiment_syn.worker_num.cr... |
12867831014 | ############################################################
# Author: Aravind Potluri <aravindswami135@gmail.com>
# Description: A simple python based video streaming app.
############################################################
# Libraries
import cv2
import socket
import pickle
import struct
# Set up the client socket
try:
clientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
targetIP = input("[#] Enter Streamer's IP: ") # Replace with the server's IP address
targetPort = int(input("[#] Enetr Port: "))
clientSock.connect((targetIP, targetPort))
except KeyboardInterrupt:
clientSock.close()
exit()
data = b""
payloadSize = struct.calcsize("L")
while True:
try:
# Data Processing
while len(data) < payloadSize:
packet = clientSock.recv(4 * 1024)
if not packet:
break
data += packet
packedMsgSize = data[:payloadSize]
data = data[payloadSize:]
msgSize = struct.unpack("L", packedMsgSize)[0]
while len(data) < msgSize:
data += clientSock.recv(4 * 1024)
frameData = data[:msgSize]
data = data[msgSize:]
# Deserialize the frame and display it
frame = pickle.loads(frameData)
cv2.imshow("Received", frame)
cv2.waitKey(1)
except KeyboardInterrupt:
break
except struct.error:
break
clientSock.close()
| name-is-cipher/pyVidStream | vidPlay.py | vidPlay.py | py | 1,439 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "struct.calc... |
37734863671 | from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from stats232a.classifiers.fc_net import *
from stats232a.data_utils import *
from stats232a.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from stats232a.solver import Solver
from stats232a.layers import *
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# load CIFAR-10 dataset
# from stats232a.data_utils import get_CIFAR10_data
# data = get_CIFAR10_data()
# for k, v in list(data.items()):
# print(('%s: ' % k, v.shape))
class Test(object):
def __init__(self):
# Load the (preprocessed) MNIST data.
# The second dimension of images indicated the number of channel. For black and white images in MNIST, channel=1.
# Load the raw MNIST data
num_training = 59000
num_validation = 1000
num_test = 1000
subtract_mean = True
mnist_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets'
X_train, y_train = load_mnist(dataset="training", path=mnist_dir, size=num_training + num_validation)
X_test, y_test = load_mnist(dataset="testing", path=mnist_dir, size=num_test)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
# Normalize the data: subtract the mean image
if subtract_mean:
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
# Package data into a dictionary
self.data = {
'X_train': X_train, 'y_train': y_train,
'X_val': X_val, 'y_val': y_val,
'X_test': X_test, 'y_test': y_test,
}
#
# Test1 - forward
def test1(self):
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = fc_forward(x, w, b)
correct_out = np.array([[1.49834967, 1.70660132, 1.91485297],
[3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print('Testing fc_forward function:')
print('difference: ', rel_error(out, correct_out))
#
# Test2 - backprop
def test2(self):
np.random.seed(231)
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: fc_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: fc_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: fc_forward(x, w, b)[0], b, dout)
_, cache = fc_forward(x, w, b)
dx, dw, db = fc_backward(dout, cache)
# The error should be around 1e-10
print('Testing fc_backward function:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
#
# Test the relu_forward function
def test3(self):
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[0., 0., 0., 0., ],
[0., 0., 0.04545455, 0.13636364, ],
[0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 5e-8
print('Testing relu_forward function:')
print('difference: ', rel_error(out, correct_out))
#
# ReLU layer: backward
def test4(self):
np.random.seed(231)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 3e-12
print('Testing relu_backward function:')
print('dx error: ', rel_error(dx_num, dx))
#
# Test: "Sandwich" layers
def test5(self):
from stats232a.layer_utils import fc_relu_forward, fc_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = fc_relu_forward(x, w, b)
dx, dw, db = fc_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: fc_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: fc_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: fc_relu_forward(x, w, b)[0], b, dout)
print('Testing affine_relu_forward:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
#
# Loss layers: Softmax
def test6(self):
np.random.seed(231)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print('\nTesting softmax_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
#
# Test: Two-layer network
def test7(self):
np.random.seed(231)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-3
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C)
print('Testing test-time forward pass ... ')
model.params['W1'] = np.linspace(-0.7, 0.3, num=D * H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H * C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N * D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print('Testing training loss (no regularization)')
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
for reg in [0.0, 0.7]:
print('Running numeric gradient check with reg = ', reg)
model.reg = 0
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
#
# Test: Solver
def test8(self):
model = TwoLayerNet()
##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 96% accuracy on the validation set. #
##############################################################################
solver = Solver(model, self.data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=9, batch_size=200,
print_every=100)
solver.train()
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
#
# Test: Multiple Layers
def test9(self):
# TODO: Use a three-layer Net to overfit 50 training examples.
# You will need to tweak the learning rate and initialization scale
num_train = 50
small_data = {
'X_train': self.data['X_train'][:num_train],
'y_train': self.data['y_train'][:num_train],
'X_val': self.data['X_val'],
'y_val': self.data['y_val'],
}
weight_scale = 1e-1
learning_rate = 5e-4
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
def test10(self):
# TODO: Use a five-layer Net to overfit 50 training examples.
# You will have to adjust the learning rate and weight initialization,
num_train = 50
small_data = {
'X_train': self.data['X_train'][:num_train],
'y_train': self.data['y_train'][:num_train],
'X_val': self.data['X_val'],
'y_val': self.data['y_val'],
}
learning_rate = 5e-4
weight_scale = 1e-1
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
#
# SGD+Momentum test
def test11(self):
from stats232a.optim import sgd_momentum
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N * D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N * D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N * D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
[0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
[0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
[0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
[1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096]])
expected_velocity = np.asarray([
[0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
[0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
[0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
[0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096]])
print('next_w error: ', rel_error(next_w, expected_next_w))
print('velocity error: ', rel_error(expected_velocity, config['velocity']))
def test12(self):
# six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
num_train = 4000
small_data = {
'X_train': self.data['X_train'][:num_train],
'y_train': self.data['y_train'][:num_train],
'X_val': self.data['X_val'],
'y_val': self.data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
print('running with ', update_rule)
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': 1e-2,
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in list(solvers.items()):
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
#
# RMSProp
def test13(self):
# Test RMSProp implementation; you should see errors less than 1e-7
from stats232a.optim import rmsprop
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N * D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N * D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N * D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
[-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
[-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
[0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
[0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
[0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
[0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
[0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
[0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926]])
print('next_w error: ', rel_error(expected_next_w, next_w))
print('cache error: ', rel_error(expected_cache, config['cache']))
#
# Adam
def test14(self):
# Test Adam implementation; you should see errors around 1e-7 or less
from stats232a.optim import adam
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N * D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N * D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N * D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N * D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
[-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
[-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
[0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
[0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
[0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853, ],
[0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385, ],
[0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767, ],
[0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
[0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
[0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
[0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
[0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85]])
print('next_w error: ', rel_error(expected_next_w, next_w))
print('v error: ', rel_error(expected_v, config['v']))
print('m error: ', rel_error(expected_m, config['m']))
#
# Train with RMSProp and Adam
def test15(self):
num_train = 4000
small_data = {
'X_train': self.data['X_train'][:num_train],
'y_train': self.data['y_train'][:num_train],
'X_val': self.data['X_val'],
'y_val': self.data['y_val'],
}
solvers = {}
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
print('running with ', update_rule)
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': learning_rates[update_rule]
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in list(solvers.items()):
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
#
# Batch Normalization: Forward in training mode
def test16(self):
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
#
# Batch Normalization: Forward in test mode
def test17(self):
    """Smoke-test batchnorm_forward in test (inference) mode.

    Warms up the running mean/variance with 100 training-mode passes, then
    checks a test-mode pass produces roughly normalized activations.
    Results are printed for manual inspection rather than asserted.
    """
    # Check the test-time forward pass by running the training-time
    # forward pass many times to warm up the running averages, and then
    # checking the means and variances of activations after a test-time
    # forward pass.
    N, D1, D2, D3 = 200, 50, 60, 3
    W1 = np.random.randn(D1, D2)
    W2 = np.random.randn(D2, D3)
    bn_param = {'mode': 'train'}
    gamma = np.ones(D3)
    beta = np.zeros(D3)
    for t in range(100):
        # Fresh random batch each iteration so running stats are meaningful.
        X = np.random.randn(N, D1)
        a = np.maximum(0, X.dot(W1)).dot(W2)
        batchnorm_forward(a, gamma, beta, bn_param)
    bn_param['mode'] = 'test'
    X = np.random.randn(N, D1)
    a = np.maximum(0, X.dot(W1)).dot(W2)
    a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
    # Means should be close to zero and stds close to one, but will be
    # noisier than training-time forward passes.
    print('After batch normalization (test-time):')
    print(' means: ', a_norm.mean(axis=0))
    print(' stds: ', a_norm.std(axis=0))
#
# Batch Normalization: backward
def test18(self):
    """Numerically gradient-check batchnorm_backward (dx, dgamma, dbeta).

    Compares analytic gradients against numeric ones and prints the
    relative errors for manual inspection.
    """
    # Gradient check batchnorm backward pass
    N, D = 2, 3
    x = 5 * np.random.randn(N, D) + 12
    gamma = np.random.randn(D)
    beta = np.random.randn(D)
    dout = np.random.randn(N, D)
    bn_param = {'mode': 'train'}
    # NOTE(review): fg/fb ignore their argument; this presumably relies on
    # eval_numerical_gradient_array perturbing gamma/beta in place — confirm.
    fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
    fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
    fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
    dx_num = eval_numerical_gradient_array(fx, x, dout)
    da_num = eval_numerical_gradient_array(fg, gamma, dout)
    db_num = eval_numerical_gradient_array(fb, beta, dout)
    # Kept only for the commented-out dmean check below.
    dmean_num = np.sum(dx_num, axis=0) * N
    _, cache = batchnorm_forward(x, gamma, beta, bn_param)
    dx, dgamma, dbeta = batchnorm_backward(dout, cache)
    # print('dmean error: ', rel_error(dmean_num, dmean), dmean, dmean_num)
    print('dx error: ', rel_error(dx_num, dx))
    print('dgamma error: ', rel_error(da_num, dgamma))
    print('dbeta error: ', rel_error(db_num, dbeta))
#
# Batch Normalization: Fully Connected Net
def test19(self):
    """Gradient-check a FullyConnectedNet with batch normalization enabled.

    Runs the check with and without L2 regularization and prints the
    per-parameter relative errors.
    """
    N, D, H1, H2, C = 2, 15, 20, 30, 10
    X = np.random.randn(N, D)
    y = np.random.randint(C, size=(N,))
    for reg in [0, 3.14]:
        print('Running check with reg = ', reg)
        model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                                  reg=reg, weight_scale=5e-2, dtype=np.float64,
                                  use_batchnorm=True)
        loss, grads = model.loss(X, y)
        print('Initial loss: ', loss)
        # dx is the input gradient, not a parameter — exclude it from the check.
        grads.pop('dx')
        for name in sorted(grads):
            f = lambda _: model.loss(X, y)[0]
            grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
            print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
        if reg == 0:
            print()
#
# CIFAR-10
def finalTest(self):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers. These are the same steps as we used for the SVM, but
    condensed to a single function.

    Then trains a 5-hidden-layer FullyConnectedNet with batchnorm using
    Adam, plots the training curves, and prints validation/test accuracy.
    """
    # Load the raw CIFAR-10 data
    num_training = 49000
    num_validation = 1000
    num_test = 1000
    subtract_mean = True
    # NOTE(review): hard-coded absolute path — only works on this machine.
    cifar10_dir = '/home/parallels/PycharmProjects/Courses/232A/project2/stats232a/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Subsample the data
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Normalize the data: subtract the mean image
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image
    # Transpose so that channels come first
    X_train = X_train.transpose(0, 3, 1, 2)
    X_val = X_val.transpose(0, 3, 1, 2)
    X_test = X_test.transpose(0, 3, 1, 2)
    # Package data into a dictionary
    data = {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
    # Training
    weight_scale = 0  # NOTE(review): zero init scale — confirm intended
    learning_rate = 5e-4
    model = FullyConnectedNet([200, 200, 200, 200, 200], input_dim=3 * 32 * 32, reg=0.1,
                              weight_scale=weight_scale, dtype=np.float64, use_batchnorm=True)
    solver = Solver(model, data,
                    print_every=50, num_epochs=20, batch_size=256,
                    update_rule='adam',
                    optim_config={
                        'learning_rate': learning_rate
                    },
                    verbose=True)
    solver.train()
    # Plot loss / train accuracy / validation accuracy in three stacked panels.
    plt.subplot(3, 1, 1)
    plt.title('Training loss')
    plt.xlabel('Iteration')
    plt.subplot(3, 1, 2)
    plt.title('Training accuracy')
    plt.xlabel('Epoch')
    plt.subplot(3, 1, 3)
    plt.title('Validation accuracy')
    plt.xlabel('Epoch')
    plt.subplot(3, 1, 1)
    plt.plot(solver.loss_history, 'o', label='Adam')
    plt.subplot(3, 1, 2)
    plt.plot(solver.train_acc_history, '-o', label='Adam')
    plt.subplot(3, 1, 3)
    plt.plot(solver.val_acc_history, '-o', label='Adam')
    plt.gcf().set_size_inches(15, 15)
    plt.show()
    # Evaluate the trained model on the held-out splits.
    best_model = solver.model
    y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
    y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
    print('Validation set accuracy: ', (y_val_pred == data['y_val']).mean())
    print('Test set accuracy: ', (y_test_pred == data['y_test']).mean())
#
# Implement test
#
# Entry point: instantiate the suite and run one selected case.
test = Test()
# NOTE(review): only test9 runs here; other cases must be invoked manually.
test.test9()
| riemanli/UCLA_STATS_232A_Statistical_Modeling_and_Learning_in_Vision_and_Cognition | project2/stats232a/test.py | test.py | py | 27,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.max",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 47,... |
211978352 | """Contains models to use for prediction and classification."""
import pandas as pd
import joblib
from pandas import DataFrame
from sklearn.feature_selection import RFECV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from visualise import PlotParams, simple_plot
def eval_features() -> None:
    """Evaluates features of the dataset against fetal_health.

    Standardizes the features, runs recursive feature elimination with
    cross-validation (linear SVC), prints the optimal subset, and saves a
    plot of CV accuracy versus number of selected features.
    """
    # Data
    df: DataFrame = pd.read_csv("data/raw/fetal_health.csv")
    X_train: DataFrame = df.drop(["fetal_health"], axis=1)
    y_train: DataFrame = df["fetal_health"]  # NOTE(review): this is a Series, not a DataFrame
    # Scaling
    scaler: StandardScaler = StandardScaler().fit(X_train.values)
    X_train_scaled = scaler.transform(X_train.values)
    # Feature selection
    rfecv: RFECV = RFECV(
        estimator=SVC(kernel="linear"),
        scoring="accuracy",
        min_features_to_select=1,
    )
    rfecv.fit(X_train_scaled, y_train.values)
    print(f"Optimal number of features: {rfecv.n_features_}")
    print(f"Selected features: {', '.join(X_train.columns[rfecv.support_])}")
    # Create a simple line plot
    # NOTE(review): `grid_scores_` was removed in newer scikit-learn
    # (replaced by `cv_results_`) — confirm the pinned sklearn version.
    data = PlotParams(
        x_values=range(1, len(rfecv.grid_scores_) + 1),
        y_values=rfecv.grid_scores_,
        title="Recursive feature elimination with cross-validation",
        x_label="Number of features selected",
        y_label="Cross validation score (accuracy)",
    )
    simple_plot(
        data=data,
        filename="feature-eval-plot.png",
    )
def svm_train() -> None:
    """Trains a support vector classifier.

    Uses the feature subset chosen by eval_features, standardizes using
    statistics fit on the training split only, trains a linear SVC, saves
    the model with joblib, and prints the held-out score.
    """
    # Data
    df: DataFrame = pd.read_csv("data/raw/fetal_health.csv")
    # Features chosen from the evaluation
    X_data: DataFrame = df[
        [
            "baseline value",
            "accelerations",
            "uterine_contractions",
            "prolongued_decelerations",
            "abnormal_short_term_variability",
            "percentage_of_time_with_abnormal_long_term_variability",
            "histogram_number_of_peaks",
            "histogram_mode",
            "histogram_mean",
            "histogram_median",
            "histogram_variance",
        ]
    ]
    y_data: DataFrame = df["fetal_health"]  # NOTE(review): this is a Series
    # Train-test split
    X_train, X_test, y_train, y_test = train_test_split(X_data.values, y_data.values)
    # Scales the data (scaler fit on the training split only, then applied to both)
    scaler: StandardScaler = StandardScaler().fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    # Train model and score
    filepath = "models/svm_classifier.joblib.pkl"
    model: SVC = SVC(kernel="linear").fit(X_train_scaled, y_train)
    # Saves model
    joblib.dump(model, filepath)
    print(f"Model score: {model.score(X_test_scaled, y_test)}")
| MikeyJL/fetal-health | src/model.py | model.py | py | 2,762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
... |
19544623520 | # pyCharm and pyQT5 require significant setup
# https://pythonpyqt.com/how-to-install-pyqt5-in-pycharm/
# install pyqt5. pyqt5-sip, pyqt5-tools for use with pycharm
# PyCharm select File | Settings | Tools | PyCharm. External Tools, click + New Tools, Create QTdesigner and PyUIC tools
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout, QHBoxLayout, QCheckBox, QPushButton
from PyQt5.QtGui import QPixmap
import sys
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import os
import rec_audio
import rec_webcam
from multiprocessing import shared_memory, Process
from datetime import datetime, timedelta
import time
def newRecDir(**kwargs):
    """Create and return a fresh, date-stamped recording directory.

    The directory name is "_YYYY_MM_DD_<letter>", where the letter suffix
    (a..z, extended with extra underscores after z) is advanced until an
    unused name is found.

    Keyword args:
        path: base directory to create the folder under; defaults to the
            current working directory. A trailing backslash is appended if
            missing (the module consistently uses Windows-style separators).

    Returns:
        The full path (base + new directory name) as a string.
    """
    fullPath = os.getcwd() + '\\'
    if 'path' in kwargs:
        # BUG FIX: the original used kwargs("path"), which calls the dict
        # and raises TypeError; subscription kwargs["path"] was intended.
        fullPath = kwargs["path"]
        if kwargs["path"][-1] != "\\":
            fullPath += '\\'
    now = datetime.now().date()
    now = str(now)
    now = "_" + now.replace("-", "_")
    c = 97  # 97 to 122 is a-z
    # Advance the suffix until a directory name that does not yet exist.
    while os.path.isdir(fullPath + now + "_" + chr(c)):
        c += 1
        if c > 122:
            # Exhausted a-z: add another underscore level and restart at 'a'.
            now += "_"
            c = 97
    now += "_" + chr(c)
    print(fullPath, now)
    if fullPath[1] == ":":  # must remove the c: drive designation crashes????
        # NOTE(review): stripping the drive prefix looks like a workaround
        # for a platform-specific crash — confirm it is still needed.
        os.mkdir(fullPath[2:] + now)
    else:
        os.mkdir(fullPath + now)
    fullPath += now
    return fullPath
def startRecording(rec_controls):
    """Flag the shared control buffer so capture workers begin recording.

    Byte 0 of ``rec_controls`` is the run flag (1 = recording). The
    three-seconds-ahead timestamp is computed only for the log line.
    """
    scheduled = datetime.now() + timedelta(seconds=3)
    print("start Second in startRecording", scheduled.second)
    rec_controls[0] = 1  # run flag: workers poll this byte
def stopRecording(rec_controls):
    """Clear the run flag and record the intended stop second.

    Byte 1 receives the second-of-minute three seconds from now (the
    workers' cut-off marker); byte 0 is reset to 0 to end recording.
    """
    cutoff = datetime.now() + timedelta(seconds=3)
    rec_controls[1] = cutoff.second  # end-of-recording second marker
    rec_controls[0] = 0              # run flag cleared: stop recording
    print("stop second in stopRecording", datetime.now().second)
def close_ProcsMem(runningProcs, rec_controls_sm):
    """Reap all worker processes, then release the shared-memory block.

    Args:
        runningProcs: multiprocessing.Process workers to wait for.
        rec_controls_sm: SharedMemory holding the recording control bytes.
    """
    for process in runningProcs:
        # Poll until the worker exits on its own, then join/terminate it.
        while process.is_alive():
            time.sleep(.2)
        process.join()
        process.terminate()
    # BUG FIX: the original called time.wait(4), which does not exist in the
    # time module and raised AttributeError; time.sleep(4) was intended as a
    # grace period before tearing down the shared memory.
    time.sleep(4)
    rec_controls_sm.close()
    rec_controls_sm.unlink()
class userInterface(QWidget):
    """Minimal Qt control window: starts the capture workers and offers a
    single "stop recording" button."""

    def __init__(self, rec_controls, runningProcs):
        """Build the window, start all worker processes, and begin recording.

        Args:
            rec_controls: shared-memory byte buffer used to signal workers.
            runningProcs: multiprocessing.Process capture workers (not yet
                started; they are started here).
        """
        super().__init__()
        self.setFixedHeight(300)
        self.setFixedWidth(600)
        self.rec_controls = rec_controls
        self.runningProcs = runningProcs
        #self.buttRecordStartPauseStop.clicked.connect(self.startStopPause)
        self.lay_controls = QHBoxLayout()
        self.butt_stopRec = QPushButton('stop recording')
        self.butt_stopRec.clicked.connect(self.stopRecording)
        self.lay_controls.addWidget(self.butt_stopRec)
        self.setLayout(self.lay_controls)
        for process in runningProcs:
            process.start()
        # embed in GUI to start recording
        startRecording(rec_controls)

    def startRecording(self):
        # Delegates to the module-level startRecording helper.
        startRecording(self.rec_controls)

    def stopRecording(self):
        # Delegates to the module-level stopRecording helper.
        stopRecording(self.rec_controls)

    def closeEvent(self):
        # NOTE(review): Qt calls closeEvent(self, event) — this override is
        # missing the event parameter and would raise when Qt invokes it.
        # NOTE(review): it also reads the module globals runningProcs and
        # rec_controls_sm (only defined under __main__) instead of self.
        print("closing user interface")
        close_ProcsMem(runningProcs, rec_controls_sm)
if __name__ == '__main__':
    # Create the destination directory for this recording session.
    saveFilesPath = newRecDir()
    # Control bytes shared with every worker: [run flag, stop second, spare].
    init_rec_controls = [0, 99, 99]  # max 255 int8
    rec_controls_sm = shared_memory.SharedMemory(create=True, size=len(init_rec_controls))
    rec_controls = rec_controls_sm.buf
    rec_controls[:] = bytearray(init_rec_controls)
    audioDevice = 1
    webCamDevice = 1
    # One audio worker and two webcam workers, all sharing the control bytes.
    runningProcs = []
    runningProcs.append(Process(target= \
        rec_audio.runCapture, args=(audioDevice, rec_controls_sm, saveFilesPath)))
    runningProcs.append(Process(target= \
        rec_webcam.runCapture, args=(webCamDevice, rec_controls_sm, saveFilesPath)))
    runningProcs.append(Process(target= \
        rec_webcam.runCapture, args=(2, rec_controls_sm, saveFilesPath)))
    # replace start and end recording with user interface
    app = QApplication(sys.argv)
    a = userInterface(rec_controls, runningProcs)
    a.show()
    sys.exit(app.exec_())  # auto shuts without this
| Richard-Kershner/Audio-Video-Screen-TimeStamp-Recorder | main.py | main.py | py | 4,362 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
... |
23078124291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, copy
from spa.clientside import CSocketPool, CConnectionContext, CSqlite
class MyStruct(object):
    """Mutable accumulator for aggregate query results.

    Holds running sums of max/min/avg amounts, a counter of completed
    queries, and the most recently received result row.
    """

    def __init__(self):
        """Start in a zeroed state."""
        self.reset()

    def reset(self):
        """Zero all accumulators and drop the cached row."""
        self.dmax, self.dmin, self.davg = 0.0, 0.0, 0.0
        self.returned = 0
        self.row = []
# Benchmark script: fan out many aggregate queries over a pool of SQLite
# stream-server connections and compare one-shot vs. repeated results.
with CSocketPool(CSqlite) as sp:
    cycles = 10000              # number of repeated queries in the stress loop
    sessions_per_host = 2
    threads = 1
    vHost = ['localhost', 'ws-yye-1']
    channels = sessions_per_host * len(vHost)
    sp.QueueName = 'ar_python'  # set a local queue to backup requests for auto fault recovery
    # One connection context per (thread, channel) slot.
    mcc = [[0 for i in range(channels)] for i in range(threads)]
    m = 0
    while m < threads:
        n = 0
        while n < sessions_per_host:
            j = 0
            while j < len(vHost):
                mcc[m][n * sessions_per_host + j] = CConnectionContext(vHost[j], 20901, 'root', 'Smash123')
                j += 1
            n += 1
        m += 1
    ok = sp.StartSocketPoolEx(mcc)
    if not ok:
        s = input('No connection to SQL stream server and press ENTER key to shut down the application ......')
        exit(-1)
    handler = sp.SeekByQueue()
    sql = 'SELECT max(amount), min(amount), avg(amount) FROM payment'
    filter = input('Input a filter for payment_id')
    if len(filter) > 0:
        # NOTE(review): user input is concatenated directly into the SQL
        # statement — SQL injection risk; consider parameterized queries.
        sql += (' WHERE ' + filter)
    v = sp.AsyncHandlers

    def dr(h, res, errMsg):
        # Open-database completion callback: report failures only.
        if res:
            print('Error code: %d, error message: %s' % (res, errMsg))

    for h in v:
        ok = h.Open('sakila.db', dr)
    mystruct = MyStruct()

    def r(h, vData):
        # Row callback: stash a private copy of the latest result row.
        mystruct.row = copy.deepcopy(vData)

    def er(h, res, errMsg, affected, fail_ok, lastId):
        # Query-completion callback: accumulate aggregates on success.
        if res != 0:
            print('Error code: %d, error message: %s' % (res, errMsg))
        else:
            mystruct.dmax += mystruct.row[0]
            mystruct.dmin += mystruct.row[1]
            mystruct.davg += mystruct.row[2]
            mystruct.returned += 1

    h = sp.SeekByQueue()
    ok = h.Execute(sql, er, r)  # get a one record data for comparison
    ok = h.WaitAll()
    print('Result: max = %f, min = %f, avg = %f' % (mystruct.dmax, mystruct.dmin, mystruct.davg))
    mystruct.reset()
    # Stress loop: issue the same query `cycles` times across the pool.
    n = 0
    while n < cycles:
        h = sp.SeekByQueue()
        ok = h.Execute(sql, er, r)
        n += 1
    for h in v:
        ok = h.WaitAll()
    print('Returned = %d, max = %f, min = %f, avg = %f' % (mystruct.returned, mystruct.dmax, mystruct.dmin, mystruct.davg))
    s = input('Press ENTER key to shut down the application ......')
| udaparts/socketpro | samples/auto_recovery/test_python/test_python.py | test_python.py | py | 2,569 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "spa.clientside.CSocketPool",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "spa.clientside.CSqlite",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "spa.clientside.CConnectionContext",
"line_number": 32,
"usage_type": "call"
},
... |
13600318080 | import scrapy
from common.util import xpath_class
from event.items import ResponseItem
class BIOEventSpider(scrapy.Spider):
    """Scrapy spider for BIO.org event listings.

    Fetches the /events page and emits one ResponseItem per table row of
    the event-search widget; the configured pipelines do the actual
    per-entry parsing and normalization.
    """
    name = 'bio_event'
    base_url = 'https://www.bio.org'
    events_path = '/events'
    source = 'BIO'
    # Spider-specific pipeline chain (ordered by priority number).
    custom_settings = {
        'ITEM_PIPELINES': {
            'event.spiders.bio.pipelines.BIOEventPipeline': 400,
            'event.pipelines.EventTypePipeline': 401,
            'event.pipelines.EventLocationPipeline': 402,
            'event.pipelines.EventDatePipeline': 403,
            'event.pipelines.UrlCleanerPipeline': 404,
            'event.pipelines.EventMetadataPipeline': 405,
            'event.pipelines.StripperPipeline': 406,
            'event.pipelines.WhitespaceNormalizerPipeline': 407,
            'common.pipelines.CsvWriterPipeline': 900,
        }
    }

    def start_requests(self):
        """Yield the single entry-point request for the events page."""
        yield scrapy.Request(f'{self.base_url}{self.events_path}')

    def parse(self, response: scrapy.http.Response, **kwargs):
        """Emit one raw ResponseItem per event table row for the pipelines."""
        entries = response.xpath(
            f'//div[{xpath_class(["event-search"])}]//table/tbody/tr').getall()
        for entry in entries:
            yield ResponseItem({'body': entry, 'meta': response.meta})
| JuroOravec/knwldg | event/event/spiders/bio/spiders.py | spiders.py | py | 1,199 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scrapy.http",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "common.util.xpath_c... |
36714514227 | from dash import html
import dash_bootstrap_components as dbc
from dash.development.base_component import Component
from dataviz.irenderer import IDataStudyRenderer
from dash import dcc
from dataviz.plot_types import name_to_plot
from dataviz.assets.ids import IDAddPlotModal as ID
# Thick light-blue horizontal rule reused inside the add-plot modal body.
horizontal_line = html.Hr(style={'borderWidth': "0.3vh", "width": "100%",
                                 "backgroundColor": "#B4E1FF",
                                 "opacity": "1"})
def modal_add_plot(renderer: IDataStudyRenderer):
    """Build the "Add plot" modal for the given study renderer.

    Layout: a view-selection dropdown, a preview area, a plot-type dropdown
    (populated by callbacks once a view is chosen), a per-plot config panel,
    and a VALIDATE footer button. The modal starts closed.
    """
    print("rendering modal_add_plot")
    return dbc.ModalBody(
        children=[dbc.Modal(
            [
                dbc.ModalHeader("Add plot", style={"color": "#171F26"}),
                dbc.ModalBody(
                    id=ID.body,
                    children=[
                        # Choose which data view the plot will be built from.
                        dcc.Dropdown(
                            list(renderer.study.views.keys()),
                            id=ID.dropdown_views,
                            # NOTE(review): "araton" is not a CSS property —
                            # possibly a typo (e.g. "color") — confirm.
                            style={"araton": "black"},
                        ),
                        html.Div(id=ID.preview),
                        horizontal_line,
                        html.Div(
                            id=ID.dropdown_plots_div,
                            children=[
                                # Plot-type options are filled in by a callback.
                                dcc.Dropdown(
                                    [],
                                    id=ID.dropdown_plots,
                                    style={"araton": "black"},
                                ),
                            ]
                        ),
                        html.Div(id=ID.config_panel_div),
                    ],
                    style={"height": "80vh"}
                ),
                dbc.ModalFooter([
                    dbc.Button(
                        "VALIDATE", id=ID.validate_button,
                        className="ml-auto",
                    )
                ]),
            ],
            id=ID.id,
            is_open=False,  # Open the modal at opening the webpage.
            backdrop=True,
            # Modal to not be closed by clicking on backdrop
            scrollable=True,
            # Scrollable in case of large amount of text
            centered=True,  # Vertically center modal
            keyboard=True,  # Close modal when escape is pressed
            fade=True,  # Let the modal fade instead of appear.
        ),
        ],
        style={
            "max-width": "none", "width": "90%",
            "max-height": "none", "height": "90%"
        },
    )
def plot_args_div_callback(plot_name, selected_view, plot_args_div_children,
                           select_plot_state) -> list[Component]:
    """Return the config-panel children for the currently selected plot.

    Rebuilds the panel only when a plot is newly selected (panel empty) or
    the selection changed since the last render; otherwise returns an
    empty list, leaving the panel unchanged.
    """
    updated_config = []
    # If we don't have any plot selected or if we changed the plot, we must
    # update the plot args panel
    try:
        if ((not plot_args_div_children and plot_name) or
                (plot_name != select_plot_state)):
            # Config panel given by the plot type
            updated_config = name_to_plot(plot_name).config_panel(selected_view)
    except NotImplementedError:
        # NOTE(review): plot types without a config panel presumably raise
        # NotImplementedError; this print looks like debug leftover.
        print(len(updated_config))
    return updated_config
| adangreputationsquad/theriver | dataviz/pages/add_plot_modal.py | add_plot_modal.py | py | 3,145 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dash.html.Hr",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "dataviz.irenderer.IDataStudyRenderer",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "dash_boot... |
40082631108 | # import packages / libraries
import torch
from torchvision.models import resnet
class MNIST_classifier(torch.nn.Module):
    """ implements a simple ConvNet for classifying MNIST images """

    def __init__(self, seed):
        """ initializes two Conv-Layers followed by two linear layers

        Args:
            seed: RNG seed so the random weight initialization is reproducible.
        """
        super().__init__()
        _ = torch.manual_seed(seed)
        # Two 3x3 same-padding convs, then two linear layers.
        # in_features=441 implies a 9x7x7 feature map before flattening
        # (assumes 28x28 MNIST input halved twice by pooling — TODO confirm).
        self.layers = torch.nn.ModuleList([
            torch.nn.Conv2d(1, 3, kernel_size=3, padding=1),
            torch.nn.Conv2d(3, 9, kernel_size=3, padding=1),
            torch.nn.Linear(in_features=441, out_features=66),
            torch.nn.Linear(in_features=66, out_features=10)
        ])
        self.batch_norm_layers = torch.nn.ModuleList([
            torch.nn.BatchNorm2d(num_features=3),
            torch.nn.BatchNorm2d(num_features=9)
        ])
        self.num_conv_layers = 2
        self.num_linear_layers = 2
        self.num_classes = 10

    def forward(self, x):
        """ passes the input through the network and returns the output """
        for i, layer in enumerate(self.layers):
            if i == self.num_conv_layers:
                # Conv -> linear transition: flatten per sample.
                x = x.flatten(start_dim=1)
            x = layer(x)
            if i < self.num_conv_layers + self.num_linear_layers:
                # NOTE(review): this condition holds for every layer index,
                # so ReLU is also applied to the final logits — confirm intended.
                x = torch.nn.ReLU()(x)
            if i < self.num_conv_layers:
                # BatchNorm after activation, then 2x spatial downsampling.
                x = self.batch_norm_layers[i](x)
                x = torch.nn.functional.max_pool2d(x, kernel_size=2)
        return x
class CIFAR10_classifier(torch.nn.Module):
    """ implements a simple ConvNet for classifying CIFAR-10 images """

    def __init__(self, seed):
        """ initializes six Conv-Layers followed by two linear layers

        Args:
            seed: RNG seed so the random weight initialization is reproducible.
        """
        super().__init__()
        _ = torch.manual_seed(seed)
        # Six 3x3 same-padding convs, then two linear layers.
        # in_features=336 implies a 21x4x4 feature map before flattening
        # (assumes 32x32 input pooled 2x after every second conv — TODO confirm).
        self.layers = torch.nn.ModuleList([
            torch.nn.Conv2d(3, 6, kernel_size=3, padding=1),
            torch.nn.Conv2d(6, 9, kernel_size=3, padding=1),
            torch.nn.Conv2d(9, 12, kernel_size=3, padding=1),
            torch.nn.Conv2d(12, 15, kernel_size=3, padding=1),
            torch.nn.Conv2d(15, 18, kernel_size=3, padding=1),
            torch.nn.Conv2d(18, 21, kernel_size=3, padding=1),
            torch.nn.Linear(in_features=336, out_features=58),
            torch.nn.Linear(in_features=58, out_features=10)
        ])
        self.batch_norm_layers = torch.nn.ModuleList([
            torch.nn.BatchNorm2d(num_features=6),
            torch.nn.BatchNorm2d(num_features=9),
            torch.nn.BatchNorm2d(num_features=12),
            torch.nn.BatchNorm2d(num_features=15),
            torch.nn.BatchNorm2d(num_features=18),
            torch.nn.BatchNorm2d(num_features=21)
        ])
        self.num_classes = 10
        self.num_conv_layers = 6
        self.num_linear_layers = 2

    def forward(self, x):
        """ passes the input through the network and returns the output """
        for i, layer in enumerate(self.layers):
            if i == self.num_conv_layers:
                # Conv -> linear transition: flatten per sample.
                x = x.flatten(start_dim=1)
            x = layer(x)
            if i < self.num_conv_layers + self.num_linear_layers:
                # NOTE(review): condition holds for every layer index, so
                # ReLU is also applied to the final logits — confirm intended.
                x = torch.nn.ReLU()(x)
            if i < self.num_conv_layers:
                x = self.batch_norm_layers[i](x)
                if i % 2 == 1:
                    # Downsample 2x after every second conv block.
                    x = torch.nn.functional.max_pool2d(x, kernel_size=2)
        return x
class IMAGENET_classifier(resnet.ResNet):
    """ adjusted version of the ResNet18 for fewer classes """

    def __init__(self, seed, num_classes=10):
        """Build a ResNet-18 backbone with a resized final classifier layer.

        NOTE(review): the `seed` parameter is accepted but never used here
        (no torch.manual_seed call) — confirm whether seeding was intended.
        """
        super().__init__(resnet.BasicBlock, [2, 2, 2, 2])  # ResNet-18 layout
        self.num_classes = num_classes
        # Replace the 1000-way ImageNet head with an num_classes-way one.
        self.fc = torch.nn.Linear(
            in_features=self.fc.in_features,
            out_features=self.num_classes
        )
| michaelhodel/adversarial-training-with-lots | models.py | models.py | py | 3,745 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"li... |
5546998859 | from brownie import interface
from utils.voting import create_vote
from utils.config import (lido_dao_voting_address,
lido_dao_token_manager_address,
lido_dao_node_operators_registry,
get_deployer_account)
from utils.evm_script import encode_call_script
from utils.node_operators import encode_set_node_operators_staking_limits_evm_script
import json, sys, os, re, time
def set_node_operator_staking_limits(tx_params, node_operators):
    """Create an Aragon vote that sets staking limits for node operators.

    Args:
        tx_params: transaction parameters (e.g. {"from": deployer}).
        node_operators: list of dicts with "id" and "limit" keys.

    Returns:
        Whatever create_vote returns (vote id and transaction).
    """
    registry = interface.NodeOperatorsRegistry(lido_dao_node_operators_registry)
    evm_script = encode_set_node_operators_staking_limits_evm_script(node_operators, registry)
    return create_vote(
        vote_desc=
        f'Set staking limit for operators: \n{os.linesep.join(["id {} set limit to {}".format(no["id"], no["limit"]) for no in node_operators])}',
        evm_script=evm_script,
        tx_params=tx_params)
def main():
    """Load operator limits from NODE_OPERATORS_JSON, validate, and vote.

    The JSON file must contain a "node_operators" list of {"id", "limit"}
    dicts. Returns 0 on success.
    """
    file_path = os.environ['NODE_OPERATORS_JSON']
    with open(file_path) as json_file:
        data = json.load(json_file)
    node_operators = data["node_operators"]
    validate_data(node_operators)
    (vote_id, _) = set_node_operator_staking_limits(
        {"from": get_deployer_account()}, node_operators)
    time.sleep(5)  # hack: waiting thread 2
    print(f'Voting created: {vote_id}')
    return 0
def validate_data(node_operators):
    """Sanity-check the operator list before creating a vote.

    Asserts every entry has non-negative "id" and "limit", that the id
    exists on-chain (getNodeOperator raises/reverts otherwise), and that
    no operator id appears twice.
    """
    for node_operator in node_operators:
        assert 'id' in node_operator, "Node operator should contain \"id\""
        assert node_operator["id"] >= 0
        assert 'limit' in node_operator, "Node operator should contain \"limit\""
        assert node_operator["limit"] >= 0
        # On-chain existence check: fails if the id is unknown to the registry.
        interface.NodeOperatorsRegistry(
            lido_dao_node_operators_registry).getNodeOperator(
            node_operator["id"], True)
    ids = [no["id"] for no in node_operators]
    assert len(ids) == len(set(ids)), "Duplicated operators"
| lidofinance/scripts | archive/scripts/set_node_operators_limit.py | set_node_operators_limit.py | py | 1,967 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "brownie.interface.NodeOperatorsRegistry",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.config.lido_dao_node_operators_registry",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "brownie.interface",
"line_number": 15,
"usage_t... |
30389517296 | import os
import sys
import pytest
from logpyle import LogManager, LogQuantity
# {{{ mpi test infrastructure
def run_test_with_mpi(num_ranks, f, *args, extra_env_vars=None):
    """Re-run this file under mpiexec so `f(*args)` executes on every rank.

    The callable and its args are pickled and passed through the
    INVOCATION_INFO environment variable; the spawned interpreter detects
    RUN_WITHIN_MPI and dispatches via run_test_with_mpi_inner().

    Args:
        num_ranks: number of MPI ranks to launch.
        f: picklable callable to run on each rank.
        *args: picklable positional arguments for f.
        extra_env_vars: optional extra environment variables to forward.
    """
    pytest.importorskip("mpi4py")
    if extra_env_vars is None:
        extra_env_vars = {}
    from base64 import b64encode
    from pickle import dumps
    from subprocess import check_call
    env_vars = {
        "RUN_WITHIN_MPI": "1",
        "INVOCATION_INFO": b64encode(dumps((f, args))).decode(),
    }
    env_vars.update(extra_env_vars)
    # NOTE: CI uses OpenMPI; -x to pass env vars. MPICH uses -env
    check_call([
        "mpiexec", "-np", str(num_ranks),
        "--oversubscribe",
    ] + [
        item
        for env_name, env_val in env_vars.items()
        for item in ["-x", f"{env_name}={env_val}"]
    ] + [sys.executable, "-m", "mpi4py", __file__])
def run_test_with_mpi_inner():
    """Per-rank entry point: decode and execute the pickled invocation.

    Reads the base64-encoded, pickled ``(callable, args)`` pair that
    run_test_with_mpi stored in the INVOCATION_INFO environment variable
    and invokes the callable with those arguments.
    """
    from base64 import b64decode
    from pickle import loads
    payload = os.environ["INVOCATION_INFO"].encode()
    func, func_args = loads(b64decode(payload))
    func(*func_args)
# }}}
def setup_manager() -> tuple:
    """Create a parallel LogManager backed by a throwaway sqlite file.

    Returns:
        (logmgr, comm): the LogManager opened in "wu" mode on COMM_WORLD,
        and the communicator itself.

    NOTE(review): the original return annotation said ``LogManager``, but
    the function returns a 2-tuple; corrected to ``tuple``.
    """
    from mpi4py import MPI  # pylint: disable=import-error
    comm = MPI.COMM_WORLD
    # The file name advertises that teardown_manager deletes it afterwards.
    filename = "THIS_LOG_SHOULD_BE_DELETED.sqlite"
    logmgr = LogManager(filename, "wu", comm)
    return logmgr, comm
def teardown_manager(logmgr: LogManager):
    """Close the log manager and delete its on-disk sqlite database."""
    logmgr.close()
    os.remove(logmgr.sqlite_filename)
def test_distributed_execution_basic():
    """Launch the basic rank/parallel-flag check on 2 MPI ranks."""
    run_test_with_mpi(2, _do_test_distributed_execution_basic)


def _do_test_distributed_execution_basic():
    """Per-rank body: LogManager must agree with MPI about rank/parallelism."""
    logmgr, comm = setup_manager()
    rank = comm.Get_rank()
    size = comm.Get_size()
    print("Rank " + str(rank) + " of " + str(size))
    print(str(rank), str(logmgr.rank))
    assert rank == logmgr.rank
    assert logmgr.is_parallel is True
    teardown_manager(logmgr)
def test_distributed_execution_add_watches():
    """Launch the add_watches scenario on 2 MPI ranks.

    BUG FIX: this wrapper previously re-ran
    _do_test_distributed_execution_basic, so the add_watches scenario was
    never actually exercised under MPI.
    """
    run_test_with_mpi(2, _do_test_distributed_execution_add_watches)
def _do_test_distributed_execution_add_watches():
    """Per-rank body: registered watches must all appear in logmgr.watches."""
    logmgr, comm = setup_manager()
    rank = comm.Get_rank()
    size = comm.Get_size()
    print("Rank " + str(rank) + " of " + str(size))

    # Trivial quantities so the watches have something to read.
    class Fifteen(LogQuantity):
        def __call__(self) -> int:
            return 15

    class FifteenStr(LogQuantity):
        def __call__(self) -> str:
            return "15.0"

    logmgr.add_quantity(Fifteen("name1"))
    logmgr.add_quantity(Fifteen("name2"))
    logmgr.add_quantity(FifteenStr("tup_name1"))
    # Watches may be plain names or (name, format) tuples.
    watch_list = ["name1", ("tup_name1", "str"), "name2"]
    logmgr.add_watches(watch_list)
    logmgr.tick_before()
    # do something ...
    # NOTE(review): tick_before is called twice in a row here — confirm the
    # second call was not meant to be tick_after.
    logmgr.tick_before()
    logmgr.save()
    # check that all watches are present
    actual_watches = [watch.expr for watch in logmgr.watches]
    expected = ["name1", "tup_name1", "name2"]
    actual_watches.sort()
    expected.sort()
    print(actual_watches, expected)
    assert actual_watches == expected
    teardown_manager(logmgr)
if __name__ == "__main__":
    if "RUN_WITHIN_MPI" in os.environ:
        # We are a rank spawned by mpiexec: run the pickled invocation.
        run_test_with_mpi_inner()
    elif len(sys.argv) > 1:
        # Allow `python test_distributed.py "expr"` to run one expression.
        exec(sys.argv[1])
    else:
        # Default: run the whole file under pytest.
        from pytest import main
        main([__file__])
| illinois-ceesd/logpyle | test/test_distributed.py | test_distributed.py | py | 3,200 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pytest.importorskip",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pickle.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "subprocess.check_ca... |
11998014126 | #!/usr/bin/python
import json
import math
# Count logical CPUs by counting "processor" entries in /proc/cpuinfo.
cpus = 0
with open('/proc/cpuinfo') as f:
    for line in f:
        if 'processor' in line:
            cpus += 1
# Parse /proc/meminfo into a {field: "value unit"} dict.
meminfo = {}
with open('/proc/meminfo') as f:
    for line in f:
        meminfo[line.split(':')[0]] = line.split(':')[1].strip()
# MemTotal is reported in kB; scale down twice and floor to whole GB.
memory = int(meminfo['MemTotal'].split(' ')[0])/1000
memory = int(math.floor(memory / 1000.0))
# Publish the discovered resources for other components to read.
info = {'cpus':cpus, 'memory':memory}
with open('/etc/prominence.json', 'w') as file:
    json.dump(info, file)
| prominence-eosc/prominence | htcondor/images/worker/write-resources.py | write-resources.py | py | 498 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "math.floor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
}
] |
9105847241 | # coding=utf-8
import webapp2
import sys
import config
import services.files
import services.event
import services.restore
from services.template import render
try:
from google.appengine.api import taskqueue
except ImportError:
pass
reload(sys) # Reload does the trick!
sys.setdefaultencoding('utf8')
class ExportHandler(webapp2.RequestHandler):
    """Lists backup files and enqueues export tasks.

    GET renders the grouped file listing; POST queues a worker task for
    the selected file (local HTTP worker or cloud worker depending on
    config.GET_BACKUP_FROM).
    """

    def get(self):
        """ Show grouped files in bucket """
        try:
            files = services.files.group_files()
        except OSError:
            # The local data directory is missing — tell the user to create it.
            self.response.write('Create "data" folder')
            return
        if config.GET_BACKUP_FROM == 'local':
            path = config.BACKUP_DIR
        else:
            path = 'cloud'
        self.response.write(render('export.html', {'files': files,
                                                   'path': path}))

    def post(self):
        """Enqueue a task-queue job that exports the requested file."""
        try:
            file_name = self.request.get('file')
            if config.GET_BACKUP_FROM == 'local':
                # We can't write to local file system, because of gae limitations
                # And we don't wont store data in cloud storage locally
                # So we just send data row by row to some server
                # See config.py (RESTORE_URL)
                worker_url = '/export/worker/http'
            else:
                worker_url = '/export/worker'
            taskqueue.add(url=worker_url,
                          params={'file': file_name})
            self.response.write('New task was added successfully. Url: %s' % worker_url)
        except Exception as e:
            self.response.write('Error: %s' % e.message)
class ExportHttpWorker(webapp2.RequestHandler):
    """
    Export backup files
    Send data to http service (config.py RESTORE_URL and GET_BACKUP_FROM == 'local')
    """

    def post(self):
        """Stream the named backup file to the configured HTTP service."""
        try:
            services.restore.export_backup_to_http_service(self.request.get('file'))
        except OSError:
            # The local data directory is missing — tell the user to create it.
            self.response.write('Create "data" folder')
            return
        except Exception as e:
            self.response.write('Error %s' % e.message)
            return
        self.response.write('ok')
class ExportWorker(webapp2.RequestHandler):
    """Task-queue worker that restores a backup file from local data."""

    def post(self):
        """Restore the named backup; write 'ok' only on success."""
        try:
            services.restore.export_backup_from_local(self.request.get('file'))
        except OSError:
            # The local data directory is missing — tell the user to create it.
            self.response.write('Create "data" folder')
            return
        except Exception as e:
            self.response.write('Error: %s' % e.message)
            # BUG FIX: previously fell through and appended 'ok' after the
            # error message; return early like ExportHttpWorker does.
            return
        self.response.write('ok')
| nicklasos/gae-data-fallback | controllers/restore.py | restore.py | py | 2,551 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "services.files.files.group_files",
"line_number": 25,
"usage_type": "call"
},
{
... |
16957389593 | from collections import deque
from variables import graph
from variables import vertex
def breadth_first_search(searchG, s, d):
    """Return the fewest-edges path from s to d in searchG.

    Args:
        searchG: graph exposing neighbours(u) -> iterable of adjacent nodes.
        s: start node.
        d: destination node.

    Returns:
        The path [s, ..., d] (just [s] when d == s), or [] when d is
        unreachable from s.
    """
    # BFS, recording each node's discovery parent (s is its own parent).
    parent = {s: s}
    frontier = deque([s])
    while frontier:
        node = frontier.popleft()
        for nbr in searchG.neighbours(node):
            if nbr not in parent:
                parent[nbr] = node
                frontier.append(nbr)
    if d not in parent:
        return []
    # Walk the parent chain back from d and reverse it into s -> d order.
    path = [d]
    cur = d
    while cur != s:
        cur = parent[cur]
        path.append(cur)
    path.reverse()
    return path
def loadmap(filename):
    """Parse a comma-separated map file into a graph plus per-vertex data.

    Lines starting with "V" declare a vertex: id, three ints, a name and
    two 0/1 flags. Lines starting with "E" declare an undirected edge
    (stored as two directed edges).

    Returns:
        (g, vertexData): the populated graph and a dict id -> vertex.
    """
    # BUG FIX: the original opened the file without ever closing it; use a
    # context manager so the handle is released on all paths.
    with open(filename, "r") as inputFile:
        lines = inputFile.readlines()
    vertexData = dict()
    g = graph()
    for i in range(len(lines)):
        lines[i] = lines[i].rstrip().split(",")
    for l in lines:
        if l[0] == "V":
            temp = vertex(int(l[1]), int(l[2]), int(l[3]), int(l[4]),
                          l[5], bool(int(l[6])), bool(int(l[7])), True)
            g.add_vertex(int(l[1]))
            vertexData[int(l[1])] = temp
        elif l[0] == "E":
            # Undirected edge: add both directions.
            g.add_edge((int(l[1]), int(l[2])))
            g.add_edge((int(l[2]), int(l[1])))
    return g, vertexData
def findenemy(soldierData, RvertexData):
    """Collect visible, occupied vertices as potential enemy positions.

    Args:
        soldierData: dict id -> object with getnum() (soldier count).
        RvertexData: dict id -> object with an ifmist flag (hidden by mist).

    Returns:
        dict id -> soldier data for every mist-free vertex holding at
        least one soldier, or False when no such vertex exists.
    """
    result = dict()
    for i in RvertexData.keys():
        if RvertexData[i].ifmist is False and \
                soldierData[i].getnum() != 0:
            result[i] = soldierData[i]
    # BUG FIX: the original tested `result is None`, which is never true for
    # a dict, so the "nothing found" False return was unreachable; test for
    # emptiness instead.
    if not result:
        return False
    return result
| MichaelQi11/Mid-Age-Plane-War | Functions.py | Functions.py | py | 1,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "variables.graph",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "variables.vertex",
"line_number": 37,
"usage_type": "call"
}
] |
16129025675 | from twisted.internet import reactor, defer
from twisted.web.client import getPage
count = 0
class Request:
    """A crawl request: the target URL plus the callback that will be fed
    the eventual HttpResponse."""

    def __init__(self, url, callback):
        self.url, self.callback = url, callback
class HttpResponse:
    """Bundles a downloaded page body with the Request that produced it."""

    def __init__(self, content, request):
        # Keep a handle on the originating request and mirror its URL for
        # convenient access by spider callbacks.
        self.request = request
        self.url = request.url
        # Raw body bytes, plus a UTF-8 text view decoded once up front.
        self.content = content
        self.text = str(content, encoding="utf-8")
import queue
Q = queue.Queue()
class Engine:
    """Minimal Scrapy-like crawl engine built on Twisted deferreds.

    Requests flow through the module-level queue Q; at most `max` downloads
    are in flight at once, and `_close` is the Deferred that crawl() yields
    on until the queue and the in-flight list are both empty.
    """

    def __init__(self):
        self._close = None   # Deferred fired when the whole crawl finishes
        self.max = 5         # cap on concurrent in-flight requests
        self.crawing = []    # requests currently being downloaded

    def get_response_callback(self, content, request):
        # Fired by getPage when a download completes: wrap the body in an
        # HttpResponse, hand it to the request's callback, and enqueue any
        # follow-up Requests the callback yields (spider callbacks may be
        # generators, mirroring Scrapy).
        global count
        print("哈哈哈", count)
        self.crawing.remove(request)
        response = HttpResponse(content, request)
        result = request.callback(response)
        import types
        if isinstance(result, types.GeneratorType):
            for req in result:
                Q.put(req)

    def _next_request(self):
        # Pump the scheduler: finish when idle, back off when at capacity,
        # otherwise start downloads until the concurrency cap is reached.
        global count
        count += 1
        print('222', count)
        if Q.qsize() == 0 and len(self.crawing) == 0:
            print("如果是空就走这里是吧")
            # Nothing queued and nothing in flight: end the crawl by firing
            # the Deferred that crawl() is suspended on.
            self._close.callback(None)
            return
        if len(self.crawing) >= self.max:
            print("如果达到上限了就走这里是吧")
            return
        while len(self.crawing) < self.max:
            try:
                print("我估计这里会打印4次")
                req = Q.get(block=False)
                print("我估计这里会打印3次")
                self.crawing.append(req)
                dfd = getPage(req.url.encode("utf-8"))
                print("会在这等吗")  # no blocking here: getPage is asynchronous, the callbacks fire later
                dfd.addCallback(self.get_response_callback, req)
                dfd.addCallback(lambda _: reactor.callLater(0, self._next_request))
            except Exception:
                # Q.get(block=False) raised queue.Empty: nothing left to start.
                return

    @defer.inlineCallbacks
    def crawl(self, spider):  # step one: push every start request onto the queue
        start_requests = iter(spider.start_requests())
        while True:
            try:
                request = next(start_requests)
                Q.put(request)
            except StopIteration:
                break
        # Only schedules the first pump — it is not executed until the
        # reactor runs, exactly like Scrapy's engine kick-off.
        reactor.callLater(0, self._next_request)
        print('111')
        self._close = defer.Deferred()  # inlineCallbacks needs a Deferred to wait on
        print('原来如此,会把这个流程走完,因为是生成器,会走到第一个yield来')
        yield self._close
        print('111-111')
class MySpider:
    """Toy spider: three copies of the same start URL; parse() just prints
    the response body."""

    start_urls = [
        "http://fanyi.youdao.com/",
        "http://fanyi.youdao.com/",
        "http://fanyi.youdao.com/",
    ]

    def start_requests(self):
        # Lazily wrap each start URL in a Request bound to parse().
        for target in self.start_urls:
            yield Request(target, callback=self.parse)

    def parse(self, response):
        print([response.text])
if __name__ == "__main__":
    spider = MySpider()  # runs __init__
    _active = set()
    engine = Engine()  # runs __init__
    # crawl() is an inlineCallbacks generator: it executes up to its first
    # yield (queueing the start requests, scheduling the first pump) and
    # immediately returns a Deferred — nothing downloads until the reactor runs.
    d = engine.crawl(spider)
    _active.add(d)
    dd = defer.DeferredList(_active)
    dd.addBoth(lambda _: reactor.stop())  # stop the reactor once every crawl Deferred has fired
    print('HERE END')
    reactor.run()  # actually start Twisted's event loop
| czasg/ScrapyLearning | czaSpider/dump/异步/scrapy模拟/test.py | test.py | py | 3,582 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "queue.Queue",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "types.GeneratorType",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "twisted.web.client.getPage",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "twist... |
32919021229 | import logging
import multiprocessing_logging
logging.basicConfig(filename="parsing.log", level=logging.INFO)
multiprocessing_logging.install_mp_handler()
import os
import sys
from seamr import parsers
from seamr.core import Store
import argparse
from tqdm import tqdm
from datetime import datetime
from multiprocessing import Pool
parse_classes = [parsers.CASASParser,
parsers.HorizonHouse,
parsers.ArasParser]
def addLabelIDs(act_id, res_id, stream):
    """Yield label rows with the activity and resident names mapped to their
    integer IDs, and the start/end datetimes converted to POSIX timestamps.

    Input rows are (line, resident, activity, start_dt, end_dt); output rows
    are (line, act_id, res_id, resident, activity, start_ts, end_ts).
    """
    for line, resident, activity, start, end in stream:
        row = (line, act_id[activity], res_id[resident],
               resident, activity, start.timestamp(), end.timestamp())
        yield row
def setSensorID(sensor_ids, stream):
    """Yield sensor events as (line, sensor_id, state, posix_time), coercing
    every field to a plain numeric type via the sensor_ids name->id map."""
    for raw_line, name, state, when in stream:
        yield (int(raw_line), int(sensor_ids[name]),
               float(state), float(when.timestamp()))
def convert(dset_dir):
    """Run label parsing for one raw dataset directory as a sanity check.

    Picks the first parser class that recognises the directory's basename,
    streams every label through it (so any parsing error surfaces here),
    and returns (dataset_name, env_name, resident_count).
    """
    Parser = [c for c in parse_classes if c.can_parse(os.path.basename(dset_dir))][0]
    print("Using: %s" % str(Parser))

    dset = os.path.basename(dset_dir)
    env_name = dset.split(".")[0]

    p = Parser(dset_dir)

    residents = set()
    # activity -> total seconds; accumulating forces the full label stream
    # to be consumed, which is the point of this check.
    acts = dict(other=0.0)

    for line, res, act, _s, _e in tqdm(p.gen_labels(), desc="Setting up labels for %s" % dset):
        acts[act] = acts.get(act, 0.0) + (_e - _s).total_seconds()
        residents.add(res)

    print("Done with %s" % dset_dir)
    # Cleanup vs. original: use the env_name computed above instead of
    # re-splitting dset, and drop the unused `sensors = set()` local.
    return (dset, env_name, len(residents))
# sem.support_activities
# ==========================================================================
# Command-line driver: validate label parsing for each raw dataset directory.
parser = argparse.ArgumentParser(description="Import raw data into SEAMR formats")
parser.add_argument("store", help="store directory")
parser.add_argument("raw", nargs="+", help="Raw data")
args = parser.parse_args()

store = Store(args.store)

# Only directories can be datasets; anything else on the command line is dropped.
dirs = sorted([ d for d in args.raw if os.path.isdir(d) ])

# Keep only directories that at least one parser class knows how to handle.
parsable_dirs = []
for d in dirs:
    parsable = [ c.can_parse(os.path.basename(d)) for c in parse_classes ]
    if not any(parsable):
        print("Don't know how to parse: %s : will be ignoring" % d)
    else:
        parsable_dirs.append(d)

pool = Pool()
mapper = map # pool.imap_unordered
"""
Expects tuples like:
 (int id, unicode name, unicode env, int n_res)
"""
# NOTE(review): the Pool is created but `mapper` is bound to the serial
# builtin `map` (the parallel imap_unordered is commented out), so the
# conversions actually run sequentially and the pool is unused.
for X in mapper(convert, parsable_dirs):
    pass

print("Done with datasets, closing pool")
| zewemli/seamr | seamr/cli/check_label_parsing.py | check_label_parsing.py | py | 2,364 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing_logging.install_mp_handler",
"line_number": 5,
"usage_type": "call"
},
{
"api... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.