seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17040080901 | import requests
import time
from bs4 import BeautifulSoup
def ProfessorLunkedInScrapper(
    ProfessorName, CollegeName
):
    """Search Google for a professor's LinkedIn profile URL.

    Args:
        ProfessorName: Full name of the professor, e.g. "Steven Pinker".
        CollegeName: Institution name used to narrow down the search.

    Returns:
        The first LinkedIn profile URL found in the Google results, or
        None when no LinkedIn link appears (e.g. when Google serves a
        captcha/consent page to the unauthenticated request).
    """
    #ProfessorName = "Roel Verstappen"
    #CollegeName = "University of Groningen"
    # Restrict the search to linkedin.com/in profile pages.
    query = ('https://google.com/search?q=site:linkedin.com/in AND "'
             + ProfessorName + '" AND "' + CollegeName + '"')
    response = requests.get(query)
    soup = BeautifulSoup(response.text, 'html.parser')
    print(soup)
    for anchor in soup.find_all('a'):
        # Bug fix: not every anchor carries an href; .get avoids a KeyError.
        url = anchor.get("href", "")
        if 'https://www.linkedin.com/' in url:
            # Google wraps results as /url?q=<target>&...; strip the
            # "/url?q=" prefix (7 chars) and the trailing parameters.
            return url[7:url.find('&')]
    # Bug fix: previously the function fell through and returned whatever
    # href was seen last (or raised NameError on a page with no anchors).
    return None
print(ProfessorLunkedInScrapper("Steven Pinker","Harvard")) | brucema94/Expertfinder | LinkedinUrl_From_Name.py | LinkedinUrl_From_Name.py | py | 664 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
}
] |
10137589364 | '''
File name: /ciphers/block_ciphers/anu/cipher.py
Author: Cesar Cruz
Project: cryptofeather
Python Version: 2.7
'''
import numpy
from constants import BLOCK_LENGTH, KEY_LENGTH, NUMBER_OF_ROUNDS, SBOX, PBOX, SBOX_INV
from utils.logic_operations import xor
from utils.crypto import sbox_operation, permutation_layer, generate_pboxinv
from utils.others import pretty_print
from utils.latex.table_generator import generate_table
def _cipher(plaintext, keys):
    """Encrypt one block with the ANU Feistel network.

    Args:
        plaintext: bit list of BLOCK_LENGTH entries.
        keys: round-key list with at least NUMBER_OF_ROUNDS entries.

    Returns:
        The ciphertext as a bit list (left half + right half).
    """
    # Bug fix: use floor division so the slice indices stay integral on
    # Python 3 as well (`/` yields a float there); on the original
    # Python 2.7 target `//` is identical for ints.
    half = BLOCK_LENGTH // 2
    pl = plaintext[:half]
    pr = plaintext[half:]
    for i in range(NUMBER_OF_ROUNDS):
        f1, f2 = f_function(pl)
        pt = xor(f1, pr)
        pt = xor(pt, xor(f2, keys[i]))
        # Permute both halves and swap them for the next round.
        pr = permutation_layer(PBOX, pl)
        pl = permutation_layer(PBOX, pt)
    return pl + pr
def _cipher_latex(plaintext, keys):
    """Encrypt like _cipher while logging every round into a LaTeX table.

    Args:
        plaintext: bit list of BLOCK_LENGTH entries.
        keys: round-key list with at least NUMBER_OF_ROUNDS entries.

    Returns:
        The ciphertext as a bit list; side effect: writes the round trace
        via generate_table ("anuCifrado").
    """
    # Bug fix: floor division keeps slice indices integral on Python 3
    # (`/` is float division there); identical behavior on Python 2.7.
    half = BLOCK_LENGTH // 2
    pl = plaintext[:half]
    pr = plaintext[half:]
    rows = []
    for i in range(NUMBER_OF_ROUNDS):
        RK = pretty_print(keys[i], len(keys[i]))
        f1, f2 = f_function(pl)
        f1_lat = pretty_print(f1, len(f1))
        f2_lat = pretty_print(f2, len(f2))
        pt = xor(f1, pr)
        A = pretty_print(pt, len(pt))
        pt = xor(pt, xor(f2, keys[i]))
        B = pretty_print(pt, len(pt))
        pr = permutation_layer(PBOX, pl)
        C = pretty_print(pr, len(pr))
        pl = permutation_layer(PBOX, pt)
        D = pretty_print(pl, len(pl))
        row = [i, RK, f1_lat, f2_lat, A, B, C, D]
        rows.append(row)
    header_row1 = ["Ronda", "RK", "F1", "F2", "A", "B", "C", "D"]
    generate_table("ANU Cifrado", header_row1, rows, "anuCifrado")
    return pl + pr
def _decipher(ciphertext, keys):
    """Decrypt one block by running the ANU rounds in reverse.

    Args:
        ciphertext: bit list of BLOCK_LENGTH entries.
        keys: the same round keys used for encryption.

    Returns:
        The recovered plaintext as a bit list.
    """
    # Bug fix: floor division keeps slice indices integral on Python 3
    # (`/` is float division there); identical behavior on Python 2.7.
    half = BLOCK_LENGTH // 2
    pl = ciphertext[:half]
    pr = ciphertext[half:]
    pbox_inv = generate_pboxinv(PBOX)
    for i in range(NUMBER_OF_ROUNDS - 1, -1, -1):
        # Swap halves (undo the encryption-side swap).
        tmp_pl = pl
        pl = pr
        pr = tmp_pl
        # Inverse permutation layer (capa de permutacion).
        pl = permutation_layer(pbox_inv, pl)
        pr = permutation_layer(pbox_inv, pr)
        # Round function (funcion f).
        f1, f2 = f_function(pl)
        # X-OR operations (operaciones X-OR).
        pr = xor(pr, xor(f2, keys[i]))
        pr = xor(pr, f1)
    return pl + pr
def _decipher_latex(ciphertext, keys):
    """Decrypt like _decipher while logging every round into a LaTeX table.

    Args:
        ciphertext: bit list of BLOCK_LENGTH entries.
        keys: the same round keys used for encryption.

    Returns:
        The recovered plaintext as a bit list; side effect: writes the
        round trace via generate_table ("anuDecifrado").
    """
    # Bug fix: floor division keeps slice indices integral on Python 3
    # (`/` is float division there); identical behavior on Python 2.7.
    half = BLOCK_LENGTH // 2
    pl = ciphertext[:half]
    pr = ciphertext[half:]
    pbox_inv = generate_pboxinv(PBOX)
    rows = []
    for i in range(NUMBER_OF_ROUNDS - 1, -1, -1):
        RK = pretty_print(keys[i], len(keys[i]))
        # Swap halves (undo the encryption-side swap).
        tmp_pl = pl
        pl = pr
        pr = tmp_pl
        A = pretty_print(pl, len(pl))
        B = pretty_print(pr, len(pr))
        # Inverse permutation layer (capa de permutacion).
        pl = permutation_layer(pbox_inv, pl)
        pr = permutation_layer(pbox_inv, pr)
        C = pretty_print(pl, len(pl))
        D = pretty_print(pr, len(pr))
        # Round function (funcion f).
        f1, f2 = f_function(pl)
        F1 = pretty_print(f1, len(f1))
        F2 = pretty_print(f2, len(f2))
        # X-OR operations (operaciones X-OR).
        pr = xor(pr, xor(f2, keys[i]))
        E = pretty_print(pr, len(pr))
        pr = xor(pr, f1)
        F = pretty_print(pr, len(pr))
        row = [i, RK, A, B, C, D, F1, F2, E, F]
        rows.append(row)
    header_row1 = ["Ronda", "RK", "A", "B", "C", "D","F1", "F2", "E", "F"]
    generate_table("ANU Decifrado", header_row1, rows, "anuDecifrado")
    return pl + pr
def f_function(pl):
f1 = list(numpy.roll(pl, -3))
f2 = list(numpy.roll(pl, 8))
f1 = sbox_operation(SBOX, f1)
f2 = sbox_operation(SBOX, f2)
return f1, f2 | ccruz182/Lightweight-Cryptography | cryptofeather/ciphers/block_ciphers/anu/cipher.py | cipher.py | py | 3,372 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "constants.BLOCK_LENGTH",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "constants.BLOCK_LENGTH",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "constants.NUMBER_OF_ROUNDS",
"line_number": 21,
"usage_type": "argument"
},
{
"api_... |
22113638832 | import numpy as np
from grabscreen import grab_screen
import cv2
import time
from directkeys import PressKey, ReleaseKey, W, A, S, D
from grabkeys import key_check
import os
from keras.models import load_model
from scanner import process_img
#loading model
model = load_model('model.h5')
#W key press-time bounds
PRESS = 0.23
RELEASE = 0.30
def auto_pilot(direction):
    """Steer the car according to the network's decision.

    direction: 1 = steer left, 2 = steer right, anything else = straight.
    Only the steering keys (A/D) are touched; throttle is handled by drive().
    """
    if direction == 1:
        # Left: make sure the opposite key is up before pressing.
        ReleaseKey(D)
        PressKey(A)
        return
    if direction == 2:
        # Right: mirror image of the left case.
        ReleaseKey(A)
        PressKey(D)
        return
    # Straight: release both steering keys.
    ReleaseKey(A)
    ReleaseKey(D)
def drive(times):
    """Pulse the W key with a duty cycle defined by PRESS/RELEASE.

    Args:
        times: [elapsed_time, press_start, loop] where elapsed_time is the
            time accumulated since the last full W-key cycle started,
            press_start is when W was last pressed (0.0 while released)
            and loop is the main-loop period (currently unused).

    Returns:
        [elapsed_time, press_start] updated for the next iteration;
        elapsed_time resets to 0.0 once a full press/release cycle ends.
    """
    elapsed_time = times[0]  # Period of time from last W-key full release
    press_start = times[1]   # Last time W-key was pressed (0.0 = released)
    loop = times[2]          # Period of while loop (kept for interface compat)
    press_time = time.time() - press_start  # How long W has been held
    if elapsed_time < PRESS:
        if not press_start:
            press_start = time.time()
        PressKey(W)
        return [elapsed_time, press_start]
    elif elapsed_time < RELEASE:
        ReleaseKey(W)
        # Bug fix: the original tested `(press_time > 0.25 or press_time) < 0.15`,
        # which compares a boolean (or the raw float) to 0.15 and therefore
        # never warned about presses that were too long.
        if press_start and (press_time > 0.25 or press_time < 0.15):
            print('Warning: Press_time ' + str(press_time) + ' is out of bounds. Consider tuning PRESS/RELEASE parameters if the error occurs frequently.')
        return [elapsed_time, 0.0]
    else:
        ReleaseKey(W)
        # Same parenthesization fix as above.
        if press_start and (press_time > 0.25 or press_time < 0.15):
            print('Warning: Press_time ' + str(press_time) + ' is out of bounds. Consider tuning PRESS/RELEASE parameters if the error occurs frequently.')
        return [0.0, 0.0]
def main():
    """Run the self-driving loop: capture the screen, predict, steer.

    Counts down 3 seconds so the user can focus the game window, then
    loops until 'Q' is pressed; 'P' toggles pausing.  Each iteration
    feeds the throttle state machine (drive) and the steering network
    (auto_pilot).
    """
    # 3..2..1 countdown before taking control of the keyboard.
    for i in list(range(3))[::-1]:
        print(str(i+1))
        time.sleep(1)
    last_time = time.time()
    elapser = 0.0  # time accumulated inside the current W-key cycle
    start = 0.0    # timestamp of the last W-key press (0.0 = released)
    pause = False
    while True:
        keys = key_check()
        if 'Q' in keys:
            break
        if 'P' in keys:
            # Toggle pause; the sleep debounces the key press.
            if pause:
                pause = False
                time.sleep(1)
                print('UNPAUSED')
            else:
                pause = True
                time.sleep(1)
                print('PAUSED')
        if not pause:
            loop = time.time() - last_time
            elapser = elapser + loop
            elapser, start = drive([elapser,start,loop])
            last_time = time.time()
            # region presumably matches the game window at the top-left
            # corner of the desktop — confirm against the capture setup.
            screen = grab_screen(region=(0,40,640,480))
            proc_img = process_img(screen)
            # Model expects one 100x100 single-channel image scaled to [0,1].
            sample = proc_img.reshape(-1,100,100,1)
            sample = sample.astype('float32')
            sample /= 255
            pred = model.predict(sample)
            auto_pilot(np.argmax(pred))
main() | pashok3d/GTA_AutoPilot | predictor.py | predictor.py | py | 2,723 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.models.load_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "directkeys.ReleaseKey",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "directkeys.D",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "direct... |
10719487359 | import time
import xml.etree.ElementTree as Et
import random
import pathlib
import shutil
from zipfile import ZipFile
def name_for_ik():
    """Build a GUID-like name (8-4-4-4-12 hex groups) for an integration envelope.

    Each group comes from an independent random integer rendered in
    hexadecimal; the slices strip the leading '0x' prefix and trim every
    group to its fixed width.
    """
    part1 = str(hex(random.randint(1000000000, 9999999999)))
    part2 = str(hex(random.randint(10000, 99999)))
    part3 = str(hex(random.randint(10000, 99999)))
    part4 = str(hex(random.randint(10000, 99999)))
    part5 = str(hex(random.randint(100000000000000, 999999999999999)))
    groups = (part1[2:10], part2[2:6], part3[2:6], part4[2:6], part5[2:14])
    return '-'.join(groups)
def atribute_generator(char_value):  # 17 chars for ED file names, 9 for contract numbers
    """Return a pseudo-random digit string derived from the system clock.

    Args:
        char_value: size of the slice window; the result is the tail of
            the current timestamp, between 1 and char_value - 1 digits.

    Returns:
        A non-empty string of decimal digits.
    """
    stamp = str(int(time.time() * 10000000))
    offset = random.randint(1, char_value - 1)
    # Keep the last (char_value - offset) digits of the timestamp.
    return stamp[len(stamp) + offset - char_value:]
def envelope_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, tags, paramreplace, path_to_save_xml):
    """Rewrite attributes/text of an Envelope XML file.

    Args:
        namespaceprefix: namespace prefix used in the envelope file (igr).
        namespaceuri: namespace URI of that prefix.
        xml_source_file_path: path to the source envelope file.
        tags: attribute names used to locate elements.
        paramreplace: mapping from tag attribute values to new values.
        path_to_save_xml: path and name for the resulting file.

    Returns:
        None; writes the modified tree to path_to_save_xml.
    """
    # Register prefix/URI so ElementTree serialises the original namespace.
    Et.register_namespace(namespaceprefix, namespaceuri)
    tree = Et.parse(xml_source_file_path)  # parse the source XML
    root = tree.getroot()
    for tag in tags:
        for element in root.findall('.//*[@{' + namespaceuri + '}' + tag + ']'):
            for key, value in paramreplace.items():
                # NOTE(review): `in` performs a substring test here, so e.g.
                # 'Doc' would also match 'Document' — confirm exact matching
                # is not intended.
                if element.attrib['{' + namespaceuri + '}' + tag] in 'Document':
                    element.attrib['{' + namespaceuri + '}fileName'] = value
                if element.attrib['{' + namespaceuri + '}' + tag] in key:
                    # Elements with text content get the new value as text;
                    # empty elements get it in the fileIdentity attribute.
                    if len(str(element.text).strip()) > 0:
                        if element.text is None:
                            element.attrib['{' + namespaceuri + '}fileIdentity'] = value
                        else:
                            element.text = value
                    else:
                        element.attrib['{' + namespaceuri + '}fileIdentity'] = value
    tree.write(path_to_save_xml)
def ed421_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, path_to_save_xml, **kwargs):
    """Rewrite attributes of an ED421 XML file.

    Args:
        namespaceprefix: namespace prefix in the ED421 file (empty).
        namespaceuri: namespace URI of the ED421 document.
        xml_source_file_path: path to the source ED421 file.
        path_to_save_xml: path and name for the resulting file.
        **kwargs: attribute names mapped to their new values.

    Returns:
        None; writes the modified tree to path_to_save_xml.
    """
    Et.register_namespace(namespaceprefix, namespaceuri)
    tree = Et.parse(xml_source_file_path)
    root = tree.getroot()
    for key, value in kwargs.items():
        if root.findall(f'.[@{key}]'):  # attribute lives on the root element
            root.attrib[key] = value
        elif root.findall(f'.//*[@{key}]'):  # attribute lives on a child element
            # NOTE(review): only the first matching child is updated even if
            # several carry the attribute — confirm that is intended.
            root.find(f'.//*[@{key}]').set(key, value)
    tree.write(path_to_save_xml, encoding='UTF-8', xml_declaration=True)  # save the XML file
def routeinfo_change_attrib(namespaceprefix, namespaceuri, xml_source_file_path, path_to_save_xml, new_text):
    """Replace the DocumentPackID text of a RouteInfo XML file.

    Args:
        namespaceprefix: namespace prefix used in the file (igr).
        namespaceuri: namespace URI of that prefix.
        xml_source_file_path: path to the source RouteInfo file.
        path_to_save_xml: path and name for the resulting file.
        new_text: text written between the DocumentPackID tags.

    Returns:
        None; writes the modified tree to path_to_save_xml.
    """
    # Register prefix/URI so ElementTree keeps the original namespace form.
    Et.register_namespace(namespaceprefix, namespaceuri)
    document = Et.parse(xml_source_file_path)
    pack_id = document.getroot().find('{' + namespaceuri + '}DocumentPackID')
    pack_id.text = new_text
    document.write(path_to_save_xml)
def create_new_directory(path_to_new_directory, directory_name):
    """Create (or reuse) a sub-directory and return its path.

    Args:
        path_to_new_directory: parent directory for the new folder.
        directory_name: name of the folder to create.

    Returns:
        pathlib.Path of the (possibly pre-existing) sub-directory.
    """
    target = pathlib.Path(path_to_new_directory).joinpath(directory_name)
    # exist_ok keeps an already-present directory; its contents are untouched.
    target.mkdir(exist_ok=True)
    return target
def get_arhive(path, *files):
    """Zip the given files into *path* and delete the originals.

    Args:
        path: destination of the new zip archive.
        files: pathlib.Path objects placed into the archive (flat, by name).

    Returns:
        None; side effects: creates the archive and unlinks the sources.
    """
    with ZipFile(path, 'w') as archive:
        for source in files:
            # Store under the bare file name, then drop the original file.
            archive.write(source, arcname=source.name)
            pathlib.Path(source).unlink()
def move_files(copy_from, copy_to):
    """Move a file into another directory.

    Args:
        copy_from: full path of the file to move.
        copy_to: destination directory for the file.

    Returns:
        None; delegates to shutil.move.
    """
    shutil.move(copy_from, copy_to)
# -----------------------------------------------------------
start_path = pathlib.Path.cwd()
envelope_path = start_path.joinpath('sample/envelope.xml')
routeinfo_path = start_path.joinpath('sample/RouteInfo.xml')
ed421_path = start_path.joinpath('sample/ED421.xml')
# -----------------------------------------------------------
# создать каталоги temp, converts внутри каталога
temp_path = create_new_directory(start_path, 'temp')
convert_path = create_new_directory(start_path, 'converts')
# -----------------------------------------------------------
# переменные
prefix_for_routeinfo_envelope = 'igr'
prefix_ed421 = ''
uri_for_routeinfo_envelope = 'http://www.cbr.ru/igr/'
uri_for_ed421 = 'urn:cbr-ru:elk:v2021.1.0'
text_for_sign_file = 'test signature file'
tags_attrib = ['name', 'fileType'] # теги для функции generate_xml_envelope
# -----------------------------------------------------------
# сгенерировать имена для файлов
def create_ik(iteration_count):
    """Generate *iteration_count* integration envelopes in ./converts.

    For every envelope: fresh GUID-like names are generated, the sample
    ED421/RouteInfo/envelope XML files are filled with new values, all
    pieces are zipped together and the result is moved to the converts
    directory.  The temp working directory is removed afterwards.
    """
    for i in range(1, iteration_count + 1):
        arhive_name = name_for_ik()         # name for the archive holding all files
        ed421_name_for_arh = name_for_ik()  # name for the inner archive with ED421
        routeinfo_name = name_for_ik()      # name for RouteInfo
        sign_name = name_for_ik()           # name for the signature file
        # -----------------------------------------------------------
        file_name_ed421 = pathlib.Path('ED421' + atribute_generator(17) + '.xml')
        new_name_ed421 = temp_path.joinpath(file_name_ed421)
        new_name_routeinfo = temp_path.joinpath(routeinfo_name)
        new_name_envelope = temp_path.joinpath('envelope.xml')
        # -----------------------------------------------------------
        # Write a stub electronic-signature file.
        with open(temp_path.joinpath(sign_name), 'w') as sign_file:
            sign_file.write(text_for_sign_file)
        # Fill the substitution dictionaries with fresh values.
        tags_dictionary = dict(RouteInfo=routeinfo_name,
                               Document=ed421_name_for_arh,
                               Sign=sign_name,
                               AssociatedFileIdentity=ed421_name_for_arh,
                               fileName='ED421' + atribute_generator(17) + '.xml')
        attributes_and_values = dict(EDNo=atribute_generator(8),
                                     EDDate='testEDDate',
                                     ReqNum=atribute_generator(10),
                                     ReqDateTime='testReqDateTime',
                                     GrantDate='testGrantDate',
                                     ApplicationSum=atribute_generator(17))
        # Fill ED421 and save it into the temp directory.
        ed421_change_attrib(prefix_ed421,
                            uri_for_ed421,
                            ed421_path,
                            new_name_ed421,
                            **attributes_and_values)
        # Fill RouteInfo and save it into the temp directory.
        routeinfo_change_attrib(prefix_for_routeinfo_envelope,
                                uri_for_routeinfo_envelope,
                                routeinfo_path,
                                new_name_routeinfo,
                                arhive_name)
        # Fill the envelope and save it into the temp directory.
        envelope_change_attrib(prefix_for_routeinfo_envelope,
                               uri_for_routeinfo_envelope,
                               envelope_path,
                               tags_attrib,
                               tags_dictionary,
                               new_name_envelope)
        # Pack ED421 into its own inner archive.
        get_arhive(temp_path.joinpath(ed421_name_for_arh),
                   new_name_ed421)
        # Assemble the complete envelope archive.
        get_arhive(temp_path.joinpath(pathlib.Path(arhive_name + '.zip')),
                   temp_path.joinpath(ed421_name_for_arh),
                   new_name_routeinfo,
                   new_name_envelope,
                   temp_path.joinpath(sign_name))
        # Move the finished envelope out of temp.
        move_files(temp_path.joinpath(pathlib.Path(arhive_name + '.zip')), convert_path)
    # After all operations are done, delete the temp directory without
    # checking its contents (subdirectories included).
    shutil.rmtree(temp_path, ignore_errors=True)
if __name__ == '__main__':
create_ik(2)
# TODO добавить изменение даты в трех местах в ED421
| Steelglowhawk/updateTool | generator_func.py | generator_func.py | py | 11,827 | python | ru | code | 1 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.randint",
"li... |
71888723067 | import numpy
from sklearn.metrics import cohen_kappa_score, classification_report
import torch
from torch.autograd import Variable
from tqdm import tqdm
import torch.nn as nn
from sklearn.metrics import cohen_kappa_score, classification_report
from models import FitNet_4
from torch import optim
import numpy as np
def evaluation(test_dataloader, model, class_names, epoch, criterion):
    """Evaluate *model* on *test_dataloader* and print loss/report/kappa.

    Args:
        test_dataloader: DataLoader yielding (image, label) batches.
        model: network under evaluation (expected to live on CUDA).
        class_names: ordered class labels for the sklearn report.
        epoch: current epoch number (used in progress messages only).
        criterion: loss function applied to the raw model outputs.
    """
    eval_loss_list = []
    eval_acc = 0          # running count of correctly classified samples
    pred_list = []
    GT_list = []
    pbar_test = tqdm(test_dataloader, total=len(test_dataloader))
    with torch.no_grad():
        for image, label in pbar_test:
            # NOTE(review): torch.autograd.Variable is a deprecated no-op
            # wrapper on modern PyTorch; .cuda() is what matters here.
            image = Variable(image).cuda()
            label = Variable(label).cuda()
            out = model(image)
            loss = criterion(out, label)
            eval_loss_list.append(loss.item())
            # argmax over the class dimension
            _, pred = torch.max(out, 1)
            num_correct = (pred == label).sum()
            pred_list.extend(pred.cpu().numpy().tolist())
            GT_list.extend(label.cpu().numpy().tolist())
            eval_acc += num_correct.item()
            pbar_test.set_description("Testing:epoch{} loss:{}".format(epoch, loss.item()))
    # NOTE(review): this divides correct-sample count by the number of
    # *batches*, not samples, so it is not a real accuracy; the value is
    # unused below — confirm before relying on it.
    epoch_test_acc = eval_acc / len(pbar_test)
    print(
        "Testing:epoch{} finished! Total loss:{}".format(epoch,
                                                         np.mean(eval_loss_list)))
    print(classification_report(y_true=GT_list, y_pred=pred_list, target_names=class_names))
    kappa = cohen_kappa_score(y1=pred_list, y2=GT_list)
    print("Kappa:{}".format(kappa))
| Fivethousand5k/Pytorch-implemented-ECNN | eval.py | eval.py | py | 1,542 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "tqdm.tqdm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variab... |
30165255260 | # Name: Milou Bisseling
# Studentnumber: 10427538
'''
This program converts CSV to JSON
'''
import csv
import json
import sys
inputfile = 'totalbirths.csv'
outputfile = 'totalbirths.json'
# Column names assigned to each CSV row (Dutch birth statistics: period,
# single births, twin births, triple-or-more births).
fieldnames = ("Perioden", "Enkelvoudige geboorten", "Tweelinggeboorten", "Drie- of meervoudige geboorten")
# Open and read CSV file
csvfile = open(inputfile, 'r')
# Each row becomes a dict keyed by the fieldnames above.
reader = csv.DictReader(csvfile, fieldnames)
# Open and write JSON file
jsonfile = open(outputfile, 'w')
# Serialise the whole CSV as one JSON array of row objects.
data = json.dumps([row for row in reader])
jsonfile.write(data)
csvfile.close()
jsonfile.close() | miloubis/DataProcessing | Homework/week-6/convertCSV2JSON.py | convertCSV2JSON.py | py | 559 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.DictReader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
}
] |
43755877766 | '''
Measures the square area of colonies in an image file.
Written by George Walters-Marrah
Last updated: 6/26/2019
'''
# import needed packages
import imageio
import matplotlib.pyplot as plt
import scipy.ndimage as ndi
import numpy as np
from skimage import morphology as morph
import os.path
from os import path
def remove_large_objects(ar, max_size=64, connectivity=1, in_place=False):
    """Remove objects larger than the specified size.

    Expects ar to be an array with labeled objects, and removes objects
    larger than max_size. If `ar` is bool, the image is first labeled.
    This leads to potentially different behavior for bool and 0-and-1
    arrays.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the objects of interest. If the array type is
        int, the ints must be non-negative.
    max_size : int, optional (default: 64)
        The largest allowable object size.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel. Used during
        labelling if `ar` is bool.
    in_place : bool, optional (default: False)
        If ``True``, remove the objects in the input array itself.
        Otherwise, make a copy.

    Raises
    ------
    TypeError
        If the input array is of an invalid type, such as float or string.
    ValueError
        If the input array contains negative values.

    Returns
    -------
    out : ndarray, same shape and type as input `ar`
        The input array with large connected components removed.

    Examples
    --------
    >>> a = np.array([[0, 0, 0, 1, 0],
    ...               [1, 1, 1, 0, 0],
    ...               [1, 1, 1, 0, 1]], bool)
    >>> b = remove_large_objects(a, 5)
    >>> b
    array([[False, False, False,  True, False],
           [False, False, False, False, False],
           [False, False, False, False,  True]], dtype=bool)
    """
    # (Docstring fix: the examples previously showed
    # skimage.morphology.remove_small_objects, i.e. the opposite filter.)
    if in_place:
        out = ar
    else:
        out = ar.copy()
    if max_size == 0:
        # Shortcut inherited from remove_small_objects.  NOTE(review): for a
        # *maximum* size of 0 one could argue every object should be removed;
        # kept as-is to preserve existing behavior.
        return out
    if out.dtype == bool:
        # Boolean mask: label connected components first.
        selem = ndi.generate_binary_structure(ar.ndim, connectivity)
        ccs = np.zeros_like(ar, dtype=np.int32)
        ndi.label(ar, selem, output=ccs)
    else:
        # Integer input is assumed to be already labeled.
        ccs = out
    try:
        component_sizes = np.bincount(ccs.ravel())
    except ValueError:
        raise ValueError("Negative value labels are not supported. Try "
                         "relabeling the input with `scipy.ndimage.label` or "
                         "`skimage.morphology.label`.")
    # Zero out every pixel belonging to a component above the size limit.
    too_big = component_sizes > max_size
    too_big_mask = too_big[ccs]
    out[too_big_mask] = 0
    return out
def measure(imFolder, imVector, imStrain, imPlate, imRepNum, imType , firstMask, secondMaskLow, secondMaskHigh, smallSize, largeSize, stdThreshold, results = True, manual = False, stdManual = 1):
    """Measure the square area of colonies in one uint8 plate image.

    Args:
        imFolder, imVector, imStrain, imPlate, imRepNum, imType: pieces of
            the image path: imFolder/imVector_imStrain_imPlate_imRepNum+imType.
        firstMask: grayscale cutoff; pixels at or above it are treated as
            pure white background and zeroed out.
        secondMaskLow, secondMaskHigh: grayscale window that keeps the gray
            (colony) pixels after smoothing.
        smallSize, largeSize: pixel-count bounds; objects outside them are
            discarded as noise or artifacts.
        stdThreshold: max std of an object's bounding-box (height, width);
            filters out objects with abnormal morphology.
        results: when True, print diagnostics and show intermediate images.
        manual: when True, interactively review size outliers.
        stdManual: width (in std units around the median) of the band
            outside which a colony counts as an outlier in manual mode.

    Returns:
        list: [generic name, mean, median, std, folder, vector, strain,
        plate, rep number, numpy array of individual colony areas].

    Raises:
        ValueError: if the image path does not exist or is not uint8.
    """
    # make an object with the filepath to the image you want to analysis
    imName = imVector + '_' + imStrain + '_' + imPlate + '_' + imRepNum
    imGenericName = imVector + '_' + imStrain + '_' + imPlate
    imPath = imFolder + '/' + imName + imType
    # check if the path exists
    if path.exists(imPath):
        pass
    else:
        raise ValueError('The PATH specified does not exist. Change PATH and try again.')
    # read in plate picture as an uint8 *only works with uint8 dtypes*
    im = imageio.imread(imPath)
    # prints the dtype and min/max. Values should be: dtype = uint8, min = ~0, max = ~255
    dtype = im.dtype
    if results:
        print('Data type:', dtype)
        print('Min. value:', im.min())
        print('Max value:', im.max())
        print('')
    # raises error of image type isn't uint8
    if dtype != 'uint8':
        raise ValueError(imPath + ' must be uint8. Change image file to uint8 then try again.')
    # Gets rid pure white regions of the image
    mask = im < firstMask
    im_mask = np.where(mask, im, 0)
    # show original vs. white-masked images side by side
    if results:
        fig, axes = plt.subplots(1,2)
        axes[0].imshow(im, cmap = 'gray')
        plt.axis('off')
        axes[1].imshow(im_mask, cmap = 'gray')
        plt.axis('off')
        plt.show()
    # Uniforms the photo to make the edges clearer and easier to detect
    im_filt = ndi.uniform_filter(im_mask, size=3)
    # searches for the gray areas (where colonies are).
    col_mask1 = im_filt > secondMaskLow
    col_mask2 = im_filt < secondMaskHigh
    col_mask = col_mask1 & col_mask2
    im_colonies = np.where(col_mask, im, 0)
    # Creates label objects
    labels, nlabels = ndi.label(col_mask)
    # Get initial amount of objects found by mask
    bboxinitial = ndi.find_objects(labels)
    if results:
        print('Objects found in initial mask for ' + imPath + ': ', len(bboxinitial))
        print('')
    # show filtered vs. colony-masked images
    if results:
        fig, axes = plt.subplots(1,2)
        axes[0].imshow(im_filt, cmap = 'gray')
        plt.axis('off')
        axes[1].imshow(im_colonies, cmap = 'gray')
        plt.axis('off')
        plt.show()
    # Removes abnormally small or large objects
    cols_cleaned1 = morph.remove_small_objects(labels, smallSize)
    cols_cleaned2 = remove_large_objects(cols_cleaned1, largeSize)
    bboxes = ndi.find_objects(cols_cleaned2)
    # shows masked image next to the surviving labeled objects
    if results:
        fig, axes = plt.subplots(1,2)
        axes[0].imshow(im_colonies, cmap = 'gray')
        plt.axis('off')
        axes[1].imshow(cols_cleaned2, cmap = 'rainbow')
        plt.axis('off')
        plt.show()
    # Calculates the colony size
    col_size_init = []
    for index in range(len(bboxes)):
        # excludes colonies with abnormal morphology
        npixel = 0
        # 6.45*6.45 presumably converts pixel count to um^2 from a 6.45 um
        # sensor pixel pitch — TODO confirm against the imaging setup.
        dpixel = 6.45*6.45
        colony = cols_cleaned2[bboxes[index]]
        std = np.std(colony.shape[:2])
        if (std <= stdThreshold):
            # count the labeled (non-zero) pixels inside the bounding box
            for image in colony:
                for pixel in image:
                    if pixel > 0:
                        npixel += 1
            meas = npixel*dpixel
            measFin = np.around(meas, 2)
            col_size_init.append(measFin)
        else: pass
    # allows you to manually discard bad data points.
    if manual:
        np_col_size_init = np.array(col_size_init)
        col_size = []
        for index in range(len(bboxes)):
            # excludes colonies with abnormal morphology and perfect squares
            size_std = np.std(np_col_size_init)
            size_median = np.median(np_col_size_init)
            npixel = 0
            dpixel = 6.45*6.45
            colony = cols_cleaned2[bboxes[index]]
            std = np.std(colony.shape[:2])
            if (std <= stdThreshold):
                for image in colony:
                    for pixel in image:
                        if pixel > 0:
                            npixel += 1
                meas = npixel*dpixel
                measFin = np.around(meas, 2)
            else:
                measFin = False
            # allows to manually sift through outliers
            # NOTE(review): `measFin == False` also matches an area of 0.0,
            # silently skipping zero-area colonies — confirm intended.
            if measFin == False:
                pass
            elif measFin < size_median - stdManual * size_std or measFin > size_median + stdManual * size_std:
                plt.imshow(im_colonies[bboxes[index]], cmap = 'gray')
                plt.axis('off')
                plt.show()
                ques = input('Do you want to analyze that colony from ' + imName + '(' + imFolder + ')' + '? If yes, type Y. If no, type N:')
                if ques == 'Y' or ques == 'y':
                    col_size.append(measFin)
                    print('Colony analyzed.')
                elif ques == 'N' or ques == 'n':
                    print('Colony skipped.')
                else:
                    # NOTE(review): answering N here *keeps* the colony,
                    # which looks inverted relative to the prompt — confirm.
                    doubleCheck = input('Did you mean to put N?:')
                    if doubleCheck == 'N' or doubleCheck == 'n':
                        col_size.append(measFin)
                        print('Colony analyzed.')
                    else:
                        print('Colony skipped.')
            else:
                col_size.append(measFin)
        np_col_size = np.array(col_size)
    else:
        np_col_size = np.array(col_size_init)
    # contains all the calculated diameter values and summarized data
    colMean = np.around(np.mean(np_col_size),2)
    colMedian = np.around(np.median(np_col_size),2)
    colStd = np.around(np.std(np_col_size),2)
    data = [imGenericName, colMean, colMedian, colStd, imFolder, imVector, imStrain, imPlate, imRepNum, np_col_size]
    # prints out a summary of the results
    if results:
        print('Data for', imName, '(' + imFolder + ')')
        print('Final amount of colonies measured:', len(np_col_size))
        print('Mean of data: ', colMean)
        print('Median of data: ', colMedian)
        print('Standard deviation of data: ', colStd)
        print('')
    print(imName, 'area calculated.')
    print('')
    return data
def main():
    """Example invocation with empty identifiers; tune masks per image set."""
    measure('', '', '', '', '', '', firstMask = 190, secondMaskLow = 50, secondMaskHigh = 185, smallSize = 2, largeSize = 235, stdThreshold = 1)
if __name__ == '__main__': main()
| gwmarrah/colony-measurer | colSizeMeasurer.py | colSizeMeasurer.py | py | 9,835 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "scipy.ndimage.generate_binary_structure",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "numpy.zeros_like",
"line_number": 78,
"usage_type": "call"
},
{
"api_name":... |
8317691335 | # A script for checking triangular arbitrage opportunities (Forward + Reverse)
# Using a general formula. (Pairs NEED to match the formula)
# ETH/USDT, BTC/USDT, BTC/ETH
# a/b, c/b, c/a
import ccxt
# Insert exchange
testexchange = ccxt.kucoin({
'enableRateLimit': True,
})
# Choose whatever 3 pairs match the general formula.
# If changing pairs, Change the fetch_order_book input parameter and the print statement
def get_a_b_bid_ask():
    """Fetch the best bid/ask for the a/b pair (BTC/USDT).

    Returns:
        (bid, ask) as floats, or None for an empty side of the book.
    """
    order_book = testexchange.fetch_order_book("BTC/USDT")
    a_b_bid = order_book['bids'][0][0] if len (order_book['bids']) > 0 else None
    a_b_ask = order_book['asks'][0][0] if len (order_book['asks']) > 0 else None
    # Bug fix: the log line previously said "ETH/USDT" although this
    # function queries the BTC/USDT order book.
    print(f'(Kucoin test BTC/USDT) The best bid is {a_b_bid}, the best ask is {a_b_ask}')
    return a_b_bid, a_b_ask
def get_c_b_ask_bid():
    """Fetch the best ask/bid for the c/b pair (ETH/USDT).

    Returns:
        (ask, bid) as floats, or None for an empty side of the book.
    """
    order_book = testexchange.fetch_order_book("ETH/USDT")
    c_b_ask = order_book['asks'][0][0] if len (order_book['asks']) > 0 else None
    c_b_bid = order_book['bids'][0][0] if len (order_book['bids']) > 0 else None
    # Bug fix: the log line previously said "BTC/USDT" although this
    # function queries the ETH/USDT order book.
    print(f'(Kucoin test ETH/USDT) The best ask is {c_b_ask}, the best bid is {c_b_bid}')
    return c_b_ask, c_b_bid
def get_c_a_bid_ask():
    """Fetch the best bid/ask for the c/a pair (ETH/BTC).

    Returns:
        (bid, ask) as floats, or None for an empty side of the book.
    """
    order_book = testexchange.fetch_order_book("ETH/BTC")
    c_a_bid = (order_book['bids'][0][0]) if len (order_book['bids']) > 0 else None
    c_a_ask = (order_book['asks'][0][0]) if len (order_book['asks']) > 0 else None
    # Bug fix: the log line previously said "BTC/ETH" although this
    # function queries the ETH/BTC order book.
    print(f'(Kucoin test ETH/BTC) The best bid is {c_a_bid}, the best ask is {c_a_ask}')
    return c_a_bid, c_a_ask
# General formula for the forward arb rate:
# a: the coin to be targeted for arbitrage
def calculate_forward_arb_rate(a_b_bid, c_b_ask, c_a_bid):
    """Print the forward triangular-arbitrage percentage.

    Formula: (a/b bid) * 1/(c/b ask) * (c/a bid); a rate above 1 means a
    profitable forward loop before fees.
    """
    forward_rate = a_b_bid * (1 / c_b_ask) * c_a_bid
    percent = (forward_rate - 1) * 100
    print(f"The forward arbitrage percent is {percent}%")
# General formula for the reverse arb rate:
# a: the coin to be targeted for arbitrage
def calculate_reverse_arb_rate(c_a_ask, c_b_bid, a_b_ask):
    """Print the reverse triangular-arbitrage percentage.

    Formula: 1/(c/a ask) * (c/b bid) * 1/(a/b ask); a rate above 1 means a
    profitable reverse loop before fees.
    """
    reverse_rate = (1 / c_a_ask) * (c_b_bid) * (1 / a_b_ask)
    percent = (reverse_rate - 1) * 100
    print(f"The reverse arbitrage percent is {percent}%")
# a/b = BTC/USDT (comment fixed: the function fetches BTC/USDT)
a_b_bid, a_b_ask = get_a_b_bid_ask()
# c/b = ETH/USDT (comment fixed: the function fetches ETH/USDT)
c_b_ask, c_b_bid = get_c_b_ask_bid()
# c/a = ETH/BTC (comment fixed: the function fetches ETH/BTC)
c_a_bid, c_a_ask = get_c_a_bid_ask()
calculate_forward_arb_rate(a_b_bid, c_b_ask, c_a_bid)
calculate_reverse_arb_rate(c_a_ask, c_b_bid, a_b_ask)
#print(ccxt.exchanges) | AgenP/AgenP-triangular-arb-cex-scanner-v1 | arb_ku_test.py | arb_ku_test.py | py | 2,318 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ccxt.kucoin",
"line_number": 10,
"usage_type": "call"
}
] |
15805513471 | from tkinter import N
import pygame
from pygame.locals import *
from Car import Car
import sys
import neat
import time
def on_init():
    """Initialise all pygame modules; must run before any display call."""
    pygame.init()
on_init()
screen_width = 1920
screen_height = 1080
_running = True
screen = pygame.display.set_mode((screen_width, screen_height), pygame.HWSURFACE | pygame.DOUBLEBUF)
screen.fill("WHITE")
# Number of ms in 1 time unit
# Needed for acceleration
time_unit = 15
pygame.key.set_repeat(time_unit)
FPS = 60
fpsClock = pygame.time.Clock()
# Load game assests
# map as background image
background_image = pygame.image.load("images/map.png").convert_alpha()
# Car image used from : https://github.com/NeuralNine/ai-car-simulation/blob/master/car.png
car_image = pygame.image.load("images/car.png").convert_alpha()
car_image = pygame.transform.scale(car_image, (100, 50))
car_1 = Car(car_image, 881, 800, 0)
#car_1 = Car(car_image, 500, 500, 0)
def on_event(event):
    """Handle one pygame event; a window-close request exits the program."""
    if event.type == QUIT:
        on_cleanup()
    # Manual-driving controls, disabled while NEAT controls the cars:
    # if pygame.key.get_pressed()[K_UP]:
    # car_1.move_forward()
    # if pygame.key.get_pressed()[K_DOWN]:
    # car_1.move_backward()
    # if pygame.key.get_pressed()[K_LEFT]:
    # car_1.move_left()
    # if pygame.key.get_pressed()[K_RIGHT]:
    # car_1.move_right()
def on_loop():
    """Per-frame logic hook; unused (NEAT drives the cars directly)."""
    pass
def on_render():
    """Draw the map and the single debug car, then flip the display."""
    screen.blit(background_image, (0, 0))
    # get_data() presumably refreshes the car's sensor readings before
    # drawing — confirm against the Car class.
    car_1.get_data()
    car_1.draw(screen)
    pygame.display.flip()
def on_cleanup():
    """Shut pygame down and terminate the whole process."""
    pygame.display.quit()
    pygame.quit()
    sys.exit()
def run_simulation(genomes, config):
    """NEAT fitness function: drive one car per genome until all crash or time runs out.

    Args:
        genomes: list of (genome_id, genome) pairs supplied by NEAT.
        config: NEAT config object used to build the feed-forward networks.
    """
    # Empty Collections For Nets and Cars
    nets = []
    cars = []
    # For All Genomes Passed Create A New Neural Network
    for i, g in genomes:
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        g.fitness = 0
        # Every genome starts from the same spawn point on the map.
        cars.append(Car(car_image, 881, 800, 0))
    # timeout = time.time() + 60*5 # 5 minutes from now
    #timeout = time.time() + 15 # 10 seconds from now
    timeout = time.time() + 15 # 15 seconds after current time
    while(_running):
        # End the game when the X is pressed
        for event in pygame.event.get():
            on_event(event)
        # For Each Car see if its alive
        # Get the action it should take
        # Draw the car
        screen.blit(background_image, (0, 0))
        cars_alive = 0
        for i, car in enumerate(cars):
            if car.is_alive:
                cars_alive += 1
                genomes[i][1].fitness = car.get_fitness()
                output = nets[i].activate(car.get_data())
                #### This needs to be tested
                # Pick the action whose output neuron fires strongest.
                choice = output.index(max(output))
                if choice == 0:
                    car.move_forward()
                elif choice == 1:
                    car.move_backward()
                elif choice == 2:
                    car.move_left()
                else:
                    car.move_right()
                car.draw(screen)
        pygame.display.flip()
        # Stop this generation when every car has crashed...
        if cars_alive==0:
            break
        # ...or when the time budget for the generation is spent.
        if time.time()>timeout:
            break
        # if time.time()>timeout:
        # break
        #on_loop()
        #on_render()
        fpsClock.tick(FPS)
# Load Config
config_path = "config.txt"
config = neat.config.Config(neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_path)
# Create Population And Add Reporters
population = neat.Population(config)
population.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
population.add_reporter(stats)
# Run Simulation For A Maximum of 1000 Generations
population.run(run_simulation, 10000)
on_cleanup()
# Use this to save genomes
# https://github.com/CodeReclaimers/neat-python/blob/master/neat/checkpoint.py
# Use this to visualize the network
# https://ai.stackexchange.com/questions/13948/library-for-rendering-neural-network-neat | styyxofficial/NEAT-AI-Racecar | Moving_Car.py | Moving_Car.py | py | 4,216 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.HWSURF... |
11245689019 | import torch
from torch.utils.data import DataLoader
from transformers import AdamW
from core.qa.utils import (
read_squad,
add_end_idx,
add_token_positions,
tokenizer,
model,
)
train_contexts, train_questions, train_answers = read_squad(
"squad-style-answers.json"
)
train_encodings = tokenizer(
train_contexts, train_questions, truncation=True, padding=True
)
add_end_idx(train_answers, train_contexts)
add_token_positions(train_encodings, train_answers)
class SquadDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __getitem__(self, idx):
return {
key: torch.tensor(val[idx]) for key, val in self.encodings.items()
}
def __len__(self):
return len(self.encodings.input_ids)
train_dataset = SquadDataset(train_encodings)
device = (
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)
model.to(device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
optim = AdamW(model.parameters(), lr=5e-5)
for epoch in range(3):
for batch in train_loader:
optim.zero_grad()
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
start_positions = batch["start_positions"].to(device)
end_positions = batch["end_positions"].to(device)
outputs = model(
input_ids,
attention_mask=attention_mask,
start_positions=start_positions,
end_positions=end_positions,
)
loss = outputs[0]
loss.backward()
optim.step()
model.eval()
torch.save(model.state_dict(), "core/qa/saved_models/model_weights.pth")
| expectopatronm/FAQ-Generation-and-SQuaD-Style-QA-Answering-System | core/qa/trainer.py | trainer.py | py | 1,760 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "core.qa.utils.read_squad",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "core.qa.utils.tokenizer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "core.qa.utils.add_end_idx",
"line_number": 22,
"usage_type": "call"
},
{
"api_na... |
40677441703 | """empty message
Revision ID: 37bd12af762a
Revises: fa12c537244a
Create Date: 2022-09-06 21:29:41.287889
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '37bd12af762a'
down_revision = 'fa12c537244a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('cbsds', 'antenna_beamwidth_deg')
op.drop_column('cbsds', 'cpi_digital_signature')
op.drop_column('cbsds', 'horizontal_accuracy_m')
op.drop_column('cbsds', 'antenna_model')
op.drop_column('cbsds', 'eirp_capability_dbm_mhz')
op.drop_column('cbsds', 'antenna_azimuth_deg')
op.drop_column('cbsds', 'antenna_downtilt_deg')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('cbsds', sa.Column('antenna_downtilt_deg', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('antenna_azimuth_deg', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('eirp_capability_dbm_mhz', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('antenna_model', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('horizontal_accuracy_m', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('cpi_digital_signature', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('cbsds', sa.Column('antenna_beamwidth_deg', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| magma/magma | dp/cloud/python/magma/db_service/migrations/versions/020_remove_cpi_related_fields.py | 020_remove_cpi_related_fields.py | py | 1,747 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "alembic.op.drop_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",... |
18128239741 | from django.conf.urls import url, include
from . import views
#app_name = 'dmlpolls'
urlpatterns = [
url(r'^$', views.index, name='poll_index'),
url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='poll_detail'),
url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='poll_results'),
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='poll_vote'),
url(r'^polls/', include('dmlpolls.urls')),
# url(r'^admin/', admin.site.urls),
]
| Yobmod/dmlsite | dmlpolls/urls_old.py | urls_old.py | py | 457 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.co... |
38463867669 | from django.urls import path, re_path
from app import views
urlpatterns = [
# Matches any html file - to be used for gentella
# Avoid using your .html in your resources.
# Or create a separate django app.
re_path(r'^.*\.html', views.gentella_html, name='index'),
# The home page
path('', views.index, name='index'),
path('outliers', views.outliers, name='outliers'),
path('data_fresh', views.data_fresh, name="data_fresh"),
path('data_fresh\\', views.data_fresh, name="data_fresh"),
path('data_fresh/', views.data_fresh, name="data_fresh"),
path('data_fresh_tem_table', views.data_table_tem_fresh_with_pred, name='data_table_tem_fresh'),
path('data_live_tem', views.live_tem, name='data_live_tem'),
]
| pennng/Django-gentella | app/urls.py | urls.py | py | 752 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.re_path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.views.gentella_html",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "app.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.p... |
41163745392 | import os.path
from compiler import compile_file
from interpreter import VirtualMachine
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
raise RuntimeError("Not enough argument to start the compiler")
else:
if sys.argv[1] == "--version":
print("0.2.0-dev")
else:
compiled = compile_file(sys.argv[1])
vm = VirtualMachine()
vm.load_bytecode(compiled, path=os.path.abspath(sys.argv[1]))
vm.init_eval_loop()
| blitpxl/soil | soil/soil.py | soil.py | py | 509 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "compiler.compile_file",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line... |
6849069742 | #三方库 xlrd /xlwt / xlutils
import xlrd
import xlwt
import xlutils
wb = xlrd.open_workbook('table/阿里巴巴2020年股票数据.xls')
#获取所有工作表的名字
# print(wb.sheet_names())
# sheet1 = wb.sheet_names('表格1') #通过工作表名获取
sheet = wb.sheet_by_index(0) #通过工作表的下标ID获取工作表
#获取工作表的行数,列数
# print(sheet.nrows,sheet.ncols)
#获取单元格数据 第一行的第一列
for i in range(sheet.nrows):
for j in range(sheet.ncols):
value1 = sheet.cell(i,j).value
if i >0 :
print(f"{value1:.2f}", end='\t')
else:
print(value1, end=' \t')
print()
# print(sheet.row(i)[j].value , end=' ')
| twlaladelala/pytest | 办公自动化.py | 办公自动化.py | py | 732 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 5,
"usage_type": "call"
}
] |
72592324348 | import configparser
import os
from core.constants import CONFIG_FILE_PATH
__all__ = [
'BotConfigurator'
]
class BotConfigurator(object):
""" Объект-конфигуратор приложения. """
def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'instance'):
cls.instance = super(BotConfigurator, cls).__new__(cls)
return cls.instance
def __init__(self):
config_path = os.path.join(CONFIG_FILE_PATH)
if os.path.isfile(config_path):
self._app_config = configparser.ConfigParser()
self._app_config.read(config_path)
else:
self._app_config = {
'telegram': {
'token': os.environ['TELEGRAM_TOKEN'],
'proxy_url': os.environ.get('PROXY_URL', None)
},
'dialog_flow': {
'token': os.environ['DIALOG_FLOW_TOKEN'],
'lang': os.environ['DIALOG_FLOW_LANG'],
'session_id': os.environ['DIALOG_FLOW_SESSION_ID']
}
}
@property
def app_config(self):
return self._app_config
| balandin-nick/smart-telegram-bot | core/configurator.py | configurator.py | py | 1,174 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "core.constants.CONFIG_FILE_PATH",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path... |
36464449846 | #!/usr/bin/env python
"""
Usage:
python detectface.py -i image.jpg
"""
from argparse import ArgumentParser
import boto3
from pprint import pprint
import sys
def get_client(endpoint):
client = boto3.client('rekognition')
return client
def get_args():
parser = ArgumentParser(description='Detect faces')
parser.add_argument('-e', '--endpoint')
parser.add_argument('-i', '--image')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
if (args.image is None):
print('''
Usage:
python detectface.py --help
''')
sys.exit(-1)
client = get_client(args.endpoint)
with open(args.image, 'rb') as image:
response = client.detect_faces(Image={'Bytes': image.read()},Attributes=['ALL'])
pprint(response)
| wwwins/aws-utils | detectface.py | detectface.py | py | 830 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "boto3.client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"li... |
19880677789 | # cython:language_level=3
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
deltas = 2000
a = 215
r = 290
def fx(n):
global deltas, a, r
x1 = a+n
x2 = a-n
return deltas - (area(x2, r) - area(x1, r))
def fx1(n):
a = 215
r = 290
x1 = a+n
x2 = a-n
return (area(x2, r) - area(x1, r))
def area(x,r):
S = (r**2)*np.arccos(x/r) - x*np.sqrt(r**2 - x**2)
return S
# x = [x * 2.77 for x in range(11)]
# x1 = [x * 0.02 for x in range(0, 11)]
# y = []
# for i in range(11):
# a = x[i]
# y.append(fx1(a)+4909)
#
# print(y)
#
# plt.plot(x1,y)
# plt.xlabel('offset_pixel')
# plt.ylabel('pixel_different')
# x_ticks = np.arange(0,0.23,0.02)
# plt.xticks(x_ticks)
# plt.show()
# plt.savefig('plot_fig.jpg')
# print(fx1(2.56))
# print(area(0, 290))
# print(fx(0))
def main(x1, x2, x3):
global deltas, a, r
deltas = x1
a = x2
r = x3
root = optimize.bisect(fx, 0, 30)
print(root)
root = root * 0.0024
# #
print(root)
| rzyfrank/Internship | cal_deltaArea.py | cal_deltaArea.py | py | 1,006 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.arccos",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.bisect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "scipy.optimize",
"l... |
7795403561 | from data_loader import SimpleDataset
import string
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
import numpy as np
# from run_experiment import save_top_ranks, graph_top_ranks, save_model
# Outputs image features from words
class GRU_REG(nn.Module):
def __init__(self, vocab_size, loss_fn=None, hidden_layer_dim=256, embedding_space=150, use_cuda=False, n_layers=1):
super().__init__()
self.hidden_layer_dim = hidden_layer_dim
self.n_layers = n_layers
self.embedding_space = embedding_space
self.embeddings = nn.Embedding(vocab_size, embedding_space)
self.gru = nn.GRU(embedding_space, hidden_layer_dim, n_layers, batch_first=True)
self.output_layer = nn.Linear(hidden_layer_dim, 2048)
self.use_cuda = use_cuda
self.float_type = torch.FloatTensor
self.long_type = torch.LongTensor
if use_cuda:
print("Using cuda")
self.float_type = torch.cuda.FloatTensor
self.long_type = torch.cuda.LongTensor
self.cuda()
if loss_fn is None:
# self.loss_fn = torch.nn.SmoothL1Loss(size_average=True)
self.loss_fn = torch.nn.MSELoss(size_average=True)
else:
self.loss_fn = loss_fn
def forward(self, sentences, sentences_mask):
batch_size = sentences.data.shape[0]
sequence_size = sentences.data.shape[1]
embeds = self.embeddings(sentences)
packed_embedding = pack_padded_sequence(embeds.view(batch_size, -1, self.embedding_space), sentences_mask, batch_first=True)
outputs, h_gru = self.gru(packed_embedding)
## unpacking: notice that: last_out == h_gru[0,:,:]
# outputs_pad, output_lengths = pad_packed_sequence(outputs, batch_first=True)
# output_lengths = Variable(torch.LongTensor(output_lengths))
# last_out = torch.gather(outputs_pad, 1, output_lengths.view(-1, 1, 1).expand(batch_size, 1, self.hidden_layer_dim)-1).view(batch_size, self.hidden_layer_dim)
predicted_image_features = self.output_layer(F.selu(h_gru[0,:,:]))
return predicted_image_features
def format_sample_into_tensors(self, sample_batch, sample_batch_length, w2i):
# Forward and backward pass per image, text is fixed
b_index = 0
#Padding
sentence_max_length = 0
sentences_mask = []
for sample in sample_batch:
temp_sentence_length = len(sample["processed_word_inputs"])
sentences_mask.append(temp_sentence_length)
if temp_sentence_length > sentence_max_length:
sentence_max_length = temp_sentence_length
word_inputs = np.zeros((sample_batch_length, sentence_max_length)) #Padding zeros
outputs = np.zeros((sample_batch_length, 2048))
for sample in sample_batch:
for index, x in enumerate(sample["processed_word_inputs"]):
word_inputs[b_index][index] = w2i[x]
outputs[b_index] = sample["target_img_features"] #torch.from_numpy().type(self.float_type)
b_index +=1
#Sort
sorted_index = len_value_argsort(sentences_mask)
word_inputs = [word_inputs[i] for i in sorted_index]
word_inputs = torch.from_numpy(np.array(word_inputs, dtype=np.int64))
inputs = Variable(word_inputs.type(self.long_type))
outputs = [outputs[i] for i in sorted_index]
outputs = torch.from_numpy(np.array(outputs))
outputs = Variable(outputs.type(self.float_type))
sentences_mask = [sentences_mask[i] for i in sorted_index]
return inputs, sentences_mask, outputs, sorted_index
def top_rank_accuracy(self, predictions, dataset, sorted_index, top_param=3, val=False, print_failed=False):
# if self.use_cuda:
# predictions = predictions.cpu()
total_size = len(predictions)
correct = 0
correct_cos = 0
dataset = [dataset[i] for i in sorted_index]
for index, prediction in enumerate(predictions):
sample = dataset[index]
actual_slice = np.zeros(10)
prediction_slice = np.zeros(10) #loss from each image
similarity_slice = np.zeros(10)
b_index = 0
for image_id in sample['img_list']:
image_features = sample['img_features'][image_id]
image_features_tensor = Variable(
torch.from_numpy(
image_features).type(self.float_type))
image_loss_from_prediction = self.loss_fn(prediction, image_features_tensor)
image_similarity_from_prediction = F.cosine_similarity(prediction, image_features_tensor, dim=0)
prediction_slice[b_index] = 1.0 - image_loss_from_prediction.data[0]
similarity_slice[b_index] = image_similarity_from_prediction.data[0]
if image_id == sample['target_img_id']:
actual_slice[b_index] = 1.0
b_index += 1
#do argmax on n (top_param) indexes
prediction_indexes = prediction_slice.flatten().argsort()[-top_param:][::-1]
similarity_indexes = similarity_slice.flatten().argsort()[-top_param:][::-1]
if actual_slice[prediction_indexes].any():
correct += 1
if actual_slice[similarity_indexes].any():
correct_cos += 1
else:
if print_failed:
print("INCORRECT")
print(sample)
if val == True:
print(f"{correct} correct out of {total_size} using loss")
print(f"{correct_cos} correct out of {total_size} using cosine similarity")
return float(correct_cos) / total_size
def train_gru_reg_network(dataset,
validation_dataset,
loss_fn=None,
embedding_space=150,
num_epochs=15,
batch_size=32,
save_model=False,
learning_rate = 0.0001,
hidden_layer_dim=256,
use_cuda=False):
if loss_fn is None:
# loss_fn = torch.nn.SmoothL1Loss(size_average=True)
loss_fn = torch.nn.MSELoss(size_average=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=lambda x: x,
# shuffle=False)
shuffle=True)
# Actually make the model
model = GRU_REG(dataset.vocab_size, loss_fn=loss_fn,
embedding_space=embedding_space,
hidden_layer_dim=hidden_layer_dim, use_cuda=use_cuda)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
train_loss = 0.0
top_rank_1_arr = np.zeros(num_epochs)
top_rank_3_arr = np.zeros(num_epochs)
top_rank_5_arr = np.zeros(num_epochs)
for ITER in range(num_epochs):
print(f"Training Loss for {ITER} : {train_loss}")
train_loss = 0.0
count = 0
t_rank_1 = 0
for sample_batch in dataloader:
# Forward and backward pass per image, text is fixed
inputs, sentences_mask, outputs, sorted_index = model.format_sample_into_tensors(sample_batch, batch_size, dataset.w2i)
count += batch_size
prediction = model(inputs, sentences_mask)
loss = model.loss_fn(prediction, outputs)
if use_cuda:
loss = loss.cuda()
train_loss += loss.data[0]
print(f"Loss : {loss.data[0]} \t Count: {count}", end="\r")
# backward pass
model.zero_grad()
# loss.backward(retain_graph=True)
loss.backward()
# update weights
optimizer.step()
print("\n")
validation_loss, top_rank_1, top_rank_3, top_rank_5 = validate_gru_reg_model(
dataset.vocab_size,
dataset.w2i,
validation_dataset,
model=model)
top_rank_1_arr[ITER] = top_rank_1
top_rank_3_arr[ITER] = top_rank_3
top_rank_5_arr[ITER] = top_rank_5
print(f"Top 1: {top_rank_1}")
print(f"Top 3: {top_rank_3}")
print(f"Top 5: {top_rank_5}")
if save_model:
torch.save(model.state_dict(), "data/gru_reg.pt")
return model, top_rank_1_arr, top_rank_3_arr, top_rank_5_arr
def validate_gru_reg_model(vocab_size, w2i, validation_dataset, model_filename="gru_reg.pt",
model=None, embedding_space = 150, print_failed=False):
print("Evaluating model on validation set")
if model is None:
print("Loading Saved Model: " + model_filename)
model = GRU_REG(vocab_size, 2048, hidden_layer_dim=256)
if not use_cuda:
#loading a model compiled with gpu on a machine that does not have a gpu
model.load_state_dict(torch.load("data/"+model_filename, map_location=lambda storage, loc: storage))
else:
model.load_state_dict(torch.load("data/"+model_filename))
model = model.cuda()
val_dl = torch.utils.data.DataLoader(validation_dataset, batch_size=64, collate_fn=lambda x: x)
predictions = None
outputs = None
sorted_index = []
word_inputs, sentences_mask, outputs, sorted_index = model.format_sample_into_tensors(validation_dataset, len(validation_dataset), w2i)
for i in range(0, len(validation_dataset), 64):
words = word_inputs[i:i+64]
mask = sentences_mask[i:i+64]
pred = model(words, mask)
if predictions is None:
predictions = pred
else:
predictions = torch.cat((predictions, pred), dim=0)
loss = model.loss_fn(predictions, outputs)
print(f"Validation Loss : {loss.data[0]}")
top_rank_1 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=1, val=True)
top_rank_3 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=3, val=True)
top_rank_5 = model.top_rank_accuracy(predictions, validation_dataset, sorted_index, top_param=5, val=True, print_failed=print_failed)
return loss.data[0], top_rank_1, top_rank_3, top_rank_5
def len_value_argsort(seq):
return sorted(range(len(seq)), key=lambda x: seq[x], reverse=True)
if __name__ == "__main__":
use_cuda = torch.cuda.is_available()
dataset = SimpleDataset(
training_file="IR_train_easy.json",
preprocessing=True,
preprocessed_data_filename="easy_training_processed_with_questions"
)
validation_dataset = SimpleDataset(
training_file="IR_val_easy.json",
preprocessing=True,
preprocessed_data_filename="easy_val_processed_with_questions"
)
model, top_rank_1_arr, \
top_rank_3_arr, top_rank_5_arr = train_gru_reg_network(
dataset,
validation_dataset,
num_epochs=50,
batch_size=256,
embedding_space=300,
hidden_layer_dim=256,
learning_rate=0.001,
use_cuda=use_cuda)
save_model("GRU_REG_EASY",
hidden_layer_dim=256,
embedding_space=300,
learning_rate=0.001,
loss_fn_name="mse",
model=model)
save_top_ranks(top_rank_1_arr, top_rank_3_arr, top_rank_5_arr, "./results_gru_reg_easy_with_questions.p")
# graph_top_ranks(top_rank_1_arr, top_rank_3_arr, top_rank_5_arr)
| geenen124/nlp_project | gru_regression.py | gru_regression.py | py | 12,316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
13864700033 | #!/usr/bin/python3.5
# -*-coding:Utf-8 -*
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.decomposition import PCA
from matplotlib.ticker import FormatStrFormatter
from RBFKernelPCA import RBF_Kernel_PCA
# We create a dataset of two half moons and project them on 1 dimensional space
values, classes = make_moons(n_samples = 100, random_state = 123)
kernel_pca_values, lambdas = RBF_Kernel_PCA(values, gamma = 15, n_components = 1)
# We consider that the 26th point is a new point to project
new_value = values[25]
print('New value: {}'.format(new_value))
original_projected_value = kernel_pca_values[25]
print('Original projection: {}'.format(original_projected_value))
# We define a projection function for new values
def project_value(new_value, values, gamma, kernel_pca_values, lambdas):
pairwise_distances = np.array([np.sum((new_value - row)**2) for row in values])
kernel = np.exp(-gamma * pairwise_distances)
return kernel.dot(kernel_pca_values / lambdas)
# We use the projection to recalculate the projection of the 26th point
new_projected_value = project_value(new_value, values, 15, kernel_pca_values, lambdas)
print('New projection: {}'.format(new_projected_value))
# Now we visualize the projection on the first principal components
plt.scatter(kernel_pca_values[classes == 0, 0], np.zeros((50)), color = 'red', marker = '^',alpha = 0.5)
plt.scatter(kernel_pca_values[classes == 1, 0], np.zeros((50)), color = 'blue', marker = 'o', alpha = 0.5)
plt.scatter(original_projected_value, 0, color = 'black', label = 'original projection of point X[25]', marker = '^', s = 100)
plt.scatter(new_projected_value, 0, color = 'green', label = 'remapped point X[25]', marker = 'x', s = 500)
plt.legend(scatterpoints = 1)
plt.show()
| PiggyGenius/MachineLearning | NoLibraries/RBFKernelPCA/ProjectNewDataPoints.py | ProjectNewDataPoints.py | py | 1,809 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.make_moons",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "RBFKernelPCA.RBF_Kernel_PCA",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "n... |
31622032831 | import os
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify
from werkzeug.utils import secure_filename
from excel import Excel
from translator import Translator
UPLOAD_FOLDER = './text_files'
DOWNLOAD_FOLDER = './excel_files'
ALLOWED_EXTENSIONS = {'txt'}
app = Flask(__name__, static_folder='./client/build', static_url_path='/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DOWNLOAD_FOLDER'] = DOWNLOAD_FOLDER
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/api/', methods=["POST"])
def create_terminology():
if 'file' not in request.files:
code = 400
msg = "file not in request"
return code, msg
file = request.files['file']
if file.filename == '':
code = 400
msg = "file name empty"
return code, msg
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
text = open(os.path.join(
app.config['UPLOAD_FOLDER'], filename), 'r').read()
if len(text) == 0:
code = 400
msg = "File is empty, please provide a valid '.txt' file."
return code, msg
else:
translator = Translator(text)
translator.detect_source_language(" ".join(text.split(" ")[:5]))
translator.set_stop_words()
tokenized_text = translator.tokenize_text()
words = translator.parse_words_alpha(tokenized_text)
terms = translator.translate(words)
source_language, target_language = translator.get_source_and_target()
terminology_excel = Excel(os.path.splitext(filename)[0])
terminology_excel.write_worksheet(
terms, source_language, target_language)
terminology_excel.close_workbook()
response = send_from_directory(
app.config['DOWNLOAD_FOLDER'], f'{os.path.splitext(filename)[0]}.xlsx', as_attachment=True)
os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))
os.remove(os.path.join(
app.config['DOWNLOAD_FOLDER'], f'{os.path.splitext(filename)[0]}.xlsx'))
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
| atakanzen/terminolator.web | app.py | app.py | py | 2,579 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request.files",
... |
7002219401 | import sendgrid
from ...common import config
sg = sendgrid.SendGridClient(config.sendgrid_api_key)
def send(name, email, subject, html):
message = sendgrid.Mail()
message.add_to('{}'.format(email))
message.set_subject(subject)
message.set_html(html)
message.set_from(config.from_header)
status, msg = sg.send(message) | minupalaniappan/gradfire | daviscoursesearch/flaskapp/service/email.py | email.py | py | 330 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "sendgrid.SendGridClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "common.config.sendgrid_api_key",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "common.config",
"line_number": 4,
"usage_type": "name"
},
{
"api_name":... |
28128126397 | '''
Count the nodes in the global phylogeny
python3 count_nodes.py after_usher_optimized_fasttree_iter6.tree
'''
import sys
from ete3 import Tree
t = Tree(sys.argv[1])
ct = 0
for node in t.traverse('postorder'):
if node.is_leaf():
ct += 1
print(ct)
| bpt26/parsimony | 2_optimize_starting_tree/results/2.3.5/count_nodes.py | count_nodes.py | py | 270 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "ete3.Tree",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
}
] |
24082328734 | #!/usr/bin/python3
"""
Make petitions to the Reddit API
"""
from requests import get
def number_of_subscribers(subreddit):
"""
Takes a subreddit and compute the quantity of subs
"""
base_url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
header = {
'User-Agent': 'Linux:api_advanced:v0.0.0 (by /u/ElEnriquez)'
}
response = get(base_url, headers=header, allow_redirects=False)
if (response.status_code != 200):
return (0)
data = response.json()
subs = data.get('data').get('subscribers')
return (subs)
| WardenCode/holbertonschool-system_engineering-devops | 0x16-api_advanced/0-subs.py | 0-subs.py | py | 584 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
}
] |
24025809080 | __author__ = 'sivvaidyanathan'
from urllib2 import urlopen
from bs4 import BeautifulSoup
import codecs, sys
filename = sys.argv[0]
reader = open(filename, 'r')
writer = codecs.open(filename + "_canonical", 'w', 'utf-8')
for line in reader:
url = line.strip()
if url.find("http") == -1:
url = "http://" + url
data = urlopen(url).read()
soup = BeautifulSoup(data)
links = soup.findAll('link', rel="canonical")
for link in links:
writer.write(url + "\t" + link["href"] + "\n")
| sivaramakrishnanvaidyanathan/crawler | histogram/link_canonical.py | link_canonical.py | py | 520 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line... |
436459396 | from gensim.corpora import TextCorpus, TextDirectoryCorpus
from gensim.models.doc2vec import TaggedDocument
from trec.treccorpus import TrecCorpus
def test_get_texts():
path = "F:/Corpus/trectest/"
file = path + "fr881.dat"
# with open(file, 'r') as fp:
# print(fp.read())
trecc = TrecCorpus(path, dictionary={})
for text, docno in trecc.get_texts():
print(docno, text)
# print(trecc.getstream())
def test_parse_file():
def test():
for i in range(0,10):
yield i
for i in test():
print(i)
break
for i in test():
print(i)
break
def test_read_doc():
a = "ddsad"
b = [1,2,3,4,5]
class TaggedTrecDocument(object):
def __init__(self, trec):
self.trec = trec
self.trec.metadata = True
def __iter__(self):
for content, (doc_id, title) in self.trec.get_texts():
yield TaggedDocument(content, [doc_id])
def test_parse_text2222():
# from trec.treccorpus import TrecCorpus
pname = "f:/Corpus/trectest/"
textt = TextDirectoryCorpus(pname, dictionary={}, metadata=True, lines_are_documents=True)
documents = TaggedTrecDocument(textt)
print(sum(1 for _ in documents))
print(sum(1 for _ in documents))
print(sum(1 for _ in documents))
def test_parse_text():
# from trec.treccorpus import TrecCorpus
pname = "f:/Corpus/trectest/"
trecc = TrecCorpus(pname, dictionary={}, metadata=True)
documents = TaggedTrecDocument(trecc)
print(sum(1 for _ in documents))
print(sum(1 for _ in documents))
print(sum(1 for _ in documents))
# total = 0
# print()
# for text, (docno, title) in trecc.get_texts():
# # print(docno)
# total += 1
# print(docno)
# # print(next(trecc.get_texts()))
# print(total)
def test_traverse_all_docs():
# pname = "f:/Corpus/TrecData/"
pname = "f:/Corpus/trectest/"
trecc = TrecCorpus(pname, dictionary={})
count = 0
for text, docno in trecc.get_texts():
count += 1
if count % 1000 == 0:
print(docno, text)
break
def test_save_to_file():
pname = "f:/Corpus/trectest/"
trecc = TrecCorpus(pname, dictionary={})
sfile = "f:/Corpus/savetest.csv"
trecc.save_to_file(sfile) | kongyq/Project-Arcs | trec/test_treccorpus.py | test_treccorpus.py | py | 2,325 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "trec.treccorpus.TrecCorpus",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "trec.treccorpus",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "gensim.models.doc2vec.TaggedDocument",
"line_number": 46,
"usage_type": "call"
},
{
"a... |
26327096081 | import money
from exceptions import *
from tkinter import *
from datetime import *
from decimal import Decimal
import math
import re
from tkinter import messagebox
from dateutil.rrule import *
from parkomat_interface import ParkomatInterface
class ParkomatFunctions:
""" Klasa realizująca funkcjonalności programu """
__global_date = datetime.now() # zmienna przechowująca aktualnie ustawioną datę w parkomacie
__departure_time = __global_date # zmienna przechowująca czas wyjazdu
__previous_time = 0 # zmienna przechowująca poprzednio zwrócony czas w sekundach dla wrzuconych pieniędzy dla metody seconds_for_money
__inserted_money_by_user = Decimal("0.00") # zmienna przechowująca liczbę wrzuconych pieniędzy przez aktualnego użytkownika
def __init__(self):
self.__window = Tk() # Toplevel widget reprezentujący główne okno programu
self.__interface = ParkomatInterface(self.window) # interfejs programu
self.__moneyHolder = self.interface.moneyHolder # instancja przechowywacza pieniędzy
self.buttons_onclick() # metoda dodające wydarzenia do przycisków
self.actual_date() # metoda aktualizująca datę parkomatu oraz wyjazdu
@property
def window(self):
""" Getter zwracający Toplevel widget reprezentujący główne okno programu """
return self.__window
@window.setter
def window(self, window):
""" Setter ustawiający Toplevel widget reprezentujący główne okno programu """
self.__window = window
@property
def interface(self):
""" Getter zwracający odwołanie do interfejsu programu """
return self.__interface
@interface.setter
def interface(self, interface):
""" Setter ustawiające odwołanie do interfejsu programu """
self.__interface = interface
@property
def moneyHolder(self):
""" Getter zwracający przechowywacz pieniędzy """
return self.__moneyHolder
@moneyHolder.setter
def moneyHolder(self, moneyHolder):
""" Setter ustawiający przechowywacz pieniędzy """
self.__moneyHolder = moneyHolder
@property
def global_date(self):
""" Getter zwracający aktualnie ustawioną datę w parkomacie """
return self.__global_date
@property
def departure_time(self):
""" Getter zwracający datę wyjazdu """
return self.__departure_time
@global_date.setter
def global_date(self, global_date):
""" Setter ustawiający aktualną datę w parkomacie """
self.__global_date = global_date
@departure_time.setter
def departure_time(self, departure_time):
""" Setter ustawiający datę wyjazdu """
self.__departure_time = departure_time
@property
def previous_time(self):
""" Getter zwracający poprzednio dodany czas """
return self.__previous_time
@previous_time.setter
def previous_time(self, previous_time):
""" SSetter ustawiający poprzednio dodany czas """
self.__previous_time = previous_time
@property
def inserted_money_by_user(self):
""" Getter zwracający poprzednio dodany czas """
return self.__inserted_money_by_user
@inserted_money_by_user.setter
def inserted_money_by_user(self, inserted_money_by_user):
""" SSetter ustawiający poprzednio dodany czas """
self.__inserted_money_by_user = inserted_money_by_user
def main_loop(self):
""" Nieskończona pętla służąca do uruchomienia aplikacji trwająca, dopóki okno nie zostanie zamknięte """
self.window.mainloop()
def buttons_onclick(self):
""" Metoda obsługująca wydarzenia, gdy przycisk zostanie wciśnięty """
self.interface.window.button1.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[0]))
self.interface.window.button2.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[1]))
self.interface.window.button3.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[2]))
self.interface.window.button4.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[3]))
self.interface.window.button5.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[4]))
self.interface.window.button6.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[5]))
self.interface.window.button7.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[6]))
self.interface.window.button8.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[7]))
self.interface.window.button9.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[8]))
self.interface.window.button10.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[9]))
self.interface.window.button11.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[10]))
self.interface.window.button12.bind("<ButtonRelease-1>", lambda event: self.add_number_of_money(self.moneyHolder.available_values[11]))
self.bind_button_confirm(lambda event: self.button_confirm(event))
self.bind_change_actual_time(lambda event: self.button_change_actual_time(event))
def actual_date(self):
""" Metoda aktualizująca aktualną datę parkomatu oraz datę wyjazdu"""
self.global_date = self.global_date + timedelta(seconds=1) # dodanie sekundy do aktualnej daty parkomatu
self.departure_time = self.departure_time + timedelta(seconds=1) # dodanie sekundy do daty wyjazdu
# wyświetlenie aktualnej daty parkomatu
self.interface.window.actual_date_label.config(text=self.global_date.strftime("%Y-%m-%d %H:%M"))
# wyświetlenie daty wyjazdu
self.interface.window.date_of_departure_label.config(text=self.departure_time.strftime("%Y-%m-%d %H:%M"))
# powtarzanie funkcji actual_date() co sekundę
self.interface.window.actual_date_label.after(1000, self.actual_date)
def button_confirm(self, event):
""" Funkcja odpowiadająca na naciśnięcie przycisku 'Zatwierdź' """
try:
self.confirm()
except Exception as err:
messagebox.showerror("Błąd", str(err))
def button_change_actual_time(self, event):
""" Funkcja odpowiadająca na naciśnięcie przycisku zmieniającego godzinę """
try:
self.change_actual_time()
except Exception as err:
messagebox.showerror("Błąd", str(err))
def bind_button_confirm(self, f):
""" Funkcja bindująca przycisk 'Zatwierdź' """
self.interface.window.confirm_button.bind("<ButtonRelease-1>", f)
def bind_change_actual_time(self, f):
""" Funkcja bindująca przycisk 'Przestaw' """
self.interface.window.change_actual_date_button.bind("<ButtonRelease-1>", f)
def change_actual_time(self):
""" Metoda ustawiająca godzinę wprowadzoną przez użytkownika """
# sprawdzenie, czy wpisano poprawnie czas
if self.inserted_money_by_user != Decimal("0.00"):
messagebox.showerror("Error", "Nie można zmienić czasu, gdy wrzucono już pieniądze.")
else:
if self.interface.window.hour_entry.get().isdigit() is False or self.interface.window.minute_entry.get().isdigit() is False or int(
self.interface.window.hour_entry.get()) < 0 or int(
self.interface.window.hour_entry.get()) > 23 or int(
self.interface.window.minute_entry.get()) < 0 or int(
self.interface.window.minute_entry.get()) > 59:
raise IncorrectTime("Wpisano niepoprawny czas.")
else:
h1 = int(self.interface.window.hour_entry.get()) # pobranie godziny z entry i przekonwertowanie na int
m1 = int(self.interface.window.minute_entry.get()) # pobranie minuty z entry i przekonwertowanie na int
self.global_date = self.global_date.replace(hour=h1, minute=m1) # ustawienie nowego czasy dla parkomatu
self.departure_time = self.global_date # przypisanie aktualnej daty parkomatu do daty wyjazdu
self.previous_time = 0 # reset wcześniejszego czasu, gdy zmieniamy czas
def add_number_of_money(self, value: Decimal):
""" Metoda dodająca wybraną liczbę monet """
number_of_money = self.interface.window.number_of_money_entry.get() # pobranie wprowadzonej liczby monet
try:
if self.interface.window.number_of_money_entry == "" or number_of_money.isdigit() is False: # jeśli nie wpisano wartości lub nie jest liczbą
raise IncorrectValueError
else: # w przeciwnym wypadku
number_of_money = int(number_of_money)
if value < 10: # jeśli wartość pieniądza wynosi poniżej 10 to tworzymy monetę
for x in range(number_of_money):
self.moneyHolder.add_money(money.Coin(value)) # dodanie monety do przechowywacza
self.inserted_money_by_user += value # dodanie wartości monety do aktualnie wrzuconych przez użytkownika
else: # w przeciwnym wypadku tworzymy banknoty
for x in range(number_of_money):
self.moneyHolder.add_money(money.Bill(value)) # dodanie banknotu do przechowywacza
self.inserted_money_by_user += value # dodanie wartości banknotu do aktualnie wrzuconych przez użytkownika
except IncorrectValueError: # przechwycenie wyjątku dla niepoprawnie wpisanej wartości
messagebox.showerror("Error", "Wpisz poprawną liczbę pieniędzy którą chcesz wrzucić.")
except TooMuchCoinsError as err: # przechwycenie wyjątku, jeśli przekroczono limit nominałów
messagebox.showerror("Error", str(err))
finally: # aktualizacja wrzuconej kwoty oraz daty wyjazdu
self.interface.window.sum_of_money_label.config(text=self.inserted_money_by_user) # wrzucona kwota
self.departure_date() # aktualizacja daty wyjazdu
def input_validator(self):
""" Metoda walidująca numer rejestracyjny """
# porównanie numeru do wyrażenia regularnego
pattern = re.match("^[A-Z0-9]+$", self.interface.window.registration_number_entry.get())
if self.interface.window.registration_number_entry.get() == "": # błąd jeśli nie wpisano numeru rejestracyjnego
raise RegistrationNumberError("Wpisz numer rejestracyjny.")
elif bool(pattern) is False: # błąd, jeśli numer nie pasuje do wyrażenia regularnego
raise RegistrationNumberError("Numer rejestracyjny może składać się tylko z wielkich liter od A do Z i cyfr")
def confirmation_of_payment(self):
""" Metoda wyświetlająca okno z potwierdzeniem opłacenia parkingu """
messagebox.showinfo("Potwierdzenie opłacenia parkingu",
"Numer rejestracyjny: {} \n\nCzas zakupu: {} \n\nTermin wyjazdu: {}"
.format(self.interface.window.registration_number_entry.get(),
self.interface.window.actual_date_label.cget("text"),
self.interface.window.date_of_departure_label.cget("text")))
def rules(self, departure_date, seconds):
""" Zasady strefy płatnego parkowania obowiązuje w godzinach od 8 do 20 od poniedziałku do piątku """
rr = rrule(SECONDLY, byweekday=(MO, TU, WE, TH, FR), byhour=(8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
dtstart=departure_date, interval=seconds)
return rr.after(departure_date)
def seconds_for_money(self, amount: Decimal):
""" Metoda zwracająca liczbę sekund dla wrzuconych pieniędzy """
total_seconds = 0 # zmienna przechowująca sumę dodanych sekund
grosz_1h = 60 * 60 / 200 # sekunda za jednego grosza pierwszej godziny
grosz_2h = 60 * 60 / 400 # sekunda za jednego grosza drugiej godziny
grosz_xh = 60 * 60 / 500 # sekunda za jednego grosza lub większej godziny
if total_seconds < 3600: # jeśli suma sekund jest mniejsza od godziny zapisanej w sekundach
if amount >= 2: # jeśli suma jest większa lub równa 2
total_seconds += 3600 # dodaj godzinę
amount -= 2 # odejmij od sumy koszt jednej godziny
else:
seconds = amount * 100 * Decimal(grosz_1h) # obliczona liczba sekund
total_seconds += seconds # dodanie obliczonych sekund do całościowej liczby sekund
amount = 0 # zerowanie sumy
if total_seconds < 7200: # jeśli suma sekund jest mniejsza od dwóch godzin zapisanej w sekundach
if amount >= 4: # jeśli suma jest większa lub równa 4
total_seconds += 3600 # dodaj godzinę
amount -= 4 # odejmij od sumy koszt jednej godziny
else:
seconds = amount * 100 * Decimal(grosz_2h) # obliczona liczba sekund
total_seconds += seconds # dodanie obliczonych sekund do całościowej liczby sekund
amount = 0 # zerowanie sumy
while amount > 0: # wykonuj, dopóki suma wrzuconych pieniędzy jest większa od zera
if total_seconds >= 7200: # jeśli suma sekund jest większa lub równa dwóch godzin zapisanej w sekundach
if amount >= 5: # jeśli suma jest większa lub równa 5
total_seconds += math.floor((amount / 5)) * 60 * 60 # dodanie całkowitej liczby godzin
amount -= 5 * math.floor((amount / 5)) # odjęcia całkowitej liczby godzin od sumy
else:
seconds = amount * 100 * Decimal(grosz_xh) # obliczona liczba sekund
total_seconds += seconds # dodanie obliczonych sekund do całościowej liczby sekund
amount = 0 # zerowanie sumy
temp_seconds = total_seconds
total_seconds -= self.previous_time # od całkowitego czasu odjęcie wcześniejszego
self.previous_time = temp_seconds # ustawienie nowego wcześniejszego czasu
return int(total_seconds)
def departure_date(self):
""" Metoda ustawiająca datę wyjazdu """
free_hours = [x for x in range(0, 24) if x not in range(8, 20)] # lista z darmowymi godzinami
amount = self.inserted_money_by_user # suma przechowywanych pieniędzy
seconds_paid = self.seconds_for_money(amount) # liczba zapłaconych sekund
if seconds_paid > 0: # jeśli liczba zapłaconych sekund jest większa od zera
if self.departure_time.weekday() == 5: # jeśli jest sobota
self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=2)
elif self.departure_time.weekday() == 6: # jeśli jest niedziela
self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=1)
elif self.departure_time.hour in free_hours: # jeśli są dni robocze i aktualna godzina jest darmowa
if self.departure_time.hour > 19: # jeśli jest po godzinie 19:00
self.departure_time = self.departure_time.replace(hour=8, minute=00) + timedelta(days=1)
else: # jeśli jest godzina między 0 a 8
self.departure_time = self.departure_time.replace(hour=8, minute=00)
# wyświetlenie w label zaktualizowanej daty wyjazdu
self.departure_time = self.rules(self.departure_time, seconds_paid)
self.interface.window.date_of_departure_label.config(text=self.departure_time.strftime("%Y-%m-%d %H:%M"))
def confirm(self):
""" Funkcja włączająca się przy kliknięciu przycisku 'Zatwierdź' """
self.input_validator() # sprawdzenie walidacji numeru rejestracyjnego
if self.inserted_money_by_user > 0: # wykonanie, jeśli suma monet jest większa od 0
self.confirmation_of_payment() # wykonanie funkcji potwierdzającej płatność
self.reset() # po potwierdzeniu rezerwacji reset parkomatu do stanu początkowego
else: # w przeciwnym wypadku wyświetl błąd
raise NotInsertedMoney("Nie wrzucono pieniędzy.")
def reset(self):
""" Funkcja resetująca parkomat do stanu początkowego """
self.interface.window.registration_number_entry.delete(0, "end") # reset pola z numerem rejestracyjnym
self.interface.window.sum_of_money_label.config(text="0.00") # reset pola z wrzuconymi pieniędzmi
self.interface.window.date_of_departure_label.config(text="") # reset pola z datą wyjazdu
self.global_date = datetime.now() # reset czasu parkomatu do stanu początkowego
self.departure_time = self.global_date # ustawienie z powrotem czasu wyjazdy do stanu początkowego
self.interface.window.number_of_money_entry.delete(0, "end") # reset pola z liczbą monet
self.interface.window.number_of_money_entry.insert(0, "1") # wpisanie domyślnej wartości
self.interface.window.hour_entry.delete(0, "end") # reset entry z godziną
self.interface.window.hour_entry.insert(0, "0") # wpisanie domyślnej wartości
self.interface.window.minute_entry.delete(0, "end") # reset entry z minutą
self.interface.window.minute_entry.insert(0, "0") # wpisanie domyślnej wartości
self.previous_time = 0 # reset poprzednio dodanego czasu
self.inserted_money_by_user = Decimal("0.00") # reset wrzuconych pieniędzy dla użytkownika
| DZietara/parkomat | main/parkomat_functions.py | parkomat_functions.py | py | 18,188 | python | pl | code | 0 | github-code | 6 | [
{
"api_name": "datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "parkomat_interface.ParkomatInterface",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tki... |
26538911991 | import hashlib
import os.path
from typing import List, Optional
import requests
from connectors.Triage.const import TRIAGE_URL, TRIAGE_LAST_100_RESULTS_FROM_NOW, TRIAGE_HEADER, OUTPUT_FOLDER
from connectors.utils import upload_file_to_malstream
def get_last_100_analysis() -> List:
r = requests.get(f"{TRIAGE_URL}{TRIAGE_LAST_100_RESULTS_FROM_NOW}", headers=TRIAGE_HEADER)
if r.status_code != 200:
return []
return r.json()['data']
def download_file(_id: str) -> Optional[str]:
r = requests.get(f"{TRIAGE_URL}/samples/{_id}/sample", headers=TRIAGE_HEADER)
if r.status_code != 200:
return None
file_path = os.path.join(OUTPUT_FOLDER, hashlib.sha256(r.content).hexdigest())
with open(file_path, 'wb') as f:
f.write(r.content)
return file_path
def main():
res = get_last_100_analysis()
for r in res:
file_path = download_file(r['id'])
if not file_path:
print(f'Error while download sample {r["id"]}')
continue
status_code = upload_file_to_malstream(file_path)
if status_code != 200 and status_code != 409:
print(f'Error on upload {file_path}')
print(f"Cleaning extracted file {OUTPUT_FOLDER}")
for f in os.listdir(OUTPUT_FOLDER):
os.remove(os.path.join(OUTPUT_FOLDER, f))
if __name__ == '__main__':
main()
| CorraMatte/malstream | connectors/Triage/connector.py | connector.py | py | 1,374 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "connectors.Triage.const.TRIAGE_URL",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "connectors.Triage.const.TRIAGE_LAST_100_RESULTS_FROM_NOW",
"line_number": 12,
"usage_type... |
5292164947 | #right now, I am using this script to play around w/ differnt definitions of signal and control regions
import ROOT
from TIMBER.Analyzer import HistGroup, CutGroup
from TIMBER.Tools.Common import CompileCpp
from argparse import ArgumentParser
from XHYbbWW_class import XHYbbWW
from collections import OrderedDict
def KinematicLepton(self): #bringing this function in here so I can select lepton w/o quality/isolation cuts
self.a.Define('kinEleIdx','kinElectron(Electron_pt,Electron_eta,Electron_phi,Higgs_phi,Wqq_phi)')
self.a.Define('kinMuIdx','kinMuon(Muon_pt,Muon_eta,Muon_phi,Higgs_phi,Wqq_phi)')
self.a.Cut('kinLepton_cut','kinEleIdx != -1 || kinMuIdx != -1') #at least one good lepton
self.a.Define('LeptonType','LeptonIdx(kinEleIdx,kinMuIdx,Electron_pt,Muon_pt)') #picks higher pt signal lepton - output = 0 (lepton is electron) or 1 (lepton is muon)
self.SIGLEP = self.getNweighted()
self.AddCutflowColumn(self.SIGLEP,'SIGLEP')
#For ease, merge some lepton columns that will be useful later (for lepton-type specific variables, use LeptonType to determine if electron or muon)
self.a.Define('Lepton_pt','LeptonType == 1 ? Muon_pt[kinMuIdx] : Electron_pt[kinEleIdx]')
self.a.Define('Lepton_eta','LeptonType == 1 ? Muon_eta[kinMuIdx] : Electron_eta[kinEleIdx]')
self.a.Define('Lepton_phi','LeptonType == 1 ? Muon_phi[kinMuIdx] : Electron_phi[kinEleIdx]')
self.a.Define('Lepton_mass','LeptonType == 1 ? Muon_mass[kinMuIdx] : Electron_mass[kinEleIdx]')
return self.a.GetActiveNode()
def MXvsMY_studies(self):
##### NEW VARIABLES FOR LATER USE #####
#W_leptonic transverse mass
self.a.Define('W_massTran','TransverseMass(MET_pt,Lepton_pt,MET_phi,Lepton_phi)') #Transverse W mass
# self.a.Define('W_massTran_genMET','TransverseMass(MET_fiducialGenPt,Lepton_pt,MET_fiducialGenPhi,Lepton_phi)') #using generator-level MET variables
#Lorentz 4-vectors
self.a.Define('MET_vect','hardware::TLvector(MET_pt,0,MET_phi,0)') #neutrino mass negligable, for now assuming MET_eta = 0 (p_z = 0)
self.a.Define('Lepton_vect','hardware::TLvector(Lepton_pt,Lepton_eta,Lepton_phi,Lepton_mass)')
self.a.Define('Wqq_vect','hardware::TLvector(Wqq_pt,Wqq_eta,Wqq_phi,Wqq_msoftdrop)')
self.a.Define('Hbb_vect','hardware::TLvector(Higgs_pt,Higgs_eta,Higgs_phi,Higgs_msoftdrop)')
#Invariant masses of W/Y/X
self.a.Define('W_massInv','hardware::InvariantMass({MET_vect,Lepton_vect})') #full invariant mass
self.a.Define('Y_mass','hardware::InvariantMass({Lepton_vect,MET_vect,Wqq_vect})')
self.a.Define('X_mass','hardware::InvariantMass({Lepton_vect,MET_vect,Wqq_vect,Hbb_vect})')
studiesPlots = HistGroup('studiesPlots')
#######################################
#First lets make some plots examining the lepton quality cuts in the different MC samples
#Muon_mediumId, Electron_mvaFall17V2noIso vs eta, Electron_mvaFall17V2noIso_WP80, Electron_mvaFall17V2noIso_WP90, Electron_mvaFall17V2noIso_WPL
start=self.a.GetActiveNode()
muonEvents=self.a.Cut('Muon_events','LeptonType == 1')
self.a.SetActiveNode(muonEvents)
self.a.ObjectFromCollection('kinMu','Muon','kinMuIdx')
#studiesPlots.Add('kinMu_mediumId',self.a.GetActiveNode().DataFrame.Histo1D(('kinMu_mediumId','kinMu_mediumId',2,0,2),'kinMu_mediumId','weight__nominal')) #bins may not work
self.a.SetActiveNode(start)
electronEvents=self.a.Cut('Electron_events','LeptonType == 0')
self.a.SetActiveNode(electronEvents)
self.a.ObjectFromCollection('kinEle','Electron','kinEleIdx')
#studiesPlots.Add('kinEle_mvaFall17V2noIso vs eta',self.a.DataFrame.Histo2D(('kinEle_mvaFall17V2noIso vs eta','kinEle_mvaFall17V2noIso vs eta',1000,0,1,250,0,2.5),'kinEle_mvaFall17V2noIso', 'kinEle_eta','weight__nominal'))
#Make three plots for electron mva (for different etas/ECAL regions)
no_eta = self.a.GetActiveNode()
inner_barrel = self.a.Cut('inner_barrel','abs(kinEle_eta) < 0.8')
self.a.SetActiveNode(inner_barrel)
studiesPlots.Add('kinEle_mvaFall17V2noIso (inner barrel)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (|eta| < 0.8)','kinEle_mvaFall17V2noIso (inner barrel - |eta| < 0.8)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
self.a.SetActiveNode(no_eta)
outer_barrel = self.a.Cut('outer_barrel','abs(kinEle_eta) > 0.8 && abs(kinEle_eta) < 1.479')
self.a.SetActiveNode(outer_barrel)
studiesPlots.Add('kinEle_mvaFall17V2noIso (outer barrel)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (0.8 < |eta| < 1.479)','kinEle_mvaFall17V2noIso (outer barrel - 0.8 < |eta| < 1.479)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
self.a.SetActiveNode(no_eta)
endcap = self.a.Cut('endcap','abs(kinEle_eta) > 1.479 && abs(kinEle_eta) < 2.5')
self.a.SetActiveNode(endcap)
studiesPlots.Add('kinEle_mvaFall17V2noIso (endcap)',self.a.DataFrame.Histo1D(('kinEle_mvaFall17V2noIso (1.479 < |eta| < 2.5)','kinEle_mvaFall17V2noIso (endcap - 1.479 < |eta| < 2.5)',100,0,1),'kinEle_mvaFall17V2noIso', 'weight__nominal'))
'''
studiesPlots.Add('kinEle_mvaFall17V2noIso_WP80',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WP80','kinEle_mvaFall17V2noIso_WP80',2,0,2),'kinEle_mvaFall17V2noIso_WP80','weight__nominal').GetValue())
print('kinele_mvaWP80 plot made')
studiesPlots.Add('kinEle_mvaFall17V2noIso_WP90',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WP90','kinEle_mvaFall17V2noIso_WP90',2,0,2),'kinEle_mvaFall17V2noIso_WP90','weight__nominal').GetValue())
print('kinele_mvaWP90 plot made')
studiesPlots.Add('kinEle_mvaFall17V2noIso_WPL',self.a.GetActiveNode().DataFrame.Histo1D(('kinEle_mvaFall17V2noIso_WPL','kinEle_mvaFall17V2noIso_WPL',2,0,2),'kinEle_mvaFall17V2noIso_WPL','weight__nominal').GetValue())
print('kinele_mvaWPL plot made')
'''
self.a.SetActiveNode(start)
taggers = ['particleNetMD']
# now we want to plot mX vs mY for QCD, ttbar, and signal
for t in taggers:
self.ApplyMassCuts()
start=self.a.GetActiveNode()
# We use Wqq tagging scores to divide data into two regions: signal (enriched in signal) and control (enriched in background)
# - Signal: Wqq > 0.8, pass lepton medium ID
# - Control: Wqq < 0.8, fail lepton medium ID
# We define a pass/fail criteria for the Hbb score within each region
# - Region 1 (fail): Hbb < 0.94
# - Region 2 (pass): Hbb > 0.94
SR=self.ApplySRorCR('SR',t)
SR_FP=self.ApplyPassFail('SR',t)
self.a.SetActiveNode(start)
CR=self.ApplySRorCR('CR',t)
CR_FP=self.ApplyPassFail('CR',t)
nodes=OrderedDict()
nodes.update(SR_FP)
nodes.update(CR_FP)
bins = [80,0,4500]
for node in nodes.keys():
self.a.SetActiveNode(nodes[node])
print('MX vs MY: Plotting for {}'.format(node))
studiesPlots.Add('MXvsMY_{}'.format(node), self.a.DataFrame.Histo2D(('MXvsMY_{}'.format(node), 'X vs Y Invariant Mass - {} {}'.format(node.split('_')[1],node.split('_')[0]), bins[0], bins[1], bins[2], bins[0], bins[1], bins[2]), 'X_mass', 'Y_mass', 'weight__nominal'))
outFile = ROOT.TFile.Open('{}_{}_{}_MXvsMYstudies.root'.format(self.setname,self.year,self.ijob),'RECREATE')
outFile.cd()
studiesPlots.Do('Write')
#self.a.PrintNodeTree('NodeTree.pdf',verbose=True)
outFile.Close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-s', type=str, dest='setname',
action='store',help='name of data set to run on')
parser.add_argument('-y', type=str, dest='year',
action='store', help='year',required=False)
parser.add_argument('-j', type=int, dest='ijob',required=False,
action='store', help='current job')
parser.add_argument('-n', type=int, dest='njobs',required=False,
action='store', help='number of jobs')
args = parser.parse_args()
setname=args.setname
year=args.year
ijob=args.ijob
njobs=args.njobs
filename='snapshots/{}_{}_snapshot.txt'.format(setname,year)
ana = XHYbbWW(filename,ijob,njobs)
# ana.ApplyStandardCorrections(post_snapshot=True)
ana.Dijets()
KinematicLepton(ana)
MXvsMY_studies(ana)
| michaelhesford/XHYbbWW_semileptonic | MXvsMY_studies.py | MXvsMY_studies.py | py | 8,394 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "TIMBER.Analyzer.HistGroup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "ROOT.TFile.Open",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "R... |
18464680736 | #!/usr/bin/python3
#encoding: utf-8
import requests
import re
from bs4 import BeautifulSoup
import json
#登录获取cookie
login_url = "http://210.30.1.140/index.php/Public/checkLogin"
#登录信息
logindata={
"txtName":"2015083216",
"txtPass":"2015083216",
"txtCheck":"no",
}
#获取cookie
logind = requests.post(login_url,data=logindata)
cookie = logind.cookies
#提交题目
d = {
"submit_language":"1",
"submit_code":"#include <iostream> \n using namespace std;\n int main()\n{int a,b;cin>>a>>b; cout<<a+b<<endl; return 0;}",
"problem_id":"303",
"test_id":"",
"__hash__":"a8edbf0347b55fdb7b7567c1505c15b1_d0ad44986cc057b42f6762993b550404"
}
url = "http://210.30.1.140/index.php/Problems/saveCode"
for i in range(1,3): #循环填写请求的次数
r = requests.post(url, data=d,cookies=cookie)
print(r.text) #返回请求后的内容
'''
requests post请求参考资料:http://blog.csdn.net/junli_chen/article/details/53670887
form形式
json形式
multipat形式
'''
| chinazhenzhen/PythonLearn | RE4/5+.py | 5+.py | py | 1,019 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 38,
"usage_type": "call"
}
] |
15260123974 | import datetime
import hashlib
import json
from flask import Flask, jsonify
# Building a Blockchain
class Blockchain:
def __init__(self):
"""
Create Blockchain and a genesis block
"""
self.chain = []
self.create_block(proof=1, previous_hash='0')
def create_block(self, proof, previous_hash):
"""
:param proof: Proof of new block
:param previous_hash: hash of the previous block in Blockchain
:return: newly created block
"""
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash}
self.chain.append(block)
return block
def get_previous_block(self):
"""
:return: Last block of Blockchain
"""
return self.chain[-1]
def proof_of_work(self, previous_proof):
"""
:param previous_proof: hash of the previous block in Blockchain
:return: proof on new block
"""
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str((new_proof ** 2) - (previous_proof ** 2)).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
"""
:param block: A block in a Blockchain
:return: hash of the block
"""
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
"""
:param chain: list of blocks in Blockchain
:return: True if chain is valid, otherwise False
"""
block_index = 1
previous_block = chain[0]
while block_index < len(chain):
block = chain[block_index]
# Checks if previous_hash of current block is equal to hash of previous block
if block['previous_hash'] != self.hash(previous_block):
return False
# Check if proof of current block satisfies the 4 zeroes condition or not
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str((proof ** 2) - (previous_proof ** 2)).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
# Creating a web app
app = Flask(__name__)
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash']}
return jsonify(response), 200
# Getting the full Blockchain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
@app.route('/is_valid', methods=['GET'])
def is_valid():
if blockchain.is_chain_valid(blockchain.chain):
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'We have a problem. The Blockchain is not valid'}
return jsonify(response), 200
# Running the app
app.run(host='0.0.0.0', port=1710)
| imnishant/Blockchain | main.py | main.py | py | 3,966 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "hashlib.sha256",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.dump... |
71916477308 | # coding:utf-8
from PyQt5.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QSizePolicy, QListWidgetItem, QAbstractItemView
from PyQt5.QtCore import pyqtSignal
from qfluentwidgets import ListWidget, PrimaryPushButton, PillPushButton, FluentIcon, InfoBar
from common.style_sheet import StyleSheet
from common.config import cfg
from globalvar.vars import set_value, get_value
from threads.pkthread import PKThread
from utils.logger import Logger
from view.frame import Frame
from view.widget.operate_toolbar import OperateToolBar
class OperateInterface(QWidget):
    """View for selecting test/independent variables and launching PK
    calculation or comparison in a background PKThread.

    Layout: one "available" list on the left, a single-item "independent
    variable" list (y) and a multi-item "test variables" list (x) on the
    right, with Add/Remove buttons in between.
    """
    # Emitted with a status message when a calculation/comparison starts.
    calculate_started_signal = pyqtSignal(str)
    # Emitted with a status message once the background work has finished.
    calculate_finished_signal = pyqtSignal(str)
    def __init__(self, parent=None):
        """Create all child widgets, then lay out, initialize and wire them."""
        super().__init__(parent=parent)
        self.setObjectName("operate_interface")
        self.widget = QWidget(self)
        self.gridLayout = QGridLayout(self)
        self.verticalLayout = QVBoxLayout()
        self.toolBar = OperateToolBar(self.tr("Variables"), self.tr("Calculate and compare the PK values of the variables."), self)
        self.listWidget_all = ListWidget(self)
        self.listWidget_y = ListWidget(self)
        self.listWidget_x = ListWidget(self)
        self.frame_all = Frame(self)
        self.frame_x = Frame(self)
        self.frame_y = Frame(self)
        self.pushButton_y = PrimaryPushButton(self.tr("Add"), self)
        self.pushButton_all = PrimaryPushButton(self.tr("Add all"), self)
        self.pushButton_x = PrimaryPushButton(self.tr("Add"), self)
        self.variables_all = PillPushButton(self.tr("Available variables"), self, FluentIcon.TAG)
        self.variables_x = PillPushButton(self.tr("Test variables"), self, FluentIcon.TAG)
        self.variables_y = PillPushButton(self.tr("Independent variables"), self, FluentIcon.TAG)
        # Cached column names of the current workbook (see updateList()).
        self.list_all = []
        # Direction flags: True means the next button click moves items
        # INTO the x/y list; False means it removes them back.
        self.add_to_x = True
        self.add_to_y = True
        self.pkThread = PKThread()
        self.logger = Logger().get_logger()
        self.__initWidget()
        self.__initListWidgets()
        self.__initConnects()
    def __initWidget(self):
        """Arrange all widgets in the grid and apply selection/style settings."""
        sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.listWidget_y.sizePolicy().hasHeightForWidth())
        self.listWidget_y.setSizePolicy(sizePolicy)
        self.frame_all.addWidget(self.listWidget_all)
        self.frame_x.addWidget(self.listWidget_x)
        self.frame_y.addWidget(self.listWidget_y)
        self.gridLayout.addWidget(self.toolBar, 0, 0, 1, 3)
        self.gridLayout.addWidget(self.frame_all, 2, 0, 4, 1)
        self.gridLayout.addWidget(self.pushButton_y, 3, 1, 1, 1)
        self.gridLayout.addWidget(self.frame_y, 3, 2, 1, 1)
        self.gridLayout.addWidget(self.variables_all, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.variables_y, 1, 2, 1, 1)
        self.gridLayout.addWidget(self.variables_x, 4, 2, 1, 1)
        self.verticalLayout.addWidget(self.pushButton_all)
        self.verticalLayout.addWidget(self.pushButton_x)
        self.gridLayout.addLayout(self.verticalLayout, 5, 1, 1, 1)
        self.gridLayout.addWidget(self.frame_x, 5, 2, 1, 1)
        # Pill buttons act as labels only, so disable toggling.
        self.variables_all.setCheckable(False)
        self.variables_y.setCheckable(False)
        self.variables_x.setCheckable(False)
        # Multi-select allowed everywhere except the single-item y list.
        self.listWidget_all.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.listWidget_x.setSelectionMode(QAbstractItemView.ExtendedSelection)
        StyleSheet.GALLERY_INTERFACE.apply(self)
    def __initListWidgets(self):
        """Start with all move buttons disabled until something is selected."""
        self.pushButton_x.setEnabled(False)
        self.pushButton_y.setEnabled(False)
        self.pushButton_all.setEnabled(False)
        self.resetLists()
    def __initButtons(self):
        """Re-enable the move buttons and reset them to 'Add' mode."""
        self.pushButton_x.setEnabled(True)
        self.pushButton_y.setEnabled(True)
        self.pushButton_all.setEnabled(True)
        self.pushButton_x.setText(self.tr("Add"))
        self.pushButton_y.setText(self.tr("Add"))
        self.pushButton_all.setText(self.tr("Add all"))
        self.add_to_x = True
        self.add_to_y = True
    def enbaleAllButtons(self, enabled):
        """Enable/disable every action button at once (used while the worker
        thread runs).  NOTE(review): name typo ('enbale') kept for
        compatibility with existing callers."""
        self.pushButton_all.setEnabled(enabled)
        self.pushButton_x.setEnabled(enabled)
        self.pushButton_y.setEnabled(enabled)
        self.toolBar.compareButton.setEnabled(enabled)
        self.toolBar.calcaulateButton.setEnabled(enabled)
        self.toolBar.resetButton.setEnabled(enabled)
    def __initConnects(self):
        """Connect UI signals to handlers and the PKThread signals to the UI."""
        self.toolBar.resetButton.clicked.connect(self.resetLists)
        self.listWidget_all.clicked.connect(self.__initButtons)
        self.listWidget_x.clicked.connect(self.remove_from_x)
        self.listWidget_y.clicked.connect(self.remove_from_y)
        self.pushButton_x.clicked.connect(self.clicked_button_x)
        self.pushButton_y.clicked.connect(self.clicked_button_y)
        self.pushButton_all.clicked.connect(self.clicked_button_all)
        self.toolBar.calcaulateButton.clicked.connect(self.calculate)
        self.toolBar.compareButton.clicked.connect(self.compare)
        self.pkThread.finished_signal.connect(self.calculate_compare_finished)
        self.pkThread.error_signal.connect(self.error_occurred)
        self.pkThread.warn_signal.connect(self.warn_occurred)
        self.pkThread.success_signal.connect(self.success_occurred)
        self.pkThread.task_percentage_changed_signal.connect(self.toolBar.progressbar.setValue)
        self.pkThread.task_percentage_changed_signal.connect(self.toolBar.update_percentage)
    def resetLists(self):
        """Empty both selection lists and refill the 'available' list."""
        self.setList(self.listWidget_x, [])
        self.setList(self.listWidget_y, [])
        self.setList(self.listWidget_all, self.list_all)
    def setList(self, list_widget, list_content):
        """Replace a list widget's items with str() of each entry in list_content."""
        while list_widget.count() > 0:
            list_widget.takeItem(0)
        for content in list_content:
            if not isinstance(content, str):
                content = str(content)
            list_widget.addItem(QListWidgetItem(content))
        list_widget.clearSelection()
    def updateList(self):
        """Reload variable names from the current workbook and clear all
        cached PK/PKC results held in the global store."""
        df = get_value("current_workbook")
        # Presumably a pandas DataFrame; columns become the variable names.
        self.list_all = df.columns
        self.resetLists()
        self.__initButtons()
        set_value("pk", None)
        set_value("pk_dict", {})
        set_value("pk_name_dict", {})
        set_value("pk_n", 0)
        set_value("pks", None)
        set_value("pks_dict", {})
        set_value("pks_name_dict", {})
        set_value("pks_n", 0)
        self.logger.info(self.tr("Update the available variables in the data. All the storage cache has been reset."))
        self.logger.info(self.tr("The available variables list as {0}").format(self.list_all))
    def remove_from_x(self):
        """Clicking inside the x list flips the buttons into 'Remove' mode."""
        self.pushButton_x.setText(self.tr("Remove"))
        self.pushButton_all.setText(self.tr("Remove all"))
        self.add_to_x = False
    def remove_from_y(self):
        """Clicking inside the y list flips its button into 'Remove' mode."""
        self.pushButton_y.setText(self.tr("Remove"))
        self.add_to_y = False
    def exchange_selected(self, source, destination):
        """Move the selected items from source to destination.

        Rows are removed highest-first so earlier removals do not shift
        the indexes of items still to be taken.
        """
        selected = source.selectedIndexes()
        idx = [x.row() for x in selected]
        idx.sort(reverse=True)
        for num in idx:
            it = source.takeItem(num)
            destination.addItem(it)
        source.clearSelection()
        destination.clearSelection()
    def remove_all(self, source, destination):
        """Move every item from source into destination."""
        while source.count() > 0 :
            it = source.takeItem(0)
            destination.addItem(it)
    def clicked_button_x(self):
        """Move selected items into (or out of) the test-variable list."""
        if self.add_to_x:
            self.exchange_selected(self.listWidget_all, self.listWidget_x)
        else:
            self.exchange_selected(self.listWidget_x, self.listWidget_all)
    def clicked_button_y(self):
        """Move a single item into (or out of) the independent-variable list.

        Only one independent variable is allowed, so adding requires the y
        list to be empty and exactly one item selected.
        """
        if self.add_to_y:
            if self.listWidget_y.count() == 0 and len(self.listWidget_all.selectedItems()) == 1:
                self.exchange_selected(self.listWidget_all, self.listWidget_y)
            else:
                pass
        else:
            self.exchange_selected(self.listWidget_y, self.listWidget_all)
    def clicked_button_all(self):
        """Move ALL items between the available and the test-variable list."""
        if self.add_to_x:
            self.remove_all(self.listWidget_all, self.listWidget_x)
        else:
            self.remove_all(self.listWidget_x, self.listWidget_all)
    def collect_xy(self):
        """Publish the chosen x/y variable names and output dir to the
        global store so the worker thread can pick them up."""
        x = []
        y = []
        n = self.listWidget_x.count()
        for i in range(n):
            x.append(self.listWidget_x.item(i).text())
        n = self.listWidget_y.count()
        for i in range(n):
            y.append(self.listWidget_y.item(i).text())
        set_value("x_names", x)
        set_value("y_names", y)
        set_value("output_dir", cfg.get(cfg.outputFolder))
        self.logger.info(self.tr("The test variables include the following:"))
        self.logger.info(x)
        self.logger.info(self.tr("The independent variable includes the following:"))
        self.logger.info(y)
    def calculate(self):
        """Start PK calculation in the background thread."""
        self.logger.info(self.tr("Start calculating PKs."))
        self.toolBar.textButton.setText(self.tr("Calculating"))
        # Lock the UI until the worker signals completion.
        self.enbaleAllButtons(False)
        self.collect_xy()
        self.pkThread.set_work_type("PK")
        self.pkThread.start()
        self.calculate_started_signal.emit(self.tr("Calculating PKs"))
    def compare(self):
        """Start PK comparison (work type "PKC") in the background thread."""
        self.logger.info(self.tr("Start comparing PKs."))
        self.toolBar.textButton.setText(self.tr("Comparing"))
        self.enbaleAllButtons(False)
        self.collect_xy()
        self.pkThread.set_work_type("PKC")
        self.pkThread.start()
        self.calculate_started_signal.emit(self.tr("Comparing PKs"))
    def calculate_compare_finished(self):
        """Worker finished: unlock the UI and announce the result file."""
        self.enbaleAllButtons(True)
        self.toolBar.createTopLeftInfoBar(self.tr("Success!"), self.tr("The operation success and write the results to files finished!! Please refer the output for details."), InfoBar.success, 2000)
        self.calculate_finished_signal.emit(self.tr("Open the file {0}").format(get_value("last_work_file")))
    def success_occurred(self, str):
        """Show a success info bar.  NOTE(review): parameter shadows builtin str."""
        self.toolBar.createTopLeftInfoBar(self.tr("Success!"), str, InfoBar.success)
    def error_occurred(self, str):
        """Show an error info bar and unlock the UI after a worker failure."""
        self.enbaleAllButtons(True)
        self.toolBar.createTopRightInfoBar(self.tr("Error!"), str,InfoBar.error)
    def warn_occurred(self, str):
        """Show a warning info bar for non-fatal worker messages."""
        self.toolBar.createTopRightInfoBar(self.tr("Warn!"), str,InfoBar.warning)
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
72532296829 | # pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from contextlib import contextmanager
from typing import Any, AsyncIterable, Callable, Iterator
from unittest.mock import AsyncMock
import pytest
from faker import Faker
from fastapi import FastAPI, status
from httpx import HTTPError, Response
from models_library.sidecar_volumes import VolumeCategory, VolumeStatus
from pydantic import AnyHttpUrl, parse_obj_as
from pytest import LogCaptureFixture, MonkeyPatch
from pytest_mock import MockerFixture
from pytest_simcore.helpers.typing_env import EnvVarsDict
from simcore_service_director_v2.core.settings import AppSettings
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._errors import (
ClientHttpError,
UnexpectedStatusError,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
SidecarsClient,
get_sidecars_client,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
setup as api_client_setup,
)
from simcore_service_director_v2.modules.dynamic_sidecar.api_client._public import (
shutdown,
)
from simcore_service_director_v2.modules.dynamic_sidecar.errors import (
EntrypointContainerNotFoundError,
)
@pytest.fixture
def dynamic_sidecar_endpoint() -> AnyHttpUrl:
    # Host deliberately does not resolve: real requests must fail/time out.
    return parse_obj_as(AnyHttpUrl, "http://missing-host:1111")
@pytest.fixture
def mock_env(monkeypatch: MonkeyPatch, mock_env: EnvVarsDict) -> None:
    """Override the upstream ``mock_env`` with empty service credentials so
    AppSettings can be built without any real backing services."""
    monkeypatch.setenv("S3_ACCESS_KEY", "")
    monkeypatch.setenv("S3_SECRET_KEY", "")
    monkeypatch.setenv("S3_BUCKET_NAME", "")
    monkeypatch.setenv("R_CLONE_PROVIDER", "MINIO")
    monkeypatch.setenv("POSTGRES_HOST", "")
    monkeypatch.setenv("POSTGRES_USER", "")
    monkeypatch.setenv("POSTGRES_PASSWORD", "")
    monkeypatch.setenv("POSTGRES_DB", "")
    # reduce number of retries to make more reliable
    monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", "3")
    monkeypatch.setenv("S3_ENDPOINT", "")
@pytest.fixture
async def sidecars_client(
    mock_env: EnvVarsDict, faker: Faker
) -> AsyncIterable[SidecarsClient]:
    """Yield a SidecarsClient bound to a throwaway FastAPI app; tears the
    api-client module down again after the test."""
    app = FastAPI()
    app.state.settings = AppSettings.create_from_envs()
    # WARNING: pytest gets confused with 'setup', use instead alias 'api_client_setup'
    await api_client_setup(app)
    yield get_sidecars_client(app, faker.uuid4())
    await shutdown(app)
@pytest.fixture
def request_timeout() -> int:
    # below refer to exponential wait step duration
    return 1 + 2
@pytest.fixture
def raise_request_timeout(
    monkeypatch: MonkeyPatch, request_timeout: int, mock_env: EnvVarsDict
) -> None:
    # Shrink the client timeout so requests to the unreachable endpoint
    # fail fast instead of stalling the test suite.
    monkeypatch.setenv("DYNAMIC_SIDECAR_CLIENT_REQUEST_TIMEOUT_S", f"{request_timeout}")
@pytest.fixture
def get_patched_client(
    sidecars_client: SidecarsClient, mocker: MockerFixture
) -> Callable:
    """Return a context-manager factory that patches one ThinSidecarsClient
    method with a canned return value or side effect for the test's duration."""
    @contextmanager
    def wrapper(
        method: str,
        return_value: Any | None = None,
        side_effect: Callable | None = None,
    ) -> Iterator[SidecarsClient]:
        mocker.patch(
            f"simcore_service_director_v2.modules.dynamic_sidecar.api_client._thin.ThinSidecarsClient.{method}",
            return_value=return_value,
            side_effect=side_effect,
        )
        yield sidecars_client
    return wrapper
@pytest.mark.parametrize("is_healthy", [True, False])
@pytest.mark.parametrize("with_retry", [True, False])
async def test_is_healthy(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    is_healthy: bool,
    with_retry: bool,
) -> None:
    """is_healthy must return the boolean reported by the sidecar's /health."""
    mock_json = {"is_healthy": is_healthy}
    with get_patched_client(
        "get_health" if with_retry else "get_health_no_retry",
        return_value=Response(status_code=status.HTTP_200_OK, json=mock_json),
    ) as client:
        assert (
            await client.is_healthy(dynamic_sidecar_endpoint, with_retry=with_retry)
            == is_healthy
        )
async def test_is_healthy_times_out(
    raise_request_timeout: None,
    sidecars_client: SidecarsClient,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    caplog_info_level: LogCaptureFixture,
) -> None:
    """A timed-out health request reports unhealthy and logs each timeout."""
    assert await sidecars_client.is_healthy(dynamic_sidecar_endpoint) is False
    # check if the right amount of messages was captured by the logs
    unexpected_counter = 1
    for log_message in caplog_info_level.messages:
        # tenacity retry messages are interleaved with the timeout logs
        if log_message.startswith("Retrying"):
            assert "as it raised" in log_message
            continue
        assert log_message.startswith(f"Request timed-out after {unexpected_counter}")
        unexpected_counter += 1
@pytest.mark.parametrize(
    "side_effect",
    [
        pytest.param(
            UnexpectedStatusError(
                Response(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content="some mocked error",
                    request=AsyncMock(),
                ),
                status.HTTP_200_OK,
            ),
            id="UnexpectedStatusError",
        ),
        pytest.param(
            ClientHttpError(HTTPError("another mocked error")), id="HTTPError"
        ),
    ],
)
async def test_is_healthy_api_error(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    side_effect: Exception,
) -> None:
    """Client/status errors must be swallowed and reported as unhealthy."""
    with get_patched_client(
        "get_health",
        side_effect=side_effect,
    ) as client:
        # `is False` (identity check, PEP 8 / E712) instead of `== False`,
        # consistent with test_is_healthy_times_out above.
        assert await client.is_healthy(dynamic_sidecar_endpoint) is False
async def test_containers_inspect(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """containers_inspect returns the JSON body of GET /containers verbatim."""
    mock_json = {"ok": "data"}
    with get_patched_client(
        "get_containers",
        return_value=Response(status_code=status.HTTP_200_OK, json=mock_json),
    ) as client:
        assert await client.containers_inspect(dynamic_sidecar_endpoint) == mock_json
async def test_containers_docker_status_api_ok(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """On a 200 reply the per-container status mapping is passed through."""
    mock_json = {"container_id": {"ok": "data"}}
    with get_patched_client(
        "get_containers",
        return_value=Response(status_code=status.HTTP_200_OK, json=mock_json),
    ) as client:
        assert (
            await client.containers_docker_status(dynamic_sidecar_endpoint) == mock_json
        )
async def test_containers_docker_status_api_error(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """On an unexpected status the client degrades to an empty mapping."""
    with get_patched_client(
        "get_containers",
        side_effect=UnexpectedStatusError(
            Response(
                status_code=status.HTTP_400_BAD_REQUEST,
                content="some mocked error",
                request=AsyncMock(),
            ),
            status.HTTP_200_OK,
        ),
    ) as client:
        assert await client.containers_docker_status(dynamic_sidecar_endpoint) == {}
async def test_disable_service_ports_io(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """Disabling ports I/O succeeds silently on a 204 reply."""
    with get_patched_client(
        "patch_containers_ports_io",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert await client.disable_service_ports_io(dynamic_sidecar_endpoint) is None
async def test_enable_service_ports_io(
    get_patched_client: Callable, dynamic_sidecar_endpoint: AnyHttpUrl
) -> None:
    """Enabling ports I/O succeeds silently on a 204 reply."""
    with get_patched_client(
        "patch_containers_ports_io",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert await client.enable_service_ports_io(dynamic_sidecar_endpoint) is None
@pytest.mark.parametrize("outputs_labels", [{}, {"ok": "data"}])
async def test_service_outputs_create_dirs(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    outputs_labels: dict[str, Any],
) -> None:
    """Creating output dirs succeeds for both empty and non-empty labels."""
    with get_patched_client(
        "post_containers_ports_outputs_dirs",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            await client.service_outputs_create_dirs(
                dynamic_sidecar_endpoint, outputs_labels
            )
            is None
        )
@pytest.mark.parametrize("dynamic_sidecar_network_name", ["a_test_network"])
async def test_get_entrypoint_container_name_ok(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    dynamic_sidecar_network_name: str,
) -> None:
    """The entrypoint container name is returned as a plain string."""
    with get_patched_client(
        "get_containers_name",
        return_value=Response(status_code=status.HTTP_200_OK, json="a_test_container"),
    ) as client:
        assert (
            await client.get_entrypoint_container_name(
                dynamic_sidecar_endpoint, dynamic_sidecar_network_name
            )
            == "a_test_container"
        )
@pytest.mark.parametrize("dynamic_sidecar_network_name", ["a_test_network"])
async def test_get_entrypoint_container_name_api_not_found(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    dynamic_sidecar_network_name: str,
) -> None:
    """A 404 from the sidecar maps to EntrypointContainerNotFoundError."""
    with get_patched_client(
        "get_containers_name",
        side_effect=UnexpectedStatusError(
            Response(status_code=status.HTTP_404_NOT_FOUND, request=AsyncMock()),
            status.HTTP_204_NO_CONTENT,
        ),
    ) as client:
        with pytest.raises(EntrypointContainerNotFoundError):
            await client.get_entrypoint_container_name(
                dynamic_sidecar_endpoint, dynamic_sidecar_network_name
            )
@pytest.mark.parametrize("network_aliases", [[], ["an-alias"], ["alias-1", "alias-2"]])
async def test_attach_container_to_network(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    network_aliases: list[str],
) -> None:
    """Attaching a container succeeds for 0, 1 and many network aliases."""
    with get_patched_client(
        "post_containers_networks_attach",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            # pylint:disable=protected-access
            await client._attach_container_to_network(
                dynamic_sidecar_endpoint,
                container_id="container_id",
                network_id="network_id",
                network_aliases=network_aliases,
            )
            is None
        )
async def test_detach_container_from_network(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
) -> None:
    """Detaching a container succeeds silently on a 204 reply."""
    with get_patched_client(
        "post_containers_networks_detach",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            # pylint:disable=protected-access
            await client._detach_container_from_network(
                dynamic_sidecar_endpoint,
                container_id="container_id",
                network_id="network_id",
            )
            is None
        )
@pytest.mark.parametrize("volume_category", VolumeCategory)
@pytest.mark.parametrize("volume_status", VolumeStatus)
async def test_update_volume_state(
    get_patched_client: Callable,
    dynamic_sidecar_endpoint: AnyHttpUrl,
    volume_category: VolumeCategory,
    volume_status: VolumeStatus,
) -> None:
    """Every (category, status) combination can be PUT without error."""
    with get_patched_client(
        "put_volumes",
        return_value=Response(status_code=status.HTTP_204_NO_CONTENT),
    ) as client:
        assert (
            await client.update_volume_state(
                dynamic_sidecar_endpoint,
                volume_category=volume_category,
                volume_status=volume_status,
            )
            is None
        )
| ITISFoundation/osparc-simcore | services/director-v2/tests/unit/test_modules_dynamic_sidecar_client_api_public.py | test_modules_dynamic_sidecar_client_api_public.py | py | 11,510 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "pydantic.parse_obj_as",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pydantic.AnyHttpUrl",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "pytest.fixture",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pyd... |
from nltk.corpus import brown
import nltk
# Conditional frequency distribution: word counts conditioned on the
# Brown-corpus genre each word appears in.
cfd = nltk.ConditionalFreqDist(
    (genre,word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))
# (genre, word) pairs restricted to the 'news' category only.
genre_word = [(genre, word)
    for genre in ['news']
    for word in brown.words(categories=genre)]
# Number of word tokens in 'news', and a small sample of the pairs.
print(len(genre_word))
print(genre_word[:5])
{
"api_name": "nltk.ConditionalFreqDist",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.brown.categories",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.brown",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "... |
33186103812 | from simplejson import dumps
from webob import Response
from pycurl import Curl
from subprocess import Popen, PIPE
from multiprocessing import Queue
from traceback import format_exc
from time import sleep
import logging
import tarfile
import os
import os.path
import urllib
import uuid
import sys
import os
from config import conf
from common import RequestHandler
class GitRepository(object):
    """Thin wrapper that shells out to git (and pdebuild) for a checkout
    rooted at ``self.path``.  Python 2 era code (``file()`` used elsewhere
    in this module)."""
    def __init__(self, path=None):
        # path may be None for commands that do not need a working copy
        # (e.g. ls_remote).
        self.path = path
    def _cmd(self, args, shell=False):
        """Run a command with cwd set to the repo path.

        Returns ``((stdout, stderr), returncode)``.
        """
        try:
            os.chdir(self.path)
        # NOTE(review): bare except silently ignores chdir failures
        # (including path=None); commands then run from the previous cwd.
        except: pass
        logging.debug('cwd: %s exec: %s' % (os.getcwd(), ' '.join(args)))
        p = Popen(args, stdout=PIPE, stderr=PIPE, shell=shell)
        ret = (p.communicate(), p.returncode)
        if ret[0][0]:
            logging.debug('\n'.join(ret[0]))
        return ret
    def _git(self, args):
        # All git subcommands share the same binary path.
        return self._cmd(['/usr/bin/git'] + args)
    def clone(self, gitpath):
        return self._git(['clone', gitpath, self.path])
    def checkout(self, ref):
        return self._git(['checkout', ref])
    def submodule_init(self):
        return self._git(['submodule', 'init'])
    def submodule_update(self):
        return self._git(['submodule', 'update'])
    def ls_remote(self, gitpath):
        """List remote heads/tags as [sha, refname] pairs."""
        output, retcode = self._git(['ls-remote', '--heads', '--tags', gitpath])
        stdout, stderr = output
        return [x.split('\t') for x in stdout.split('\n') if x]
    def show_ref(self):
        """List local heads/tags as [sha, refname] pairs."""
        output, retcode = self._git(['show-ref', '--heads', '--tags'])
        stdout, stderr = output
        return [x.split(' ', 1) for x in stdout.split('\n') if x]
    def build(self, signkey, pbuilderrc, resultsdir):
        """Build the package, via git-buildpackage when an 'upstream' branch
        exists, otherwise plain pdebuild.

        NOTE(review): the embedded double quotes in the git-buildpackage
        arguments are passed literally (shell=False), so options such as
        --git-cleaner="..." likely do not parse as intended — verify.
        """
        if 'refs/heads/upstream' in [x[1] for x in self.show_ref()]:
            cmd = ['/usr/bin/git-buildpackage', '--git-sign', '--git-cleaner="fakeroot debian/rules clean"', '--git-keyid="%s"' % signkey, '--git-builder="pdebuild --debsign-k %s --auto-debsign --configfile %s --debbuildopts "-i.git -I.git -sa" --buildresult %s' % (signkey, pbuilderrc, resultsdir)]
        else:
            cmd = ['/usr/bin/pdebuild', '--debsign-k', signkey, '--auto-debsign', '--debbuildopts', '-i.git -I.git -sa', '--configfile', pbuilderrc, '--buildresult', resultsdir]
        return self._cmd(cmd)
class PackageHandler(RequestHandler):
    """HTTP handler for listing refs of a repo (GET) and kicking off an
    asynchronous package build (POST)."""
    def get(self, gitpath, gitrepo):
        """Return the remote heads/tags of the repo as JSON."""
        gitpath = os.path.join(conf('buildbot.gitpath.%s' % gitpath), gitrepo)
        repo = GitRepository()
        refs = repo.ls_remote(gitpath)
        return Response(status=200, body=dumps(refs))
    def post(self, gitpath, gitrepo):
        """Fork a build of the given ref; respond immediately with a build id
        the caller can poll via StatusHandler/TarballHandler."""
        if not 'ref' in self.request.params:
            return Response(status=400, body='Required parameter "ref" is missing. You must pass a git tag, branch, or commit ID to be built.\n')
        gitpath = os.path.join(conf('buildbot.gitpath.%s' % gitpath), gitrepo)
        ref = self.request.params['ref']
        # Optional: callback URL to POST the tarball to, and submodule flag.
        cburl = self.request.params.get('cburl', None)
        submodules = self.request.params.get('submodules', None)
        buildid = uuid.uuid4().hex
        build_worker(gitpath, ref, buildid, cburl, submodules)
        return Response(status=200, body=buildid + '\n')
class RepoListHandler(RequestHandler):
    """HTTP handler returning the list of repositories known to a git index."""
    def get(self, gitpath):
        try:
            gitindex = conf('buildbot.gitindex.%s' % gitpath)
        except KeyError:
            return Response(status=404, body='Unknown git path')
        # The index is a plain-text listing; keep the first token of each
        # line and strip a trailing extension (e.g. "repo.git" -> "repo").
        response = urllib.urlopen(gitindex)
        index = response.read()
        index = [x.strip('\r\n ').split(' ')[0].rsplit('.')[0] for x in index.split('\n') if x.strip('\r\n ')]
        return Response(status=200, body=dumps(index))
class TarballHandler(RequestHandler):
    """HTTP handler serving the finished build tarball for a build id."""
    def get(self, buildid):
        builddir = os.path.join(conf('buildbot.buildpath'), buildid)
        if not os.path.exists(builddir):
            return Response(status=404, body='The build ID does not exist.\n')
        # package.tar.gz only appears once build_thread has finished.
        tarpath = os.path.join(builddir, 'package.tar.gz')
        if not os.path.exists(tarpath):
            return Response(status=400, body='The build is not done yet.\n')
        else:
            # Python 2 builtin file(); whole tarball is read into memory.
            fd = file(tarpath, 'rb')
            data = fd.read()
            fd.close()
            return Response(status=200, body=data, content_type='application/x-tar-gz')
class StatusHandler(RequestHandler):
    """HTTP handler reporting build progress: 404 unknown id, 400 still
    running (with log so far), 200 complete."""
    def get(self, buildid):
        builddir = os.path.join(conf('buildbot.buildpath'), buildid)
        if not os.path.exists(builddir):
            return Response(status=404, body='The build ID does not exist.\n')
        try:
            log = file('%s/build.log' % builddir, 'r').read()
        except:
            # Log may not exist yet right after the build was queued.
            log = ''
        if not os.path.exists(builddir + '/package.tar.gz'):
            return Response(status=400, body='The build is not done yet.\n' + log)
        else:
            return Response(status=200, body='Build complete.\n' + log)
def buildlog(buildid, message):
    """Append *message* to the build's log file and echo it to the debug log."""
    logpath = os.path.join(conf('buildbot.buildpath'), '%s/build.log' % buildid)
    logfile = open(logpath, 'a+')
    logfile.write(message + '\n')
    logfile.close()
    logging.debug(message)
def build_thread(gitpath, ref, buildid, cburl=None, submodules=False):
    """Clone, check out, build and package a repo; runs in a forked child.

    Progress and errors are appended to <buildpath>/<buildid>/build.log.
    On success the build results are tarred into package.tar.gz and,
    if cburl is given, POSTed back to that URL.
    """
    tmpdir = os.path.join(conf('buildbot.buildpath'), buildid)
    repo = GitRepository(tmpdir)
    output, retcode = repo.clone(gitpath)
    if retcode:
        buildlog(buildid, 'Unable to clone %s. %s\n' % (gitpath, '\n'.join(output)))
        return
    output, retcode = repo.checkout(ref)
    if retcode:
        buildlog(buildid, 'Unable to checkout %s. %s\n' % (ref, '\n'.join(output)))
        return
    if submodules:
        output, retcode = repo.submodule_init()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
        output, retcode = repo.submodule_update()
        buildlog(buildid, output[0])
        buildlog(buildid, output[1])
    resultsdir = os.path.join(tmpdir, '.build_results')
    os.makedirs(resultsdir)
    output, retcode = repo.build(conf('buildbot.signkey'), conf('buildbot.pbuilderrc'), resultsdir)
    buildlog(buildid, output[0])
    buildlog(buildid, output[1])
    #logging.debug(output[0])
    #logging.debug(output[1])
    # chdir so tar stores bare file names rather than absolute paths.
    os.chdir(resultsdir)
    if not os.listdir(resultsdir) or retcode != 0:
        buildlog(buildid, 'Nothing in results directory. Giving up.')
        return
    tarpath = os.path.join(tmpdir, 'package.tar.gz')
    tar = tarfile.open(tarpath, 'w:gz')
    for name in os.listdir(resultsdir):
        tar.add(name)
    tar.close()
    # StatusHandler/TarballHandler treat the tarball's existence as "done".
    buildlog(buildid, 'Build complete. Results in %s\n' % tarpath)
    data = file(tarpath, 'rb').read()
    buildlog(buildid, 'Built %i byte tarball' % len(data))
    if cburl:
        buildlog(buildid, 'Performing callback: %s' % cburl)
        # Multipart POST of the tarball; the HTTP response is appended to
        # the build log.
        req = Curl()
        req.setopt(req.POST, 1)
        req.setopt(req.URL, str(cburl))
        req.setopt(req.HTTPPOST, [('package', (req.FORM_FILE, str(tarpath)))])
        req.setopt(req.WRITEDATA, file('%s/build.log' % tmpdir, 'a+'))
        req.perform()
        req.close()
def build_worker(gitpath, ref, buildid, cburl, submodules):
    """Fork a child process to run build_thread; the parent returns at once.

    FIX: the child must terminate with os._exit() — previously it returned
    from build_thread and fell back into the parent's (web server) call
    stack, leaving a duplicate server process running after every build.
    """
    if os.fork() == 0:
        try:
            build_thread(gitpath, ref, buildid, cburl, submodules)
        finally:
            # _exit skips atexit/stdio cleanup shared with the parent.
            os._exit(0)
| JeremyGrosser/repoman | repoman/buildbot.py | buildbot.py | py | 7,178 | python | en | code | 84 | github-code | 6 | [
{
"api_name": "os.chdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number"... |
926386452 | import os
from unittest.mock import patch
import pytest
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.externalapis.etherscan import Etherscan
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.typing import ExternalService, ExternalServiceApiCredentials
@pytest.fixture(scope='function')
def temp_etherscan(function_scope_messages_aggregator, tmpdir_factory):
    """Build an Etherscan client backed by a throwaway user DB.

    If ETHERSCAN_API_KEY is set in the environment it is stored as the
    service credential, otherwise the client runs unauthenticated.
    """
    directory = tmpdir_factory.mktemp('data')
    db = DBHandler(
        user_data_dir=directory,
        password='123',
        msg_aggregator=function_scope_messages_aggregator,
        initial_settings=None,
    )
    # Test with etherscan API key
    api_key = os.environ.get('ETHERSCAN_API_KEY', None)
    if api_key:
        db.add_external_service_credentials(credentials=[
            ExternalServiceApiCredentials(service=ExternalService.ETHERSCAN, api_key=api_key),
        ])
    etherscan = Etherscan(database=db, msg_aggregator=function_scope_messages_aggregator)
    return etherscan
def patch_etherscan(etherscan):
    """Patch the client's HTTP session so the first GET returns etherscan's
    rate-limit error payload and every later GET returns a successful
    eth_call result ("0x1337")."""
    count = 0
    def mock_requests_get(_url):
        nonlocal count
        if count == 0:
            # Verbatim etherscan rate-limit response body.
            response = (
                '{"status":"0","message":"NOTOK",'
                '"result":"Max rate limit reached, please use API Key for higher rate limit"}'
            )
        else:
            response = '{"jsonrpc":"2.0","id":1,"result":"0x1337"}'
        count += 1
        return MockResponse(200, response)
    return patch.object(etherscan.session, 'get', wraps=mock_requests_get)
def test_maximum_rate_limit_reached(temp_etherscan):
    """
    Test that we can handle etherscan's rate limit response properly
    Regression test for https://github.com/rotki/rotki/issues/772
    """
    etherscan = temp_etherscan
    etherscan_patch = patch_etherscan(etherscan)
    with etherscan_patch:
        # First GET hits the mocked rate-limit error; the client must retry
        # and succeed on the second GET.
        result = etherscan.eth_call(
            '0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4',
            '0xc455279100000000000000000000000027a2eaaa8bebea8d23db486fb49627c165baacb5',
        )
    assert result == '0x1337'
{
"api_name": "rotkehlchen.db.dbhandler.DBHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "rot... |
38544511106 |
import cv2
import mss
from PIL import Image
import numpy as np
import time
import json
import math
# Map-marker data and item groupings shipped alongside the script.
with open('Crypt.json', 'r') as json_file:
    data = json.load(json_file)
with open('ItemGroups.json', 'r') as json_file:
    item_data = json.load(json_file)
# record video of screen using cv2
fps = 30
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('output.mp4', fourcc, fps, (2560, 1440))
# Full-screen capture region (assumes a 2560x1440 display).
mon = {'left': 0, 'top': 0, 'width': 2560, 'height': 1440}
map_unfound = cv2.imread('Crypt_06.png')
map_found = map_unfound # Assign default value
map_unfound_grey = cv2.cvtColor(map_found, cv2.COLOR_BGR2GRAY)
# Template-match threshold; lowered once the correct map is identified.
MIN_CONFIDENCE = 0.55
map_count = 1
resized = False
def click_event(event, x, y, flags, param):
    """Mouse callback: mark left-clicks on the map window (mutates the
    module-level map_found image) and print the clicked pixel."""
    if event == cv2.EVENT_LBUTTONDOWN:
        # Draw a blue dot at the clicked location
        cv2.circle(map_found, (x, y), 5, (255, 0, 0), -1)
        # Log the coordinates of the click
        print(f'Clicked at ({x}, {y})')
def transform(point, map, scale=1):
    """Map a (lat, lng)-style point from Crypt.json space onto the map image.

    Simplified from the original chained arithmetic, whose net effect was:
      x -> w - 2*scale*x   (the 1*x + 0 terms were identities)
      y -> 2*scale*y       (the two ``h -`` steps cancel each other)

    :param point: (x, y) pair in map-data coordinates.
    :param map:   image array; only its shape (h, w, channels) is used.
                  (name kept for API compatibility although it shadows the builtin)
    :param scale: optional uniform scale factor applied before mapping.
    :return: transformed (x, y) tuple in image pixel space.
    """
    h, w, _ = map.shape
    x, y = point
    return (w - 2 * scale * x, 2 * scale * y)
# Main loop: grab the screen, record it, and template-match the in-game
# minimap corner against the candidate map images until one matches.
with mss.mss() as sct:
    detected_location = False
    while True:
        img = sct.grab(mon)
        frame = Image.frombytes(
            'RGB',
            (img.width, img.height),
            img.rgb,
        )
        frame = np.array( frame)
        out.write(frame)
        # Resize the frame, Convert to grayscale. 1440p
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # Crop to the minimap region in the bottom-right of the screen.
        frame = frame[1160:1380, 2240:2460]
        frame = cv2.resize(frame, (int(frame.shape[1] * 0.8), int(frame.shape[0] * 0.8)))
        if not(detected_location):
            # Cycle through the candidate map images (Crypt_01..Crypt_06).
            if(map_count < 6):
                map_count += 1
            else:
                map_count = 1
            map_unfound = cv2.imread(f'Crypt_0{map_count}.png')
            map_unfound_grey = cv2.cvtColor(map_unfound, cv2.COLOR_BGR2GRAY)
            # Normalize and trim the map border, then shrink to display size.
            map_unfound = cv2.resize(map_unfound, (1100,1100))
            map_unfound = map_unfound[86:1010, 87:1002]
            map_unfound = cv2.resize(map_unfound, (690,690))
            map_unfound_grey = cv2.cvtColor(map_unfound, cv2.COLOR_BGR2GRAY)
            resized = True
        else:
            # Once locked on, accept weaker matches for position tracking.
            MIN_CONFIDENCE = 0.32
        map_found = map_unfound
        cv2.imshow('map ' + str(map_count), map_found)
        # Overlay the known markers for this map from Crypt.json.
        if "map" + str(map_count) in data:
            for entry in data["map" + str(map_count)]:
                entry_id = entry.get("id")
                coordinates = entry.get("coordinates")
                lat, lng = transform((coordinates["lat"], coordinates["lng"]), map_found)
                # Empirical pixel offset to align markers with the image.
                lat += 50; lng -= 55
                for item in item_data["Golden Chest"]:
                    if(entry_id == item):
                        cv2.circle(map_found, (int(lng), int(lat)), 5, (23, 229, 232), -1)
                        break
                if(entry_id == "Id_Spawner_Props_Statue01"):
                    cv2.circle(map_found, (int(lng), int(lat)), 5, (65, 232, 23), -1)
                if(entry_id == "BP_CryptEscape"):
                    cv2.circle(map_found, (int(lng), int(lat)), 5, (232, 159, 23), -1)
                #if(entry_id == "SpawnPoint"):
                    #cv2.circle(map_found, (int(lng), int(lat)), 5, (245, 27, 238), -1)
        cv2.setMouseCallback('map ' + str(map_count), click_event)
        # Template-match the minimap crop against the current map image.
        result = cv2.matchTemplate(map_unfound_grey, frame, cv2.TM_CCOEFF_NORMED)
        if (result.max() > MIN_CONFIDENCE):
            detected_location = True
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            # Draw player's location on reference map
            cv2.circle(
                map_found,
                (int(max_loc[0] + 25 + frame.shape[1] / 2),
                int(max_loc[1] - 25 + frame.shape[0] / 2)),
                5, (0, 0, 255), -1)
        cv2.imshow('frame',frame)
        # 'q' quits the capture loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time.sleep(1/fps)
out.release()
| debug-it/DarkAndDarker-MapHelper | record.py | record.py | py | 4,278 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"lin... |
9345182500 | from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import lib.db
from lib.helper import remove_tags, open_selenium
from lib.log import log_text as log
url = "https://2e.aonprd.com/Ancestries.aspx"
def upload_heritage_data():
log("Starting Heritage Upload Preperation")
heritage_data = organize_heritage_data()
log("Preparation Done")
log("Clearing Table")
conn, row_count, result = lib.db.query_database("DELETE FROM official_heritages;", get_result=True, close_conn=False)
log("Starting INSERT Process")
for heritage in heritage_data:
log("Inserting " + heritage + " Into Database")
conn = lib.db.query_database("INSERT INTO official_heritages VALUES (" + heritage + ");", connection=conn, close_conn=False)[0]
log("Commiting Database Changes")
conn.commit()
log("Closing Connection")
conn.close()
def grab_heritage_data():
heritage_output = []
log("Opening Browser")
driver = open_selenium()
log("Going to Page: " + url)
driver.get(url)
log("Waiting for Page to Load")
time.sleep(5)
log("Getting Page Source")
html = driver.page_source
log("Setting up BeautifulSoup with Source")
soup = BeautifulSoup(html, "html.parser")
log("Finding Initial HTML Container")
container = soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
log("Finding All Categories in Container")
name_list = container.find_all("h2")
for item in name_list:
log("Grabbing Name in Category")
elements = item.text.split("\n")
log("Found: " + elements[0])
log("Getting All Links in Category")
links = item.find_all("a")
output_link = ""
log("Finding Ancestry Page Link")
for link in links:
if link.get("href").startswith("Ancestries.aspx"):
output_link = "https://2e.aonprd.com/" + link.get("href")
log("Found: " + output_link)
break
log("Opening Ancestry Page")
ancestry_driver = open_selenium()
ancestry_driver.get(output_link)
log("Waiting for Page to Load")
time.sleep(5)
log("Getting Ancestry Page Source")
ancestry_html = ancestry_driver.page_source
log("Setting up BeautifulSoup with Page Source")
ancestry_soup = BeautifulSoup(ancestry_html, "html.parser")
log("Finding Sub Navigation")
sub_nav_container = ancestry_soup.find(id="ctl00_RadDrawer1_Content_MainContent_SubNavigation")
sub_nav_list = sub_nav_container.find_all("h2")
log("Getting All Sub Navigation Headings")
heritage_list_link = ""
log("Searching Headings for Heritage Link")
for nav in sub_nav_list:
nav_links = nav.find_all("a")
for n in nav_links:
if n.get("href").startswith("Heritages.aspx"):
heritage_list_link = "https://2e.aonprd.com/" + n.get("href")
log(f"Found Heritage Link for {elements[0]}: {heritage_list_link}")
log("Closing Ancestry Browser. Opening Heritage Browser")
ancestry_driver.close()
heritage_driver = open_selenium()
heritage_driver.get(heritage_list_link)
log("Waiting for Page to Load")
time.sleep(5)
log("Setting up BeautifulSoup with Page Source")
heritage_html = heritage_driver.page_source
heritage_soup = BeautifulSoup(heritage_html, "html.parser")
log("Getting Heritage List Container")
heritage_container = heritage_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
log("Getting All Headings")
heritage_list = heritage_container.find_all("h2")
heritage_name = ""
heritage_link = ""
heritage_summary = ""
log("Starting Search for Heritages")
i = 0
for heritage in heritage_list:
heritage_links = heritage.find_all("a")
for l in heritage_links:
if l.get("href").startswith("Heritages.aspx"):
heritage_name = l.text.split("\n")[0]
log("Found Heritage: " + heritage_name)
heritage_link = "https://2e.aonprd.com/" + l.get("href")
link_pos = heritage_html.find(l.get("href"))
print(f"Link Pos: {link_pos}")
versatile_heritage_pos = heritage_html.index("<h1 class=\"title\">Versatile Heritages</h1>")
half_human_heritage_pos = heritage_html.find("<h1 class=\"title\">Half-Human Heritages")
if half_human_heritage_pos == -1 or link_pos < half_human_heritage_pos:
start_pos = heritage_html.index("<br>", link_pos) + len("<br>")
else:
first_break_pos = heritage_html.index("<br>", link_pos) + len("<br>")
start_pos = heritage_html.index("<br>", first_break_pos) + len("<br>")
h3_pos = heritage_html.find("<h3", start_pos)
br_pos = heritage_html.find("<br>", start_pos)
end_pos = 0
print(f"H3 Pos: {h3_pos}; BR Pos: {br_pos}")
if h3_pos < br_pos and h3_pos != -1:
end_pos = h3_pos
elif br_pos < h3_pos and br_pos != -1:
end_pos = br_pos
elif br_pos != -1 and h3_pos == -1:
end_pos = br_pos
elif h3_pos != -1 and br_pos == -1:
end_pos = h3_pos
if end_pos > versatile_heritage_pos:
end_pos = versatile_heritage_pos
if start_pos > versatile_heritage_pos:
break
print(f"End Pos: {end_pos}; Next 50 Characters: {heritage_html[end_pos: end_pos + 50]}")
heritage_summary = heritage_html[start_pos:end_pos].strip()
print(heritage_summary)
if heritage_summary.find("<b>Source</b>") > -1:
end_pos += 3
temp_pos = heritage_html.find("<b>Source</b>", start_pos)
start_pos = heritage_html.find("<br>", temp_pos)
h3_pos = heritage_html.find("<h3", end_pos)
br_pos = heritage_html.find("<br>", end_pos)
if h3_pos < br_pos and h3_pos != -1:
end_pos = h3_pos
elif br_pos < h3_pos and br_pos != -1:
end_pos = br_pos
if end_pos > versatile_heritage_pos:
end_pos = versatile_heritage_pos
if start_pos > versatile_heritage_pos:
break
heritage_summary = heritage_html[start_pos:end_pos].strip()
if heritage_summary.find("PFS Note") > -1:
end_pos += 3
temp_pos = heritage_html.find("PFS Note", start_pos)
start_pos = heritage_html.find("<br>", temp_pos)
h3_pos = heritage_html.find("<h3", end_pos)
br_pos = heritage_html.find("<br>", end_pos)
if h3_pos < br_pos and h3_pos != -1:
end_pos = h3_pos
elif br_pos < h3_pos and br_pos != -1:
end_pos = br_pos
if end_pos > versatile_heritage_pos:
end_pos = versatile_heritage_pos
if start_pos > versatile_heritage_pos:
break
heritage_summary = heritage_html[start_pos:end_pos].strip()
heritage_summary = remove_tags(heritage_summary, tag_to_remove="h2", remove_inside=True)
heritage_summary = remove_tags(heritage_summary, tag_to_remove="table", remove_inside=True)
heritage_summary = remove_tags(heritage_summary, tag_to_remove="i")
heritage_summary = remove_tags(heritage_summary, tag_to_remove="u")
heritage_summary = remove_tags(heritage_summary, tag_to_remove="b")
heritage_summary = remove_tags(heritage_summary, tag_to_remove="a")
log(str([heritage_name, heritage_link, elements[0], heritage_summary]))
heritage_output.append([heritage_name, heritage_link, elements[0], heritage_summary])
nav_container = soup.find(id="ctl00_RadDrawer1_Content_MainContent_Navigation")
nav_links = nav_container.find_all("a")
for link in nav_links:
if link.get("href").endswith("Versatile=true"):
versatile_heritage_link = "https://2e.aonprd.com/" + link.get("href")
log("Opening Versatile Heritage Browser")
versatile_heritage_driver = open_selenium()
versatile_heritage_driver.get(versatile_heritage_link)
log("Waiting for Page to Load")
time.sleep(5)
log("Setting up BeautifulSoup with Page Source")
versatile_heritage_html = versatile_heritage_driver.page_source
versatile_heritage_soup = BeautifulSoup(versatile_heritage_html, "html.parser")
log("Getting Heritage List Container")
versatile_heritage_container = versatile_heritage_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput")
log("Getting All Headings")
versatile_heritage_list = versatile_heritage_container.find_all("h2")
versatile_heritage_name = ""
versatile_heritage_link = ""
versatile_heritage_summary = ""
log("Searching For Versatile Heritages")
for heritage in versatile_heritage_list:
vh_links = heritage.find_all("a")
for l in vh_links:
if l.get("href").startswith("Ancestries.aspx"):
versatile_heritage_name = l.text.split("\n")[0]
log("Found Heritage: " + versatile_heritage_name)
vh_ancestry_link = "https://2e.aonprd.com/" + l.get("href")
log("Opening Versatile Heritage Ancestry Browser")
vh_ancestry_driver = open_selenium()
vh_ancestry_driver.get(vh_ancestry_link)
log("Waiting for Page to Load")
time.sleep(5)
log("Setting up BeautifulSoup with Page Source")
vh_ancestry_html = vh_ancestry_driver.page_source
vh_ancestry_soup = BeautifulSoup(vh_ancestry_html, "html.parser")
content_pos = vh_ancestry_soup.find(id="ctl00_RadDrawer1_Content_MainContent_DetailedOutput").sourcepos
vh_h1_pos = vh_ancestry_html.index("<h1 class=\"title\">Versatile Heritage</h1>", content_pos)
vh_h2_pos = vh_ancestry_html.index("</h2>", vh_h1_pos) + len("</h2>")
break_pos_1 = vh_ancestry_html.index("<br>", vh_h2_pos) + len("<br>")
break_pos_2 = vh_ancestry_html.index("<br>", break_pos_1) + len("<br>")
break_pos_3 = vh_ancestry_html.index("<br>", break_pos_2) + len("<br>")
end_pos = 0
span_pos = vh_ancestry_html.find("</span>", break_pos_3)
h3_pos = vh_ancestry_html.find("<h3 class", break_pos_3)
if h3_pos == -1:
end_pos = span_pos
else:
if span_pos < h3_pos and span_pos != -1:
end_pos = span_pos
elif h3_pos < span_pos and h3_pos != -1:
end_pos = h3_pos
versatile_heritage_summary = vh_ancestry_html[break_pos_3:end_pos]
versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="h2", remove_inside=True)
versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="table", remove_inside=True)
versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="i")
versatile_heritage_summary = remove_tags(versatile_heritage_summary, tag_to_remove="a")
log(str([versatile_heritage_name, vh_ancestry_link, "Versatile", versatile_heritage_summary]))
heritage_output.append([versatile_heritage_name, vh_ancestry_link, "Versatile", versatile_heritage_summary])
return heritage_output
def organize_heritage_data():
log("Getting Heritage Data")
output = grab_heritage_data()
organized_data = []
log("Starting to Organize Heritage Data")
for heritage in output:
organized_data.append(f"\"{heritage[0]}\", \"{heritage[1]}\", \"{heritage[2]}\", \"{heritage[3]}\"")
log(f"Added \"{heritage[0]}\", \"{heritage[1]}\", \"{heritage[2]}\", \"{heritage[3]}\" to Organized Data")
return organized_data
| sean-francis113/pf2edatascraper | lib/heritages.py | heritages.py | py | 13,500 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "lib.log.log_text",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lib.log.log_text",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lib.log.log_text",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lib.db.db.query_da... |
16312524711 | import jax.numpy as np
from jax import grad, nn, random, jit
from jax.experimental import stax, optimizers
from jax.experimental.optimizers import l2_norm
from jax.numpy import linalg
from jax.experimental.stax import Dense, Relu, Tanh, Conv, MaxPool, Flatten, Softmax, LogSoftmax, Sigmoid
from jax.tree_util import tree_flatten, tree_unflatten, tree_map
from jax.nn import log_sigmoid
from mnist import mnist
from tqdm import tqdm
import itertools
import pickle
LogSigmoid = elementwise(log_sigmoid)
def model(rng):
"""Feature extraction network."""
init_params, forward = stax.serial(
Conv(16, (8, 8), padding='SAME', strides=(2, 2)),
Relu,
MaxPool((2, 2), (1, 1)),
Conv(32, (4, 4), padding='VALID', strides=(2, 2)),
Relu,
MaxPool((2, 2), (1, 1)),
Flatten,
Dense(32),
Relu,
Dense(1),
LogSigmoid,
)
temp, rng = random.split(rng)
params = init_params(temp, (-1, 28, 28, 1))[1]
return params, forward
def data_stream(rng, batch_size, X, y):
num_complete_batches, leftover = divmod(X.shape[0], batch_size)
num_batches = num_complete_batches + bool(leftover)
while True:
temp, rng = random.split(rng)
perm = random.permutation(temp, X.shape[0])
for i in range(num_batches):
batch_idx = perm[i*batch_size:(i+1)*batch_size]
yield X[batch_idx], y[batch_idx]
if __name__ == "__main__":
rng = random.PRNGKey(0)
X, y, X_test, y_test = mnist()
X, X_test = X.reshape(-1, 28, 28, 1), X_test.reshape(-1, 28, 28, 1)
y, y_test = (np.argmax(y, 1) % 2 == 1).astype(np.float32), (np.argmax(y_test, 1) % 1 == 1).astype(np.float32)
temp, rng = random.split(rng)
params, predict = model(temp)
def loss(params, batch, l2=0.05):
X, y = batch
y_hat = predict(params, X).reshape(-1)
return -np.mean(y * np.log(y_hat) + (1. - y) * np.log(1. - y_hat))
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
iterations = 5000
batch_size = 64
step_size = 0.001
opt_init, opt_update, get_params = optimizers.adam(step_size)
opt_state = opt_init(params)
temp, rng = random.split(rng)
batches = data_stream(temp, batch_size, X, y)
for i in tqdm(range(iterations)):
opt_state = update(i, opt_state, next(batches))
if i % 1000 == 0:
params = get_params(opt_state)
print('Loss: {:.4f}'.format(loss(params, (X, y))))
params = get_params(opt_state)
exit()
pickle.dump(lr_params, open('logistic_regression_params.pkl', 'wb'))
pickle.dump(logistic_regression, open('logistic_regression.pkl', 'wb'))
pickle.dump(fe_params, open('feature_extractor_params.pkl', 'wb'))
pickle.dump(feature_extractor, open('feature_extractor.pkl', 'wb'))
| ChrisWaites/data-deletion | src/d2d/projected_mnist/debug_for_seth.py | debug_for_seth.py | py | 2,756 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "jax.nn.log_sigmoid",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "jax.experimental.stax.serial",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jax.experimental.stax.Relu",
"line_number": 22,
"usage_type": "argument"
},
{
... |
14852849903 | from collections import ChainMap
import yaml
with open('eve_static_data/invFlags.yaml') as flags_file:
INV_FLAGS = {item['flagID']: item for item in yaml.full_load(flags_file)}
INVENTORY_POSITIONS = [
*range(92, 99+1), # Rigs
*range(27, 34+1), # High Slots
*range(19, 26+1), # Med Slots
*range(11, 18+1), # Low Slots
0 # Everything Else
]
| DeForce/py_killboard | helpers/static.py | static.py | py | 386 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "yaml.full_load",
"line_number": 6,
"usage_type": "call"
}
] |
70383323069 | import copy
from typing import List, Optional
def deep_merge_dicts(original: dict, new_dict: dict) -> dict:
"""
Overview:
Merge two dicts by calling ``deep_update``
Arguments:
- original (:obj:`dict`): Dict 1.
- new_dict (:obj:`dict`): Dict 2.
Returns:
- merged_dict (:obj:`dict`): A new dict that is d1 and d2 deeply merged.
"""
original = original or {}
new_dict = new_dict or {}
merged = copy.deepcopy(original)
if new_dict: # if new_dict is neither empty dict nor None
deep_update(merged, new_dict, True, [])
return merged
def deep_update(
original: dict,
new_dict: dict,
new_keys_allowed: bool = False,
whitelist: Optional[List[str]] = None,
override_all_if_type_changes: Optional[List[str]] = None
):
"""
Overview:
Update original dict with values from new_dict recursively.
Arguments:
- original (:obj:`dict`): Dictionary with default values.
- new_dict (:obj:`dict`): Dictionary with values to be updated
- new_keys_allowed (:obj:`bool`): Whether new keys are allowed.
- whitelist (:obj:`Optional[List[str]]`):
List of keys that correspond to dict
values where new subkeys can be introduced. This is only at the top
level.
- override_all_if_type_changes(:obj:`Optional[List[str]]`):
List of top level
keys with value=dict, for which we always simply override the
entire value (:obj:`dict`), if the "type" key in that value dict changes.
.. note::
If new key is introduced in new_dict, then if new_keys_allowed is not
True, an error will be thrown. Further, for sub-dicts, if the key is
in the whitelist, then new subkeys can be introduced.
"""
whitelist = whitelist or []
override_all_if_type_changes = override_all_if_type_changes or []
for k, value in new_dict.items():
if k not in original and not new_keys_allowed:
raise RuntimeError("Unknown config parameter `{}`. Base config have: {}.".format(k, original.keys()))
# Both original value and new one are dicts.
if isinstance(original.get(k), dict) and isinstance(value, dict):
# Check old type vs old one. If different, override entire value.
if k in override_all_if_type_changes and \
"type" in value and "type" in original[k] and \
value["type"] != original[k]["type"]:
original[k] = value
# Whitelisted key -> ok to add new subkeys.
elif k in whitelist:
deep_update(original[k], value, True)
# Non-whitelisted key.
else:
deep_update(original[k], value, new_keys_allowed)
# Original value not a dict OR new value not a dict:
# Override entire value.
else:
original[k] = value
return original
| opendilab/GoBigger | gobigger/utils/config_utils.py | config_utils.py | py | 2,978 | python | en | code | 483 | github-code | 6 | [
{
"api_name": "copy.deepcopy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line... |
73500508668 | import argparse
import copy
import csv
import os
import warnings
import numpy
import torch
import tqdm
import yaml
from torch.utils import data
from nets import nn
from utils import util
from utils.dataset import Dataset
warnings.filterwarnings("ignore")
def learning_rate(args, params):
def fn(x):
return (1 - x / args.epochs) * (1.0 - params['lrf']) + params['lrf']
return fn
def train(args, params):
# Model
model = nn.yolo_v5_n(len(params['names'].values())).cuda()
# Model attributes
params['box'] *= 3 / model.head.nl
params['obj'] *= (args.input_size / 640) ** 2 * 3 / model.head.nl
params['cls'] *= len(params['names'].values()) / 80 * 3 / model.head.nl
# Optimizer
accumulate = max(round(64 / (args.batch_size * args.world_size)), 1)
params['weight_decay'] *= args.batch_size * args.world_size * accumulate / 64
p = [], [], []
for v in model.modules():
if hasattr(v, 'bias') and isinstance(v.bias, torch.nn.Parameter):
p[2].append(v.bias)
if isinstance(v, torch.nn.BatchNorm2d):
p[1].append(v.weight)
elif hasattr(v, 'weight') and isinstance(v.weight, torch.nn.Parameter):
p[0].append(v.weight)
optimizer = torch.optim.SGD(p[2], params['lr0'], params['momentum'], nesterov=True)
optimizer.add_param_group({'params': p[0], 'weight_decay': params['weight_decay']})
optimizer.add_param_group({'params': p[1]})
del p
# Scheduler
lr = learning_rate(args, params)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr, last_epoch=-1)
# EMA
ema = util.EMA(model) if args.local_rank == 0 else None
filenames = []
with open('../Dataset/COCO/train2017.txt') as reader:
for filename in reader.readlines():
filename = filename.rstrip().split('/')[-1]
filenames.append('../Dataset/COCO/images/train2017/' + filename)
dataset = Dataset(filenames, args.input_size, params, True)
if args.world_size <= 1:
sampler = None
else:
sampler = data.distributed.DistributedSampler(dataset)
loader = data.DataLoader(dataset, args.batch_size, sampler is None, sampler,
num_workers=8, pin_memory=True, collate_fn=Dataset.collate_fn)
util.check_anchors(dataset, model, args, params)
model.half().float() # pre-reduce anchor precision
if args.world_size > 1:
# DDP mode
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(module=model,
device_ids=[args.local_rank],
output_device=args.local_rank)
# Start training
best = 0
num_batch = len(loader)
amp_scale = torch.cuda.amp.GradScaler()
criterion = util.ComputeLoss(model, params)
num_warmup = max(round(params['warmup_epochs'] * num_batch), 100)
with open('weights/step.csv', 'w') as f:
if args.local_rank == 0:
writer = csv.DictWriter(f, fieldnames=['epoch', 'mAP@50', 'mAP'])
writer.writeheader()
for epoch in range(args.epochs):
model.train()
m_loss = util.AverageMeter()
if args.world_size > 1:
sampler.set_epoch(epoch)
p_bar = enumerate(loader)
if args.local_rank == 0:
print(('\n' + '%10s' * 3) % ('epoch', 'memory', 'loss'))
if args.local_rank == 0:
p_bar = tqdm.tqdm(p_bar, total=num_batch) # progress bar
optimizer.zero_grad()
for i, (samples, targets, _) in p_bar:
x = i + num_batch * epoch # number of iterations
samples = samples.cuda().float() / 255
targets = targets.cuda()
# Warmup
if x <= num_warmup:
xp = [0, num_warmup]
fp = [1, 64 / (args.batch_size * args.world_size)]
accumulate = max(1, numpy.interp(x, xp, fp).round())
for j, y in enumerate(optimizer.param_groups):
if j == 0:
fp = [params['warmup_bias_lr'], y['initial_lr'] * lr(epoch)]
else:
fp = [0.0, y['initial_lr'] * lr(epoch)]
y['lr'] = numpy.interp(x, xp, fp)
if 'momentum' in y:
fp = [params['warmup_momentum'], params['momentum']]
y['momentum'] = numpy.interp(x, xp, fp)
# Forward
with torch.cuda.amp.autocast():
outputs = model(samples) # forward
loss = criterion(outputs, targets)
m_loss.update(loss.item(), samples.size(0))
loss *= args.batch_size # loss scaled by batch_size
loss *= args.world_size # gradient averaged between devices in DDP mode
# Backward
amp_scale.scale(loss).backward()
# Optimize
if x % accumulate == 0:
amp_scale.step(optimizer) # optimizer.step
amp_scale.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Log
if args.local_rank == 0:
memory = f'{torch.cuda.memory_reserved() / 1E9:.3g}G' # (GB)
s = ('%10s' * 2 + '%10.4g') % (f'{epoch + 1}/{args.epochs}', memory, m_loss.avg)
p_bar.set_description(s)
# Scheduler
scheduler.step()
if args.local_rank == 0:
# mAP
last = test(args, params, ema.ema)
writer.writerow({'mAP': str(f'{last[1]:.3f}'),
'epoch': str(epoch + 1).zfill(3),
'mAP@50': str(f'{last[0]:.3f}')})
f.flush()
# Update best mAP
if last[1] > best:
best = last[1]
# Save model
ckpt = {'model': copy.deepcopy(ema.ema).half()}
# Save last, best and delete
torch.save(ckpt, './weights/last.pt')
if best == last[1]:
torch.save(ckpt, './weights/best.pt')
del ckpt
if args.local_rank == 0:
util.strip_optimizer('./weights/best.pt') # strip optimizers
util.strip_optimizer('./weights/last.pt') # strip optimizers
torch.cuda.empty_cache()
@torch.no_grad()
def test(args, params, model=None):
filenames = []
with open('../Dataset/COCO/val2017.txt') as reader:
for filename in reader.readlines():
filename = filename.rstrip().split('/')[-1]
filenames.append('../Dataset/COCO/images/val2017/' + filename)
dataset = Dataset(filenames, args.input_size, params, False)
loader = data.DataLoader(dataset, 4, False, num_workers=4,
pin_memory=True, collate_fn=Dataset.collate_fn)
if model is None:
model = torch.load('./weights/best.pt', map_location='cuda')['model']
model.half()
# Configure
model.eval()
iou_v = torch.linspace(0.5, 0.95, 10).cuda() # iou vector for mAP@0.5:0.95
n_iou = iou_v.numel()
m_pre = 0.
m_rec = 0.
map50 = 0.
mean_ap = 0.
metrics = []
p_bar = tqdm.tqdm(loader, desc=('%10s' * 3) % ('precision', 'recall', 'mAP'))
for samples, targets, shapes in p_bar:
samples = samples.cuda()
targets = targets.cuda()
samples = samples.half() # uint8 to fp16/32
samples = samples / 255.0 # 0 - 255 to 0.0 - 1.0
_, _, h, w = samples.shape # batch size, channels, height, width
# Inference
outputs = model(samples)
# NMS
targets[:, 2:] *= torch.tensor((w, h, w, h)).cuda() # to pixels
outputs = util.non_max_suppression(outputs, 0.001, 0.6)
# Metrics
for i, output in enumerate(outputs):
labels = targets[targets[:, 0] == i, 1:]
correct = torch.zeros(output.shape[0], n_iou, dtype=torch.bool).cuda()
if output.shape[0] == 0:
if labels.shape[0]:
metrics.append((correct, *torch.zeros((3, 0)).cuda()))
continue
detections = output.clone()
util.scale(detections[:, :4], samples[i].shape[1:], shapes[i][0], shapes[i][1])
# Evaluate
if labels.shape[0]:
tbox = labels[:, 1:5].clone() # target boxes
tbox[:, 0] = labels[:, 1] - labels[:, 3] / 2 # top left x
tbox[:, 1] = labels[:, 2] - labels[:, 4] / 2 # top left y
tbox[:, 2] = labels[:, 1] + labels[:, 3] / 2 # bottom right x
tbox[:, 3] = labels[:, 2] + labels[:, 4] / 2 # bottom right y
util.scale(tbox, samples[i].shape[1:], shapes[i][0], shapes[i][1])
correct = numpy.zeros((detections.shape[0], iou_v.shape[0]))
correct = correct.astype(bool)
t_tensor = torch.cat((labels[:, 0:1], tbox), 1)
iou = util.box_iou(t_tensor[:, 1:], detections[:, :4])
correct_class = t_tensor[:, 0:1] == detections[:, 5]
for j in range(len(iou_v)):
x = torch.where((iou >= iou_v[j]) & correct_class)
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1)
matches = matches.cpu().numpy()
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[numpy.unique(matches[:, 1], return_index=True)[1]]
matches = matches[numpy.unique(matches[:, 0], return_index=True)[1]]
correct[matches[:, 1].astype(int), j] = True
correct = torch.tensor(correct, dtype=torch.bool, device=iou_v.device)
metrics.append((correct, output[:, 4], output[:, 5], labels[:, 0]))
# Compute metrics
metrics = [torch.cat(x, 0).cpu().numpy() for x in zip(*metrics)] # to numpy
if len(metrics) and metrics[0].any():
tp, fp, m_pre, m_rec, map50, mean_ap = util.compute_ap(*metrics)
# Print results
print('%10.3g' * 3 % (m_pre, m_rec, mean_ap))
# Return results
model.float() # for training
return map50, mean_ap
@torch.no_grad()
def demo(args):
import cv2
# Load model
model = torch.load('./weights/best.pt', map_location='cuda')['model'].float()
model.half()
model.eval()
camera = cv2.VideoCapture(0)
# Check if camera opened successfully
if not camera.isOpened():
print("Error opening video stream or file")
# Read until video is completed
while camera.isOpened():
# Capture frame-by-frame
success, frame = camera.read()
if success:
image = frame.copy()
shape = image.shape[:2]
r = args.input_size / max(shape[0], shape[1])
if r != 1:
resample = cv2.INTER_LINEAR if r > 1 else cv2.INTER_AREA
image = cv2.resize(image, dsize=(int(shape[1] * r), int(shape[0] * r)), interpolation=resample)
height, width = image.shape[:2]
# Scale ratio (new / old)
r = min(1.0, args.input_size / height, args.input_size / width)
# Compute padding
pad = int(round(width * r)), int(round(height * r))
w = (args.input_size - pad[0]) / 2
h = (args.input_size - pad[1]) / 2
if (width, height) != pad: # resize
image = cv2.resize(image, pad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(h - 0.1)), int(round(h + 0.1))
left, right = int(round(w - 0.1)), int(round(w + 0.1))
image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT) # add border
# Convert HWC to CHW, BGR to RGB
x = image.transpose((2, 0, 1))[::-1]
x = numpy.ascontiguousarray(x)
x = torch.from_numpy(x)
x = x.unsqueeze(dim=0)
x = x.cuda()
x = x.half()
x = x / 255
# Inference
outputs = model(x)
# NMS
outputs = util.non_max_suppression(outputs, 0.25, 0.7)
for output in outputs:
output[:, [0, 2]] -= w # x padding
output[:, [1, 3]] -= h # y padding
output[:, :4] /= min(height / shape[0], width / shape[1])
output[:, 0].clamp_(0, shape[1]) # x1
output[:, 1].clamp_(0, shape[0]) # y1
output[:, 2].clamp_(0, shape[1]) # x2
output[:, 3].clamp_(0, shape[0]) # y2
for box in output:
box = box.cpu().numpy()
x1, y1, x2, y2, score, index = box
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imshow('Frame', frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
camera.release()
# Closes all the frames
cv2.destroyAllWindows()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input-size', default=640, type=int)
parser.add_argument('--batch-size', default=32, type=int)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--epochs', default=600, type=int)
parser.add_argument('--train', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--demo', action='store_true')
args = parser.parse_args()
args.local_rank = int(os.getenv('LOCAL_RANK', 0))
args.world_size = int(os.getenv('WORLD_SIZE', 1))
if args.world_size > 1:
torch.cuda.set_device(device=args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
if args.local_rank == 0:
if not os.path.exists('weights'):
os.makedirs('weights')
util.setup_seed()
util.setup_multi_processes()
with open(os.path.join('utils', 'args.yaml'), errors='ignore') as f:
params = yaml.safe_load(f)
if args.train:
train(args, params)
if args.test:
test(args, params)
if args.demo:
demo(args)
if __name__ == "__main__":
main()
| jahongir7174/YOLOv5-pt | main.py | main.py | py | 14,941 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nets.nn.yolo_v5_n",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nets.nn",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"lin... |
34961662452 | import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
class DynamicEvolutionStats:
def __init__(self, seeds, specialist_type):
self.seeds = seeds
self.specialist_type = specialist_type
self.init_data()
def init_data(self):
self.data = {}
for seed in self.seeds:
self.data[seed] = pd.DataFrame(columns=['generation', 'score', 'cycle'])
def get_data(self, suffix='score'):
for seed in self.seeds:
df = pd.read_csv(f'../../data/specialist/dynamic_evolution/{self.specialist_type}/{seed}_{suffix}.csv')
self.data[seed] = pd.concat([self.data[seed], df]).query("generation >= 1600")
def get_seed(self, seed):
return self.data.get(seed)
def describe_seeds(self):
describe = []
for seed in self.seeds:
df = self.get_seed(seed)
describe.append([
df.score.mean(),
len(df.query('cycle == "score"')),
len(df.query('cycle == "fit"')),
])
return pd.DataFrame(
describe,
columns=['score', 'score_time', 'fit_time']
)
def plot_seeds_scatter(self):
for seed in self.seeds:
df = self.get_seed(seed)
plt.scatter(df.generation, df.score, s=1)
plt.legend(self.seeds)
plt.title(f'All Seeds Specialist Score')
plt.xlabel('generation')
plt.ylabel('score')
plt.show()
def describe_score(self):
df = self.describe_seeds()
plt.boxplot(df.score, labels=['mean'])
plt.title(f'All Seeds Specialist Score Mean')
plt.ylabel('score')
plt.show()
def describe_cycles(self):
df = self.describe_seeds()
plt.boxplot(df[['score_time', 'fit_time']], labels=['score_time', 'fit_time'])
plt.title(f'All Seeds Specialist Cycles')
plt.xlabel('process')
plt.ylabel('cycles')
plt.show()
| arthur-plautz/curriculum-learning | models/specialist/stats/dynamic_evolution_stats.py | dynamic_evolution_stats.py | py | 2,023 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.concat... |
43967691056 | #!/usr/bin/env python
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="split blast results by organism")
parser.add_argument("blast", type=argparse.FileType("r"))
args = parser.parse_args()
blast = [x.split("\t") for x in args.blast.readlines()]
for row in blast:
if "<>" in row[24]:
for i in row[24].strip().split("<>"):
row_copy = row
row_copy[24] = i
print("\t".join(row_copy).strip())
else:
print("\t".join(row).strip())
| TAMU-CPT/galaxy-tools | tools/blast/split_blast.py | split_blast.py | py | 576 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 6,
"usage_type": "call"
}
] |
31877451015 | import os
import math
import copy
import codecs
import numpy as np
import srt
import subprocess
import datetime
from utils import mkdir, basename_without_ext
from voice_detector import VoiceDetector
from tqdm import tqdm
def shift_by_delay(bin_arr2, delay_by_frames):
if delay_by_frames < 0:
return bin_arr2[abs(delay_by_frames):]
return np.concatenate([np.zeros(delay_by_frames).astype(np.uint8), bin_arr2])
def make_list_length_equal(lst1, lst2):
len_lst1 = lst1.shape[0]
len_lst2 = lst2.shape[0]
max_len = max(len_lst1, len_lst2)
return np.concatenate([lst1, np.zeros(max_len - len_lst1).astype(np.uint8)]), np.concatenate([lst2, np.zeros(max_len - len_lst2).astype(np.uint8)])
def error(bin_arr1, bin_arr2):
    """Mean absolute error between two equal-length binary arrays.

    XOR marks every frame where the arrays disagree; the result is the
    fraction of mismatched frames.
    """
    mismatches = bin_arr1.astype(np.uint8) ^ bin_arr2.astype(np.uint8)
    return np.sum(mismatches) / float(len(bin_arr1))
def get_err(tmp_bin_arr1, tmp_bin_arr2, delay_by_frames):
    """Score one candidate delay: shift, align lengths, compute the MAE.

    Returns the ``(delay_by_frames, error)`` pair so that callers can keep
    track of which delay produced which score.
    """
    shifted = shift_by_delay(tmp_bin_arr2, delay_by_frames)
    reference, shifted = make_list_length_equal(tmp_bin_arr1, shifted)
    return delay_by_frames, error(reference, shifted)
class GetSub:
    """Align an .srt subtitle file with a video's voice-activity signal.

    Both the video (via VoiceDetector) and the subtitles are converted into
    binary per-frame "speech happening" arrays; the delay minimizing their
    mean absolute error is applied to every subtitle.
    """
    def __init__(self, aggressiveness, frame_duration_ms, padding_duration_ms):
        # Parameters are forwarded verbatim to the WebRTC-style VAD wrapper.
        self.vad = VoiceDetector(
            aggressiveness, frame_duration_ms, padding_duration_ms)
    def timedelta_to_frame(self, td):
        """Convert a datetime.timedelta into a whole number of VAD frames."""
        # seconds + microseconds expressed in milliseconds, then frame units.
        ms = float(td.seconds) * 1000.0 + float(td.microseconds) * 0.001
        return int(ms / self.vad.frame_duration_ms)
    def binary_array_from_srt(self, srt_path):
        """Parse an .srt file into a binary per-frame speech array.

        Returns (bin_array, delay_range_start, delay_range_end, subs) where
        the delay range is what find_best_delay_milliseconds should scan.
        """
        # Try a handful of common encodings until one parses cleanly.
        common_encodings = ['latin1', 'utf-8', 'utf-16', 'cp1252']
        subs = []
        for encoding in common_encodings:
            try:
                srt_file = codecs.open(srt_path, 'r', encoding=encoding)
                srt_string = srt_file.read()
                srt_file.close()
                subs = np.array(list(srt.parse(srt_string)))
                break
            except BaseException as error:
                # NOTE(review): BaseException also swallows KeyboardInterrupt;
                # if every encoding fails, subs stays empty and the index
                # below raises IndexError -- confirm that is acceptable.
                pass
        start_end_pairs = [(self.timedelta_to_frame(sub.start), self.timedelta_to_frame(sub.end)) for sub in subs]
        # First/last cue frame bound the array size and the search window.
        first_sub_frame = start_end_pairs[0][0]
        last_sub_frame = start_end_pairs[-1][1]
        bin_array = np.zeros(last_sub_frame).astype(np.uint8)
        print('Creating Binary Array from SRT..')
        for start_frame, end_frame in tqdm(start_end_pairs):
            bin_array[start_frame:end_frame] = 1
        # Scan at least +/- 5 seconds worth of frames...
        five_second_delay = int(5 * 1000 / self.vad.frame_duration_ms)
        # ...or 5% of the subtitle span, whichever is larger.
        max_delay = max(five_second_delay, int(len(bin_array) * 0.05))
        return bin_array, -first_sub_frame, max_delay, subs
    def chunks(self, lst, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    def find_best_delay_milliseconds(self, bin_arr1, bin_arr2, delay_range_start, delay_range_end, error_csv_out):
        """Brute-force the frame delay minimizing the MAE; return it in ms.

        ``error_csv_out`` is currently unused (the CSV dump is commented out).
        """
        err = math.inf
        best_delay = 0
        delay_range_len = delay_range_end - delay_range_start
        rows = np.zeros((delay_range_len, 2))
        early_stop = False
        print('Finding Best Delay..')
        #with Parallel(n_jobs=cpus, prefer="threads") as parallel:
        for i, delay_by_frames in tqdm(enumerate(range(delay_range_start, delay_range_end)), total=delay_range_len):
            delay_by_frames, tmp_err = get_err(
                bin_arr1,
                bin_arr2,
                delay_by_frames,
            )
            if tmp_err < err:
                err = tmp_err
                best_delay = delay_by_frames
            # Record (delay in seconds, error) for the optional CSV dump.
            rows[i][0] = delay_by_frames * self.vad.frame_duration_ms * 0.001
            rows[i][1] = tmp_err
            # Stop scanning once the error climbs >10% above the best seen.
            percent_change = (tmp_err - err) / err
            if percent_change > 0.1:
                early_stop = True
                rows = rows[:(i + 1)]
                break
        if early_stop:
            print('stopping early at', str(int(i / delay_range_len * 100.0)) + '%')
        #df = pd.DataFrame(rows, columns=["delay_in_seconds", "MAE"])
        #df.set_index("delay_in_seconds", inplace=True)
        #df.to_csv(error_csv_out)
        return best_delay * self.vad.frame_duration_ms
    def align(self, vid_file_path, srt_path, out_dir, original_name):
        """Find the best delay for ``srt_path`` against the video and write
        a shifted copy to ``<out_dir>/<original_name>_synced.srt``."""
        bin_arr1 = np.array(list(self.vad.detect(vid_file_path))).astype(np.uint8)
        bin_arr2, delay_range_start, delay_range_end, subs = self.binary_array_from_srt(srt_path)
        best_delay_ms = self.find_best_delay_milliseconds(
            bin_arr1,
            bin_arr2,
            delay_range_start,
            delay_range_end,
            os.path.join(out_dir, original_name + "_error.csv"),
        )
        best_delay_sec = best_delay_ms * 0.001
        print(f"best delay: {best_delay_sec}s")
        out_path = os.path.join(out_dir, original_name + "_synced.srt")
        td_to_shift = datetime.timedelta(seconds=best_delay_sec)
        print('Shifting Subtitles..')
        # Shift every cue in place by the winning delay.
        for subtitle in tqdm(subs):
            subtitle.start += td_to_shift
            subtitle.end += td_to_shift
        with open(out_path, 'w') as file:
            file.write(srt.compose(subs))
        print('output aligned subs to:', out_path)
    def download(self, vid_file_path, language):
        """Fetch subtitles via OpenSubtitlesDownload.py, keep an _unsynced
        copy next to the video, then align the downloaded file."""
        out_dir = os.path.dirname(vid_file_path)
        temp_dir = "/temp/"
        mkdir(out_dir)
        mkdir(temp_dir)
        command1 = "python OpenSubtitlesDownload.py --cli --auto {} --output {} --lang {}"
        command1_list = command1.format(vid_file_path, temp_dir, language).split(" ")
        subprocess.call(command1_list)
        original_name = basename_without_ext(vid_file_path)
        srt_path = os.path.join(temp_dir, original_name + ".srt")
        # save original file as 'filename_unsynced.srt'
        out_path_unsynced = os.path.join(out_dir, original_name + "_unsynced.srt")
        command2 = "cp {} {}"
        command2_list = command2.format(srt_path, out_path_unsynced).split(" ")
        subprocess.call(command2_list)
        print('downloaded subs:', srt_path)
        self.align(vid_file_path, srt_path, out_dir, original_name)
| derrick56007/getsub | src/get_sub.py | get_sub.py | py | 6,510 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "numpy.concatenate",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
... |
41714902184 | from Crypto.Util.number import getPrime
from Crypto.Util.number import inverse
import hashlib
import socket
from threading import Thread
# Module-level connection setup: runs on import, exits the process if the
# chat server is unreachable.
host = 'localhost'
port = 6000
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try :
    mysocket.connect((host, port))
except socket.error :
    print("connexion echouer avec le serveur ")
    exit()
print("connexion etablie avec le serveur")
def gen_rsa_keypair(bits):
    """Generate a textbook RSA key pair with a ~``bits``-bit modulus.

    Returns ``((e, n), (d, n))``: the public key followed by the private key.
    """
    from math import gcd
    # Standard public exponent F4. The original hard-coded 655337, almost
    # certainly a typo for 65537, and never checked it against phi.
    e = 65537
    while True:
        p = getPrime(bits // 2)
        q = getPrime(bits // 2)
        phi = (p - 1) * (q - 1)
        # Retry until the primes are distinct and e is invertible mod phi,
        # otherwise inverse() below would fail.
        if p != q and gcd(e, phi) == 1:
            break
    n = p * q
    d = inverse(e, phi)
    return ((e, n), (d, n))  # public key, private key
# Generate this client's RSA key pair once at startup (256-bit modulus).
key = gen_rsa_keypair(256)
def rsa(m, key):
    """Apply raw RSA to integer ``m`` with ``key = (exponent, modulus)``."""
    exponent, modulus = key
    return pow(m, exponent, modulus)
def rsa_enc(msg, key):
    """Encrypt a unicode string: UTF-8 encode, pack into a big-endian int,
    then apply RSA with the given key."""
    as_int = int.from_bytes(msg.encode('utf-8'), 'big')
    return rsa(as_int, key)
def rsa_dec(msg, key):
    """Invert rsa_enc: apply RSA to the integer ``msg`` and unpack the
    resulting big-endian bytes back into a UTF-8 string."""
    plain_int = rsa(msg, key)
    n_bytes = (plain_int.bit_length() + 7) // 8
    return plain_int.to_bytes(n_bytes, 'big').decode('utf-8')
class Send(Thread):
    """Sender thread: reads lines from stdin and ships them over the socket.

    NOTE(review): each message is encrypted with the public key and then
    immediately decrypted with the private key before sending, so plaintext
    goes over the wire -- presumably a round-trip demo; confirm against the
    server's expectations.
    """
    def __init__(self,arg):
        # arg is the already-connected client socket.
        Thread.__init__(self)
        #super(Send, self).__init__()
        self.arg = arg
    def run(self):
        continuer = True
        while(continuer):
            message = input()
            # Send our public key first (sendall returns None, so message1 is unused).
            message1 = self.arg.sendall(repr(key[0]).encode('utf8'))#cle
            try:
                enchifrer = rsa_enc(message, key[0])
                #print("enchiffreeer = ",enchifrer)
                #self.arg.send(repr(enchifrer).encode('utf-8'))
                dechiffrer = rsa_dec(enchifrer, key[1])
                #print("dechiffrer ", dechiffrer)
                self.arg.send(dechiffrer.encode('utf-8'))
            except socket.error:
                # Socket dropped: leave the loop and close below.
                continuer = False
                break
        self.arg.close()
class receive(Thread):
    """Receiver thread: prints every UTF-8 payload arriving on the socket."""
    def __init__(self,arg):
        # arg is the shared client socket.
        Thread.__init__(self)
        # super(receive, self).__init__()
        self.arg = arg
    def run(self):
        continuer = True
        while(continuer):
            try:
                # Blocking read of up to 1 KiB per message.
                message = self.arg.recv(1024).decode('utf-8')
            except socket.error:
                # Connection dropped -- stop the loop.
                continuer = False
                break
            else :
                print(">>>>>> {0}".format(message))
        self.arg.close()
if __name__ == "__main__":
    # One thread pumps stdin out to the server, the other prints replies.
    sn = Send(mysocket)
    sn.start()
    rv = receive(mysocket)
rv.start() | samyberkane23/chat_s-curis- | client.py | client.py | py | 2,458 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "socket.socket",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "socket.erro... |
11214657296 | import pika
import sys
# Connect to the local RabbitMQ broker (non-default port 25672, default vhost).
conn = pika.BlockingConnection(pika.URLParameters('amqp://guest:guest@localhost:25672/%2F'))
channel = conn.channel()
# Direct exchange: consumers receive messages whose routing key matches exactly.
channel.exchange_declare(exchange='direct_logs', exchange_type='direct')
# CLI: first argument is the severity (used as routing key), the rest is the body.
severity = sys.argv[1] if len(sys.argv) > 1 else 'info'
message = ' '.join(sys.argv[2:]) or "Hello World!"
channel.basic_publish(
    exchange='direct_logs', routing_key=severity, body=message
)
print(f" [*] Sent {severity}:{message}")
conn.close() | lamida/rabbit-hole | 04-routing/emit_log_direct.py | emit_log_direct.py | py | 465 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pika.BlockingConnection",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pika.URLParameters",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
... |
4397925600 | from multiprocessing import Process,Array
from time import time
import sqlite3
from .config import KASTEN_ANZ,VOK_DIR
class vokabelKartei(Process):
    """SQLite-backed vocabulary box: languages ("sprachen"), chapters
    ("kapitel") and words ("vokabeln") in a Leitner-style box system
    (boxes 1..KASTEN_ANZ).

    Every query is parameterized, so user input cannot inject SQL.
    NOTE(review): the Process base class looks vestigial -- run() is never
    overridden in this module -- but it is kept for backward compatibility.
    """
    def __init__(self):
        # Bug fix: a multiprocessing.Process subclass must initialize its
        # base class, otherwise start() would fail on a half-built Process.
        super().__init__()
        self.conn = sqlite3.connect(VOK_DIR+"kartei.sqlite")
        self.conn.text_factory = str
        self.c = self.conn.cursor()
        self.c.execute("""CREATE TABLE IF NOT EXISTS sprachen
                (id INTEGER PRIMARY KEY, name TEXT, spr1 TEXT,
                spr2 TEXT)""")
        self.c.execute("""CREATE TABLE IF NOT EXISTS kapitel
                (id INTEGER PRIMARY KEY, name TEXT, spr_id INT)""")
        self.c.execute("""CREATE TABLE IF NOT EXISTS vokabeln
                (id INTEGER PRIMARY KEY, spr1 TEXT, spr2 TEXT,
                kap_id INT, kasten INT, spr_id INT, last_date INT)""")
        self.COMMIT_MODE = True   # commit after every mutating call
        self.DEBUG_MODE = False   # echo queries instead of committing

    def close(self):
        """Close the cursor (NOTE: the connection object stays open)."""
        self.c.close()

    def commit(self):
        """Commit pending changes unless commits are paused or debugging."""
        if self.COMMIT_MODE and not self.DEBUG_MODE:
            self.conn.commit()

    def execute(self, query_str, args=()):
        """Run one parameterized statement, echoing it first in debug mode."""
        if self.DEBUG_MODE:
            print(query_str, args)
        self.c.execute(query_str, args)

    def set_commit_mode(self, mode):
        """Enable/disable autocommit; flush pending work when re-enabling."""
        if mode and not self.COMMIT_MODE:
            self.COMMIT_MODE = True
            self.commit()
        elif not mode and self.COMMIT_MODE:
            self.COMMIT_MODE = False

    def get_kapitel(self, sprache, kap_id=-1):
        """Return the chapters of a language; only one when kap_id is given."""
        if kap_id != -1:
            self.execute("SELECT * FROM kapitel WHERE spr_id=? AND id=?",
                    (sprache, kap_id))
        else:
            self.execute("SELECT * FROM kapitel WHERE spr_id=?", (sprache,))
        return self.c.fetchall()

    def get_vok(self, vok_id):
        """Return one word row as a list:
        [id, spr1, spr2, kap_id, kasten, spr_id, last_date]."""
        self.execute("SELECT * FROM vokabeln WHERE id=?", (vok_id,))
        return list(self.c.fetchall()[0])

    def get_sprachen(self, spr_id=None):
        """Return all languages (sorted by name) or only the requested one,
        each row as a list."""
        if spr_id is not None:
            self.execute("SELECT * FROM sprachen WHERE id=?", (spr_id,))
        else:
            self.execute("SELECT * FROM sprachen ORDER BY name ASC")
        return [list(x) for x in self.c.fetchall()]

    def get_stapel(self, sprache, kapitel=-1, kasten=0):
        """Return the word stack of a language, optionally filtered by
        chapter (-1 = any) and/or box number (0 = any)."""
        if kapitel != -1 and kasten != 0:
            self.execute("""SELECT * FROM vokabeln
                    WHERE spr_id=? AND kap_id=? AND kasten=?""",
                    (sprache, kapitel, kasten))
        elif kapitel != -1:
            self.execute("""SELECT * FROM vokabeln
                    WHERE spr_id=? AND kap_id=?""",
                    (sprache, kapitel))
        elif kasten != 0:
            self.execute("""SELECT * FROM vokabeln
                    WHERE spr_id=? AND kasten=?""",
                    (sprache, kasten))
        else:
            self.execute("SELECT * FROM vokabeln WHERE spr_id=?", (sprache,))
        return self.c.fetchall()

    def rem_vok(self, vokids):
        """Delete one word id or a list of word ids."""
        if not isinstance(vokids, list):
            vokids = [vokids]
        for vok in vokids:
            self.execute("""DELETE FROM vokabeln WHERE id=?""", (vok,))
        self.commit()

    def rem_kap(self, kap_id):
        """Delete a chapter together with all words inside it."""
        self.execute("""DELETE FROM kapitel WHERE id=?""", (kap_id,))
        self.execute("""DELETE FROM vokabeln WHERE kap_id=?""", (kap_id,))
        self.commit()

    def rem_sprache(self, spr_id):
        """Delete a language together with its words and chapters."""
        self.execute("""DELETE FROM sprachen WHERE id=?""", (spr_id,))
        self.execute("""DELETE FROM vokabeln WHERE spr_id=?""", (spr_id,))
        self.execute("""DELETE FROM kapitel WHERE spr_id=?""", (spr_id,))
        self.commit()

    def add_vok(self, *vok):
        """Insert a word: vok = (spr1, spr2, spr_id, kapitel).

        A chapter of -1 is stored as 0 ("no chapter"); new words start in
        box 1. Returns the new row id.
        """
        kapitel = vok[3]
        if vok[3] == -1:
            kapitel = 0
        self.execute("""INSERT INTO vokabeln(spr1,spr2,kap_id,kasten,spr_id)
                VALUES (?,?,?,?,?)""",
                (vok[0], vok[1], kapitel, 1, vok[2]))
        self.commit()
        return self.c.lastrowid

    def add_sprache(self, name, spr1, spr2):
        """Insert a language pair; returns the new row id."""
        self.execute("""INSERT INTO sprachen(name,spr1,spr2)
                VALUES (?,?,?)""",
                (name, spr1, spr2))
        self.commit()
        return self.c.lastrowid

    def add_kapitel(self, name, spr_id):
        """Insert a chapter for a language; returns the new row id."""
        self.execute("""INSERT INTO kapitel(name,spr_id)
                VALUES (?,?)""",
                (name, spr_id))
        self.commit()
        return self.c.lastrowid

    def edit_sprache(self, spr_id, name, spr1, spr2):
        """Rename a language and/or its two language labels."""
        self.execute("""UPDATE sprachen SET name=?,spr1=?,spr2=?
                WHERE id=?""",
                (name, spr1, spr2, spr_id))
        self.commit()

    def edit_kapitel(self, kap_id, name):
        """Rename a chapter."""
        self.execute("""UPDATE kapitel SET name=? WHERE id=?""", (name, kap_id))
        self.commit()

    def edit_vok(self, vok_id, spr1, spr2):
        """Change both sides of a word."""
        self.execute("""UPDATE vokabeln SET spr1=?,spr2=?
                WHERE id=?""",
                (spr1, spr2, vok_id))
        self.commit()

    def count_vok(self, sprache, kapitel=0, kasten=0):
        """Count words of a language, optionally restricted to a chapter
        and/or box (0 = no restriction)."""
        if kapitel != 0 and kasten != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                    WHERE spr_id=? AND kap_id=? AND kasten=?""",
                    (sprache, kapitel, kasten))
        elif kasten != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                    WHERE spr_id=? AND kasten=?""",
                    (sprache, kasten))
        elif kapitel != 0:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                    WHERE spr_id=? AND kap_id=?""",
                    (sprache, kapitel))
        else:
            self.execute("""SELECT COUNT(*) FROM vokabeln
                    WHERE spr_id=?""",
                    (sprache,))
        return self.c.fetchall()[0][0]

    def change_kasten(self, vok_id, kasten):
        """Move a word to another box; silently ignores boxes > KASTEN_ANZ."""
        if kasten <= KASTEN_ANZ:
            self.execute("""UPDATE vokabeln SET kasten=?
                    WHERE id=?""",
                    (kasten, vok_id))
            self.commit()

    def touch_vok(self, vok_id, null=False):
        """Stamp a word with the current unix time (or 0 when null=True)."""
        timestamp = int(time())
        if null:
            timestamp = 0
        self.execute("""UPDATE vokabeln SET last_date=?
                WHERE id=?""",
                (timestamp, vok_id))
        self.commit()

    def change_kap(self, vok_id, kapitel):
        """Move a word into another chapter."""
        self.execute("""UPDATE vokabeln SET kap_id=?
                WHERE id=?""",
                (kapitel, vok_id))
        self.commit()

    def get_duplicate(self, spr1, spr_id, kap_id=-1):
        """Return the first word with the same front side (spr1) in this
        language (optionally within one chapter), or None."""
        if kap_id != -1:
            self.execute("""SELECT * FROM vokabeln
                    WHERE spr1=? AND spr_id=? AND kap_id=?""",
                    (spr1, spr_id, kap_id))
        else:
            self.execute("""SELECT * FROM vokabeln
                    WHERE spr1=? AND spr_id=?""",
                    (spr1, spr_id))
        ergebnis = self.c.fetchall()
        if len(ergebnis) == 0:
            return None
        else:
            return list(ergebnis[0])
| tuxor1337/voktrainer | vok/core/kartei.py | kartei.py | py | 7,065 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "multiprocessing.Process",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.VOK_DIR",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "config.KASTEN_ANZ... |
30543883588 | from . import pblm
import sys
import torch
import torch.nn as nn
class CNN_A(pblm.PrebuiltLightningModule):
    """1-D CNN: three strided conv blocks followed by a three-layer MLP head.

    The flattened conv output must match the 64 * 309 input of the first
    dense layer; with kernel 5 / stride 2 three times, that corresponds to a
    2493-sample input signal -- verify against the data pipeline.
    """
    def __init__(self, classes):
        super().__init__(self.__class__.__name__)
        # Feature extractor: each conv halves the temporal resolution.
        self.conv1 = nn.Conv1d(1, 16, kernel_size=5, stride=2)
        self.conv2 = nn.Conv1d(16, 32, kernel_size=5, stride=2)
        self.conv3 = nn.Conv1d(32, 64, kernel_size=5, stride=2)
        # Classifier head.
        self.dense1 = nn.Linear(64 * 309, 512)
        self.dense2 = nn.Linear(512, 256)
        self.dense3 = nn.Linear(256, classes)

    def forward(self, x):
        # Collapse everything after the batch dim into a single channel.
        out = x.reshape(x.shape[0], 1, -1)
        for conv in (self.conv1, self.conv2, self.conv3):
            out = nn.functional.relu(conv(out))
        # Flatten, then run the dense head (no activation on the logits).
        out = out.reshape(out.shape[0], -1)
        out = nn.functional.relu(self.dense1(out))
        out = nn.functional.relu(self.dense2(out))
        return self.dense3(out)
if __name__ == "__main__":
    # Smoke test: build the network with 4 output classes.
    model = CNN_A(4)
| kendreaditya/heart-auscultation | src/models/modules/CNN/CNN.py | CNN.py | py | 1,135 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "torch.nn.Conv1d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number"... |
22397762010 | import cv2
import pandas as pd
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
class COVIDChestXRayDataset(Dataset):
    """Binary COVID-19 dataset over the covid-chestxray image dump.

    Filters the metadata to frontal X-rays only and labels rows with
    finding == 'COVID-19' as positive (1), everything else negative (0).
    """
    def __init__(self, path, size=128, augment=None):
        # path is combined with '/' below, so it is presumably a pathlib.Path
        # pointing at the dataset root -- confirm with callers.
        super(COVIDChestXRayDataset, self).__init__()
        print('{} initialized with size={}, augment={}'.format(self.__class__.__name__, size, augment))
        print('Dataset is located in {}'.format(path))
        self.size = size          # output image side length in pixels
        self.augment = augment    # optional callable applied to each image
        image_dir = path / 'images'
        metadata_path = path / 'metadata.csv'
        df_metadata = pd.read_csv(metadata_path, header=0)
        # Drop CT scans
        df_metadata = df_metadata[df_metadata['modality'] == 'X-ray']
        # Keep only PA/AP/AP Supine, drop Axial, L (lateral)
        allowed_views = ['PA', 'AP', 'AP Supine']
        df_metadata = df_metadata[df_metadata['view'].isin(allowed_views)]
        # COVID-19 = 1, SARS/ARDS/Pneumocystis/Streptococcus/No finding = 0
        self.labels = (df_metadata.finding == 'COVID-19').values.reshape(-1, 1)
        images = df_metadata.filename
        images = images.apply(lambda x: image_dir / x).values.reshape(-1, 1)
        # Two-column frame: absolute image path + boolean label.
        self.df = pd.DataFrame(np.concatenate((images, self.labels), axis=1), columns=['image', 'label'])
        del images
        print("Dataset: {}".format(self.df))
    @staticmethod
    def _load_image(path, size):
        """Load an image, resize to (size, size) and return it channel-first."""
        img = Image.open(path)
        img = cv2.resize(np.array(img), (size, size), interpolation=cv2.INTER_AREA)
        if len(img.shape) == 2:
            # Grayscale: replicate the single channel three times.
            img = np.expand_dims(img, axis=2)
            img = np.dstack([img, img, img])
        else:
            # NOTE(review): PIL loads RGB, so COLOR_BGR2RGB actually swaps
            # the channels to BGR here -- verify this is intended.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # size, size, chan -> chan, size, size
        img = np.transpose(img, axes=[2, 0, 1])
        return img
    def __getitem__(self, index):
        """Return (image, label) for one row, applying the optional augment."""
        row = self.df.iloc[index]
        img = self._load_image(row['image'], self.size)
        label = row['label']
        if self.augment is not None:
            img = self.augment(img)
        return img, label
    def __len__(self):
        # Number of metadata rows = number of samples.
        return self.df.shape[0]
{
"api_name": "torch.utils.data.Dataset",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.concat... |
30709482723 |
import cv2 as cv
import numpy as np
from process import Resize, NormalizeImage
class PicoDetProcess():
    """Pre/post-processing around a PicoDet-style detector.

    preprocess() turns BGR images into the network's input feed dict;
    postprocess() thresholds, NMS-filters and rescales the raw predictions.
    """
    def __init__(self,
                 trainsize=[320,320],
                 mean=[0.485,0.456,0.406],
                 std=[0.229,0.224,0.225],
                 score_threshold=0.4,
                 nms_threshold=0.5
                 ):
        # NOTE(review): the list defaults are shared across instances if mutated.
        self.score_threshold = score_threshold
        self.nms_threshold = nms_threshold
        self.resize =Resize(trainsize)
        self.normalizeImage = NormalizeImage(mean = mean,std =std)
    def preprocess(self, images):
        """Run processim over each image and pack the batch into a feed dict."""
        input_im_lst = []
        input_im_info_lst = []
        for im in images:
            im, im_info = self.processim(im)
            input_im_lst.append(im)
            input_im_info_lst.append(im_info)
        inputs = self.create_inputs(input_im_lst, input_im_info_lst)
        return inputs
    def create_inputs(self, imgs, im_info):
        """generate input for different model type
        Args:
            imgs (list(numpy)): list of images (np.ndarray)
            im_info (list(dict)): list of image info
        Returns:
            inputs (dict): input of model
        """
        inputs = {}
        im_shape = []
        scale_factor = []
        # Fast path: a single image needs no padding to a common size.
        if len(imgs) == 1:
            inputs['image'] = np.array((imgs[0], )).astype('float32')
            inputs['im_shape'] = np.array(
                (im_info[0]['im_shape'], )).astype('float32')
            inputs['scale_factor'] = np.array(
                (im_info[0]['scale_factor'], )).astype('float32')
            return inputs
        for e in im_info:
            im_shape.append(np.array((e['im_shape'], )).astype('float32'))
            scale_factor.append(np.array((e['scale_factor'], )).astype('float32'))
        inputs['im_shape'] = np.concatenate(im_shape, axis=0)
        inputs['scale_factor'] = np.concatenate(scale_factor, axis=0)
        # Zero-pad every image to the largest H and W in the batch so they
        # stack into one tensor.
        imgs_shape = [[e.shape[1], e.shape[2]] for e in imgs]
        max_shape_h = max([e[0] for e in imgs_shape])
        max_shape_w = max([e[1] for e in imgs_shape])
        padding_imgs = []
        for img in imgs:
            im_c, im_h, im_w = img.shape[:]
            padding_im = np.zeros(
                (im_c, max_shape_h, max_shape_w), dtype=np.float32)
            padding_im[:, :im_h, :im_w] = img
            padding_imgs.append(padding_im)
        inputs['image'] = np.stack(padding_imgs, axis=0)
        return inputs
    def processim(self, im):
        """Convert one BGR image to RGB, resize and normalize.

        Returns the processed image and an im_info dict with the original
        shape and the scale factors produced by the resize op.
        """
        # process image by preprocess_ops
        im_info = {
            'scale_factor': np.array(
                [1., 1.], dtype=np.float32),
            'im_shape': None,
        }
        im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
        im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
        im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
        im,im_info = self.resize(im,im_info)
        im,im_info = self.normalizeImage(im,im_info)
        # im = im.transpose((2, 0, 1)).copy()
        return im, im_info
    def postprocess(self, inputs, scale_factor):
        """Apply NMS to the raw network output and undo the resize scaling."""
        bboxs = inputs['bboxes']
        scores = inputs['scores']
        bbox,score = self.nms(bboxs[0],scores[0][0])
        # Map coordinates back to the original image; presumably scale_factor
        # is (scale_y, scale_x) since x coords use index 1 -- confirm upstream.
        for box in bbox:
            box[0] = box[0] / scale_factor[1]
            box[1] = box[1] / scale_factor[0]
            box[2] = box[2] / scale_factor[1]
            box[3] = box[3] / scale_factor[0]
        outputs = dict(bboxes=np.array(bbox), scores=np.array(score))
        return outputs
    def nms(self, bounding_boxes, confidence_score):
        '''
        :param bounding_boxes: candidate boxes as [min_x, min_y, max_x, max_y], origin at the image's top-left corner
        :param confidence_score: confidence of each candidate box
        :return: the boxes and confidences that survive suppression
        '''
        # Keep only candidates above the score threshold.
        picked = []
        for i in range(confidence_score.shape[-1]):
            if confidence_score[i] > self.score_threshold:
                picked.append(i)
        bounding_boxes = bounding_boxes[picked,:]
        confidence_score = confidence_score[picked]
        # No candidate left: return empty lists.
        if len(bounding_boxes) == 0:
            return [], []
        # Work on numpy arrays for vectorized math.
        boxes = np.array(bounding_boxes)
        # Split out the corner coordinates.
        start_x = boxes[:, 0]
        start_y = boxes[:, 1]
        end_x = boxes[:, 2]
        end_y = boxes[:, 3]
        # Confidences as numpy for argsort below.
        score = np.array(confidence_score)
        # Surviving boxes and their scores.
        picked_boxes = []
        picked_score = []
        # Area of every box (inclusive pixel coordinates, hence the +1).
        areas = (end_x - start_x + 1) * (end_y - start_y + 1)
        # Indices sorted by ascending score; the best candidate sits last.
        order = np.argsort(score)
        # Iterate bounding boxes
        while order.size > 0:
            # The index of largest confidence score
            index = order[-1]
            # Pick the bounding box with largest confidence score
            picked_boxes.append(bounding_boxes[index])
            picked_score.append(confidence_score[index])
            # Intersection rectangle between the picked box and the rest:
            # max of the top-left corners, min of the bottom-right corners.
            x1 = np.maximum(start_x[index], start_x[order[:-1]])
            x2 = np.minimum(end_x[index], end_x[order[:-1]])
            y1 = np.maximum(start_y[index], start_y[order[:-1]])
            y2 = np.minimum(end_y[index], end_y[order[:-1]])
            # Non-overlapping pairs clamp to w or h == 0, i.e. zero area.
            w = np.maximum(0.0, x2 - x1 + 1)
            h = np.maximum(0.0, y2 - y1 + 1)
            intersection = w * h
            # IoU against every remaining candidate.
            ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
            # Keep only candidates whose IoU stays below the NMS threshold.
            left = np.where(ratio < self.nms_threshold)
            # Shrink the ordered index list accordingly.
            order = order[left]
        return picked_boxes, picked_score
{
"api_name": "process.Resize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "process.NormalizeImage",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"... |
35782338526 | #%%
import numpy as np
import matplotlib.pyplot as plt
#%%
# Synthetic dataset: three full sine periods with heavy Gaussian noise.
x = np.arange(0, 6 * np.pi, 0.025)
y_true = np.sin(x)
y = y_true + np.random.normal(scale=1, size=len(x))
# Noisy samples in black, the noise-free target curve in red.
plt.scatter(x, y, color="k")
plt.plot(x, y_true, color="red")
#%%
np.random.seed(42)
# NOTE(review): MLPRegressor and RandomForestRegressor are imported but
# unused -- presumably leftovers from experimentation.
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
# Small gradient-boosted model fitted on the 1-D noisy sine data.
model = HistGradientBoostingRegressor(random_state=42, max_iter=20, max_leaf_nodes=64, min_samples_leaf=30)
model.fit(x.reshape(-1, 1), y)
preds = model.predict(x.reshape(-1, 1))
# Overlay the model's prediction curve on the raw samples.
plt.scatter(x, y)
plt.plot(x, preds, color="red")
#%%
def gen_one_frame(use_fraction: float, left_to_right: bool):
    """Render one animation frame to ML-Basics/frames/.

    Draws the noisy samples faintly, then the prediction curve revealed
    from the left (ltr) or hidden toward the right (rtl), depending on
    ``left_to_right``. Relies on the module-level x, y and preds arrays.
    """
    use_fraction = round(use_fraction, 3)
    print(use_fraction)
    n_total = len(preds)
    if left_to_right:
        visible_idx = np.arange(0, n_total * use_fraction).astype("int")
    else:
        visible_idx = np.arange(n_total * use_fraction, n_total).astype("int")
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y, color="k", alpha=0.1)
    ax.plot(x[visible_idx], preds[visible_idx], color="blue")
    ax.set_title(f"frac = {use_fraction}")
    direction = 'ltr' if left_to_right else 'rtl'
    fig.savefig(
        f"ML-Basics/frames/{direction}_frame_{use_fraction}.png"
    )
    plt.close()
# Render ~200 frames revealing the curve left-to-right, then the same
# fractions hiding it right-to-left.
for f in np.arange(0.01, 1, 0.005):
    gen_one_frame(use_fraction=f, left_to_right=True)
for f in np.arange(0.01, 1, 0.005):
    gen_one_frame(use_fraction=f, left_to_right=False)
#%%
import glob
from PIL import Image
# Assemble every rendered frame (sorted lexicographically) into one GIF.
fp_in = "ML-Basics/frames/*.png"
fp_out = "ML-Basics/out_gif.gif"
imgs = (Image.open(f) for f in sorted(glob.glob(fp_in)))
img = next(imgs)  # extract first image from iterator
# 100 ms per frame, looping forever (loop=0).
img.save(fp=fp_out, format="GIF", append_images=imgs, save_all=True, duration=100, loop=0)
| moritzwilksch/DataScienceEducation | ML-Basics/fancy_gif.py | fancy_gif.py | py | 1,801 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_num... |
73979398907 | #!/usr/bin/env python3
import telebot
from telebot import types
import sqlite3
sqll = [0]
bot = telebot.TeleBot("TOKEN", parse_mode=None)
conn = sqlite3.connect('SQLdb.db', check_same_thread=False)
cursor = conn.cursor()
def updateUserBalance (id: int, balans: int):
cursor.execute('UPDATE users SET balans=? WHERE id=?', (balans, id))
conn.commit()
def createUser (id: int, user_name: str, user_login: str, balans: int):
cursor.execute('INSERT INTO users (id, user_name, user_login, balans) VALUES (?, ?, ?, ?)', (id, user_name, user_login, balans))
conn.commit()
def getUserBalans (id: int):
balans = cursor.execute('SELECT balans FROM users WHERE id = ?', (id,))
conn.commit()
return balans.fetchone()[0]
def getUserName (id: int):
userData = cursor.execute('SELECT user_name FROM users WHERE id = ?', (id,))
conn.commit()
return userData.fetchone()[0]
def selectAll_id ():
all_id = cursor.execute('SELECT id FROM users')
conn.commit()
return all_id.fetchall()
def idINFOMRER (ID): # вытаскиваем список ID-шников из картежа sqlite
allin = selectAll_id()
print(allin)
num = 0
# usId = call.from_user.id
print(ID, '- user')
new_base = []
for el in allin:
print(num)
print(ID, allin[num][0])
new_base.insert(num, allin[num][0]) #такой способ вытащить ID из кортежа
num = num+1
print(new_base)
print('==========================================')
return new_base
def select_all_base ():
all_base = cursor.execute('SELECT * FROM users')
conn.commit()
return all_base.fetchall()
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, "Этот бот призван читать круче Кендрика Ламара.")
stick = open('sticker.webp', 'rb')
bot.send_sticker(message.chat.id, stick)
markup = types.ReplyKeyboardRemove(selective=False)
markup = types.InlineKeyboardMarkup(row_width=1)
itembtm1 = types.InlineKeyboardButton('Правила', callback_data='inc_1')
itembtm2 = types.InlineKeyboardButton('Пользовательское соглашение', callback_data='inc_2')
itembtm3 = types.InlineKeyboardButton('Пополнение баланса', callback_data='inc_3')
itembtm4 = types.InlineKeyboardButton('Личный кабинет', callback_data='inc_4')
markup.add(itembtm1, itembtm2, itembtm3, itembtm4)
bot.send_message(message.chat.id, "Меню:", reply_markup=markup)
"""--------------------------- Обработчик кнопок -----------------------------------------------------------"""
@bot.callback_query_handler(func=lambda call: True)
def test_callback(call):
if call.message:
if call.data == "inc_0":
markup = types.InlineKeyboardMarkup(row_width=1)
itembtm1 = types.InlineKeyboardButton('Правила', callback_data='inc_1')
itembtm2 = types.InlineKeyboardButton('Пользовательское соглашение', callback_data='inc_2')
itembtm3 = types.InlineKeyboardButton('Пополнение баланса', callback_data='inc_3')
itembtm4 = types.InlineKeyboardButton('Личный кабинет', callback_data='inc_4')
markup.add(itembtm1, itembtm2, itembtm3, itembtm4)
bot.edit_message_text("Меню:", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == "inc_1":
user_ID = call.from_user.id
if user_ID in idINFOMRER(user_ID):
print('Hello, friend')
else:
us_id = call.from_user.id
us_name = call.from_user.first_name
us_sname = call.from_user.username
createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
print('new user')
# bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
markup = types.InlineKeyboardMarkup(row_width=1)
item1 = types.InlineKeyboardButton('Отправить отзыв', callback_data='la_2')
item2 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(item1, item2)
bot.edit_message_text("Мы тут не в игры играем, никаких правил", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == "inc_2":
user_ID = call.from_user.id
if user_ID in idINFOMRER(user_ID):
print('Hello, friend')
else:
us_id = call.from_user.id
us_name = call.from_user.first_name
us_sname = call.from_user.username
createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
print('new user')
# bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
markup = types.InlineKeyboardMarkup(row_width=1)
item_for_block_1 = types.InlineKeyboardButton('Инверсировать игру', callback_data='item_block_2')
item_for_block_2 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(item_for_block_1, item_for_block_2)
bot.edit_message_text("Можем просто заблокировать, если ты нам не понравишься", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == "inc_3":
user_ID = call.from_user.id
if user_ID in idINFOMRER(user_ID):
print('Hello, friend')
else:
us_id = call.from_user.id
us_name = call.from_user.first_name
us_sname = call.from_user.username
createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
# bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
print('new user')
us_id = call.from_user.id
us_name = call.from_user.first_name
us_sname = call.from_user.username
currentUserBalance = getUserBalans(us_id)
# print(currentUserBalance.fetchone()[0])
currentUserBalance = currentUserBalance+100
updateUserBalance(id=us_id, balans=currentUserBalance)
markup = types.InlineKeyboardMarkup(row_width=1)
balansbtn = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(balansbtn)
bot.edit_message_text("Баланс пополнен!", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == "inc_4":
user_ID = call.from_user.id
if user_ID in idINFOMRER(user_ID):
print('Hello, friend')
else:
us_id = call.from_user.id
us_name = call.from_user.first_name
us_sname = call.from_user.username
createUser(id=us_id, user_name=us_name, user_login=us_sname, balans=0)
print('new user')
# bot.send_message(call.from_user.id, 'Привет! Ваше имя добавленно в базу данных!')
if user_ID == 795675764 or user_ID == 5510951877:
markup = types.InlineKeyboardMarkup(row_width=1)
lkbtn1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
lkbtn2 = types.InlineKeyboardButton('База данных', callback_data='admin_base')
markup.add(lkbtn1, lkbtn2)
us_id = call.from_user.id
bot.edit_message_text('Привет, создатель!\n'+'Игрок: '+str(getUserName(us_id))+'\nБаланс: '+str(getUserBalans(us_id)),
call.message.chat.id, call.message.message_id, reply_markup=markup)
else:
markup = types.InlineKeyboardMarkup(row_width=1)
lkbtn1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(lkbtn1)
us_id = call.from_user.id
bot.edit_message_text('Игрок: '+str(getUserName(us_id))+'\nБаланс: '+str(getUserBalans(us_id)),
call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == "la_2":
markup = types.InlineKeyboardMarkup(row_width=1)
otzyv = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(otzyv)
bot.edit_message_text("info@xcloudclub.com", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == 'item_block_2':
markup = types.InlineKeyboardMarkup(row_width=1)
inv_1 = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(inv_1)
bot.edit_message_text("Инверсированно", call.message.chat.id, call.message.message_id, reply_markup=markup)
elif call.data == 'admin_base':
my_list = []
for x in select_all_base():
my_list.append(''.join(str(x))) #https://ru.stackoverflow.com/questions/1178388
my_str = '\n'.join(my_list)
# print(select_all_base())
# for item in select_all_base():
# print(item)
markup = types.InlineKeyboardMarkup(row_width=1)
button_back = types.InlineKeyboardButton('Назад', callback_data='inc_0')
markup.add(button_back)
bot.edit_message_text('База данных\n'+my_str, call.message.chat.id, call.message.message_id, reply_markup=markup)
"""------------------------- Обработчики текста и стикеров --------------------------------------------------"""
@bot.message_handler(content_types=['text'])
def text_up(message):
bot.reply_to(message, message.text)
bot.send_message(message.chat.id, "Давай без самодеятельности. Мы для кого кнопки сделали?")
@bot.message_handler(content_types=['sticker'])
def text_down(message):
bot.send_message(message.chat.id, "Козырный стикер!")
"""--------------------------------- Start ------------------------------------------------------------------"""
bot.infinity_polling()
| thebilderberg/telegram_bot_github | star_bot.py | star_bot.py | py | 10,747 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "telebot.TeleBot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "telebot.types.ReplyKeyboardRemove",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tele... |
10918154737 | import os
import ast
import subprocess
import uuid
import json
import hashlib
import socket
import psutil
from ipykernel.ipkernel import IPythonKernel
def make_except_safe(code):
code = code.replace('\n', '\n ')
code = 'try:\n ' + code
code = code + '\nexcept: pass\n'
try:
ast.parse(code)
return code
except:
return ''
SCIUNIT_HOME = os.path.expanduser('~/sciunit/')
SCIUNIT_PROJECT_FILE = os.path.join(SCIUNIT_HOME, '.activated')
SCIUNIT_SOCKET_FILE = os.path.join(SCIUNIT_HOME, 'listener.socket')
class SciunitKernel(IPythonKernel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
implementation = super().implementation + ' sciunit'
if (os.path.exists(SCIUNIT_PROJECT_FILE)):
self.project = open(SCIUNIT_PROJECT_FILE).read().strip()
self.project_name = os.path.basename(os.path.normpath(self.project))
if (os.path.exists(os.path.join(self.project, 'kernel'))):
self.recording = False
else:
self.recording = True
open(os.path.join(self.project, 'kernel'), 'w').write(json.dumps([]))
else:
self.project_name = 'Project_' + str(uuid.uuid4())
self.project = os.path.join(SCIUNIT_HOME, self.project_name)
subprocess.run(['sciunit', 'create', self.project_name])
self.recording = True
open(os.path.join(self.project, 'kernel'), 'w').write(json.dumps([]))
self.eid = 1
self.file = os.path.join(self.project, 'run.py')
self.valid = True
files = psutil.Process().open_files()
for file in files:
os.close(file.fd)
criu_path = os.path.join(self.project, 'criu0')
data = ['Dump', os.getpid(), os.getppid(), criu_path, 0]
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect(SCIUNIT_SOCKET_FILE)
client.sendall(json.dumps(data).encode())
client.close()
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
criu_path = os.path.join(self.project, f'criu{self.eid}')
if (os.path.exists(criu_path)): self.recording = False
hashes = json.loads(open(os.path.join(self.project, 'kernel')).read())
if not self.recording and (len(hashes) == self.eid - 1): self.valid = False
data = []
if self.valid:
with open(self.file[1], 'a') as file:
safe_code = make_except_safe(code)
if safe_code:
if self.recording:
print('Recording e{}'.format(self.eid))
open(self.file, 'a').write(safe_code)
subprocess.Popen(['sciunit', 'exec', 'python3', self.file], stdout=subprocess.PIPE).communicate()
hashes.append(hashlib.sha256(safe_code.encode()).hexdigest())
open(os.path.join(self.project, 'kernel'), 'w').write(json.dumps(hashes))
data = ['Dump', os.getpid(), os.getppid(), criu_path, self.eid]
else:
if (hashlib.sha256(safe_code.encode()).hexdigest() != hashes[self.eid - 1]):
print('Invalid, stopped repeating')
self.valid = False
else:
print('Valid, repeating e{}'.format(self.eid))
subprocess.Popen(['sciunit', 'repeat', 'e{}'.format(self.eid)], stdout=subprocess.PIPE).communicate()
data = ['Restore', os.getpid(), os.getppid(), criu_path, self.eid]
self.eid += 1
output = super().do_execute(code, silent, False, user_expressions, allow_stdin)
if data:
client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect(SCIUNIT_SOCKET_FILE)
client.sendall(json.dumps(data).encode())
client.close()
# TODO: Wait without Socket
return output
if __name__ == '__main__':
from ipykernel.kernelapp import IPKernelApp
IPKernelApp.launch_instance(kernel_class=SciunitKernel)
| depaul-dice/sciunit-NBv1 | __main__.py | __main__.py | py | 4,282 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ast.parse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_nu... |
23061764300 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import re
import os
import random
import time
from time import sleep
import json
from collections import OrderedDict
def dropSameListEle(inList):
outList=[]
for x in inList:
if x not in outList and x != '':
outList.append(x)
return outList
class FWIKI:
#初始化,传入起始页码,截止页码
def __init__(self):
self.baseUrl="http://fgowiki.com/guide/petdetail/"
#抓取页面
def getPage(self,url):
try:
request=urllib.request.Request(url)
response=urllib.request.urlopen(request)
page=response.read().decode('utf-8')
return page
except (urllib.request.URLError,e):
print('erro')
if hasattr(e,'reason'):
print('reason',e.reason)
return None
#提取信息
def getInf(self,regExpress,page,pos):
pattern=re.compile(regExpress,re.S)
result=re.search(pattern,page)
if result:
result = result.group(pos).strip()
result = re.sub(r'・',r'·',result)
result = re.sub(r'〔(.*?)〕',r'(\1)',result)
result = re.sub(r'((.*?))',r'(\1)',result)
return result
else:
return None
f=FWIKI()
whiteList=[83,149,151,152,168]
startPage=1
endPage=182
skillList=[]
pSkillList=[]
NPList=[]
nameDict=OrderedDict()
while startPage<=endPage:
try:
if startPage in whiteList:
startPage = startPage + 1
continue
url=f.baseUrl+str(startPage)
page=f.getPage(url)
page=page.encode().decode('unicode_escape')
name=f.getInf(r'"NAME":"(.*?)"',page,1)
nameDict[startPage]=name
skill=f.getInf(r'"SKILL_R1":"(.*?)"',page,1)
skillList.append(skill)
skill=f.getInf(r'"SKILL_R2":"(.*?)"',page,1)
skillList.append(skill)
skill=f.getInf(r'"SKILL_R3":"(.*?)"',page,1)
skillList.append(skill)
np= f.getInf(r'"T_NAME":"(.*?)"',page,1)
np = re.sub(r'\(.*?\)','',np)
NPList.append(np)
pSkill=f.getInf(r'"CSKILL_R1":"(.*?)"',page,1)
pSkillList.append(pSkill)
pSkill=f.getInf(r'"CSKILL_R2":"(.*?)"',page,1)
pSkillList.append(pSkill)
pSkill=f.getInf(r'"CSKILL_R3":"(.*?)"',page,1)
pSkillList.append(pSkill)
pSkill=f.getInf(r'"CSKILL_R4":"(.*?)"',page,1)
pSkillList.append(pSkill)
print(str(startPage))
if startPage <= endPage:
sleep(random.uniform(3,5))
startPage = startPage + 1
except Exception as e:
print('Error:',e)
if startPage<=endPage:
sleep(random.uniform(2,3))
NPList=dropSameListEle(NPList)
skillList=dropSameListEle(skillList)
pSkillList=dropSameListEle(pSkillList)
lines='var servantsDict = {\n'
for x in nameDict:
lines+='\t"'+str(x)+'" : "'+nameDict[x]+'",\n'
lines+='};\n\n\n\n'
lines+='var noblePhantasmsDict = {\n'
for x in NPList:
lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
lines+='var skillsDict = {\n'
for x in skillList:
lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
lines+='var passiveSkillsDict = {\n'
for x in pSkillList:
lines+='\t"" : "'+str(x)+'",\n'
lines+='};\n\n\n\n'
with open('servants_new.json','w+',encoding='utf-8') as wpoint:
wpoint.write(lines)
print('Task is finished!')
| pplost/for-test | tools/新建文件夹/fetch - 副本.py | fetch - 副本.py | py | 3,180 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_nam... |
32505732795 | # -*- coding: utf-8 *-
import pprint
import re
import sys
import importlib
from Symfopy.Component.HttpFoundation import Request, Response
class Router(object):
var_regex = re.compile(r'\{(\w+)(?::([^}]+))?\}')
def __init__(self, routes = {}):
self.routes = dict()
for name in routes:
vars = routes[name].get('defaults', {})
self.add_route(name, routes[name]['route'],\
routes[name]['controller'], **vars)
def load_controller(self, string):
module_name, func_name = string.split(':', 1)
module = importlib.import_module(module_name)
#__import__(module_name)
#module = sys.modules[module_name]
func = getattr(module, func_name)
return func
def add_route(self, name, route, controller, **vars):
#if isinstance(controller, basestring):
# controller = self.load_controller(controller)
self.routes[name] = (re.compile(self.template_to_regex(route)),
controller, vars)
@staticmethod
def template_to_regex(template):
regex = ''
last_pos = 0
for match in Router.var_regex.finditer(template):
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1)
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
regex = '^%s$' % regex
return regex
def __str__(self):
return pprint.pformat(self.__dict__)
@staticmethod
def notfound(message = None, **kwargs):
content = ['<h1>Not Found</h1>']
if isinstance(message, basestring):
content.append('<p>'+ message + '</p>')
elif isinstance(message, list):
for x in message:
if isinstance(x, basestring):
content.append('<p>'+ x + '</p>')
return Response(content, 404)
def rest_controller(cls):
def replacement(request, **urlvars):
action = urlvars.get('action', None)
if action:
action += '_' + request.get_method().lower()
urlvars.pop('action')
else:
if isinstance(action, basestring):
urlvars.pop('action')
action = request.get_method().lower()
instance = cls(**urlvars)
try:
method = getattr(instance, action)
except Exception:
return Router.notfound('No action ' + action)
return method(request)
return replacement
def rest_controller_template(cls):
def replacement(request, template = None, **urlvars):
action = urlvars.get('action', None)
if action:
action += '_' + request.get_method().lower()
urlvars.pop('action')
else:
if isinstance(action, basestring):
urlvars.pop('action')
action = request.get_method().lower()
instance = cls(**urlvars)
try:
method = getattr(instance, action)
except Exception:
return Router.notfound('No action ' + action)
if template:
return method(request, template)
else:
return method(request)
replacement.member_func = cls
return replacement
| alculquicondor/Symfopy | vendor/Symfopy/Component/Routing.py | Routing.py | py | 3,380 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.escape",
"line_n... |
41860678548 | import logging
log = logging.getLogger(__name__)
import re
import requests
from bs4 import BeautifulSoup
try:
# Python 2 has a standard urlparse library
from urlparse import urlparse, ParseResult
except:
# Python 3 has the same library hidden in urllib.parse
from urllib.parse import urlparse, ParseResult
MAX_FILEIZE = 2**19 # bytes; this is .5MB
MAX_CONNECTIONTIME = 20 # in seconds
RE_bad_title = re.compile(
"""(?:<title>|<title>)(.*)(?:<?/title>|(?:<)?/title>)""", re.I)
REGEX_doctype = re.compile("^\s*<!DOCTYPE[^>]*>", re.IGNORECASE)
RE_whitespace = re.compile("\s+")
PARSE_SAFE_FILES = ('html', 'txt', 'json', 'htm', 'xml',
'php', 'asp', 'aspx', 'ece', 'xhtml', 'cfm', 'cgi')
# based on DJANGO
# https://github.com/django/django/blob/master/django/core/validators.py
# not testing ipv6 right now, because rules are needed for ensuring they are correct
RE_VALID_HOSTNAME = re.compile(
r'(?:'
r'(?P<ipv4>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ipv4
r'|'
# r'(?P<ipv6>\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
# r'|'
r'(?P<localhost>localhost)' # localhost...
r'|'
r'(?P<domain>([A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?))' # domain...
r'(?P<port>:\d+)?' # optional port
r')', re.IGNORECASE)
RE_PORT = re.compile(
r'^'
r'(?P<main>.+)'
r':'
r'(?P<port>\d+)'
r'$', re.IGNORECASE
)
RE_DOMAIN_NAME = re.compile(
r"""(^
(?:
[A-Z0-9]
(?:
[A-Z0-9-]{0,61}
[A-Z0-9]
)?
\.
)+
(?:
[A-Z]{2,6}\.?
|
[A-Z0-9-]{2,}
(?<!-)\.?)
$)""",
re.VERBOSE | re.IGNORECASE)
RE_IPV4_ADDRESS = re.compile(
r'^(\d{1,3})\.(\d{1,3}).(\d{1,3}).(\d{1,3})$' # grab 4 octets
)
RE_ALL_NUMERIC = re.compile("^[\d\.]+$")
def is_parsed_valid_url(parsed, require_public_netloc=True, http_only=True):
"""returns bool
`http_only`
defaults True
requires http or https for the scheme
"""
assert isinstance(parsed, ParseResult)
log.debug("is_parsed_valid_url = %s", parsed)
if not all((parsed.scheme, parsed.netloc)):
log.debug(" FALSE - missing `scheme` or `netloc`")
return False
if http_only:
if parsed.scheme not in ('http', 'https'):
log.debug(" FALSE - invalid `scheme`")
return False
if require_public_netloc:
log.debug(" validating netloc")
_netloc_match = RE_VALID_HOSTNAME.match(parsed.netloc)
if not _netloc_match:
log.debug(" did not match regex")
return False
# we may assign these
_netloc_clean = parsed.netloc
_port = None
_netloc_ported = RE_PORT.match(parsed.netloc)
if _netloc_ported:
_netloc_ported_groudict = _netloc_ported.groupdict()
_netloc_clean = _netloc_ported_groudict['main']
_port = _netloc_ported_groudict['port']
_netloc_groudict = _netloc_match.groupdict()
if _netloc_groudict['ipv4'] is not None:
octets = RE_IPV4_ADDRESS.match(_netloc_clean)
if octets:
log.debug(" validating against ipv4")
for g in octets.groups():
g = int(g)
if int(g) > 255:
log.debug(" invalid ipv4; encountered an octect > 255")
return False
log.debug(" valid ipv4")
return True
log.debug(" invalid ipv4")
return False
else:
if _netloc_clean == 'localhost':
log.debug(" localhost!")
return True
if RE_ALL_NUMERIC.match(_netloc_clean):
log.debug(" This only has numeric characters. "
"this is probably a fake or typo ip address.")
return False
if _port:
try:
_port = int(_port)
if parsed.port != _port:
log.debug(" netloc.port does not match our regex _port")
return False
except:
raise
log.debug(" _port is not an int")
return False
if RE_DOMAIN_NAME.match(_netloc_clean):
log.debug(" valid public domain name format")
return True
log.debug(" this appears to be invalid")
return False
return True
def is_parsed_valid_relative(parsed):
"""returns bool"""
assert isinstance(parsed, ParseResult)
if parsed.path and not any((parsed.scheme, parsed.hostname)):
return True
return False
def parsed_to_relative(parsed):
"""turns a parsed url into a full relative url"""
assert isinstance(parsed, ParseResult)
_path = parsed.path
# cleanup, might be unnecessary now
if _path and _path[0] != "/":
# prepend a slash
_path = "/%s" % _path
if parsed.query:
_path += "?" + parsed.query
if parsed.fragment:
_path += "#" + parsed.fragment
return _path
def is_url_valid(url, require_public_netloc=None):
"""
tries to parse a url. if valid returns `ParseResult`
(boolean eval is True); if invalid returns `False`
"""
if url is None:
return False
parsed = urlparse(url)
if is_parsed_valid_url(parsed, require_public_netloc=require_public_netloc):
return parsed
return False
def url_to_absolute_url(url_test, url_fallback=None, require_public_netloc=None):
"""
returns an "absolute url" if we have one.
if we don't, it tries to fix the current url based on the fallback
this shouldn't be needed, but it is.
called by:
MetadataParser.absolute_url()
MetadataParser.get_discrete_url()
args:
`url_test` - the url to return/fix
`url_fallback` - a fallback url. this is returned in VERY bad
errors. in "not so bad" errors, this is parsed and used as the
base to construct a new url.
`require_public_netloc` - requires the hostname/netloc to be a
valid IPV4 or public dns domain name
"""
if url_test is None and url_fallback is not None:
return url_fallback
parsed = urlparse(url_test)
_path = parsed.path
if _path:
# sanity check
# some stock plugins create invalid urls/files like '/...' in meta-data
if _path[0] != "/":
# prepend a slash
_path = "/%s" % _path
known_invalid_plugins = ['/...', ]
if _path in known_invalid_plugins:
return url_fallback
# finally, fix the path
# this isn't nested, because we could have kwargs
_path = parsed_to_relative(parsed)
if not _path:
# so if our _path is BLANK, fuck it.
# this can happen if someone puts in "" for the canonical
return url_fallback
rval = None
# we'll use a placeholder for a source 'parsed' object that has a domain...
parsed_domain_source = None
# if we have a valid URL (OMFG, PLEASE)...
if is_parsed_valid_url(parsed, require_public_netloc=require_public_netloc):
parsed_domain_source = parsed
else:
# ok, the URL isn't valid
# can we re-assemble it
if url_fallback:
parsed_fallback = urlparse(url_fallback)
if is_parsed_valid_url(
parsed_fallback,
require_public_netloc=require_public_netloc
):
parsed_domain_source = parsed_fallback
if parsed_domain_source:
rval = "%s://%s%s" % (
parsed_domain_source.scheme,
parsed_domain_source.netloc, _path)
return rval
class NotParsable(Exception):
def __init__(self, message='', raised=None, code=None):
self.message = message
self.raised = raised
self.code = code
def __str__(self):
return "ApiError: %s | %s | %s" % (self.message, self.code, self.raised)
class NotParsableFetchError(NotParsable):
pass
class MetadataParser(object):
"""
turns text or a URL into a dict of dicts, extracting as much relevant
metadata as possible.
the 'keys' will be either the 'name' or 'property' attribute of the node.
we EXPECT/REQUIRE a `head` in the document.
the attribute's prefix are removed when storing into it's bucket
eg:
og:title -> 'og':{'title':''}
metadata is stored into subgroups:
page
extracted from page elements
saved into MetadataParser.metadata['page']
example:
<head><title>Awesome</title></head>
MetadataParser.metadata = {'page': {'title':'Awesome'}}
opengraph
has 'og:' prefix
saved into MetadataParser.metadata['og']
example:
<meta property="og:title" content="Awesome"/>
MetadataParser.metadata = {'og': {'og:title':'Awesome'}}
dublin core
has 'dc:' prefix
saved into MetadataParser.metadata['dc']
example:
<meta property="dc:title" content="Awesome"/>
MetadataParser.metadata = {'dc': {'dc:title':'Awesome'}}
meta
has no prefix
saved into MetadataParser.metadata['meta']
example:
<meta property="title" content="Awesome"/>
MetadataParser.metadata = {'meta': {'dc:title':'Awesome'}}
NOTE:
passing in ssl_verify=False will turn off ssl verification checking
in the requests library.
this can be necessary on development machines
"""
url = None
url_actual = None
strategy = None
metadata = None
LEN_MAX_TITLE = 255
only_parse_file_extensions = None
require_public_netloc = None
force_doctype = None
requests_timeout = None
# allow for the beautiful_soup to be saved
soup = None
og_minimum_requirements = ['title', 'type', 'image', 'url']
twitter_sections = ['card', 'title', 'site', 'description']
strategy = ['og', 'dc', 'meta', 'page']
def __init__(
self,
url=None, html=None, strategy=None, url_data=None, url_headers=None,
force_parse=False, ssl_verify=True, only_parse_file_extensions=None,
force_parse_invalid_content_type=False, require_public_netloc=True,
force_doctype=False, requests_timeout=None,
):
"""
creates a new `MetadataParser` instance.
kwargs:
`url`
url to parse
`html`
instead of a url, parse raw html
`strategy`
default: None
sets default metadata strategy (['og', 'dc', 'meta', 'page'])
see also `MetadataParser.get_metadata()`
`url_data`
data passed to `requests` library as `params`
`url_headers`
data passed to `requests` library as `headers`
`force_parse`
default: False
force parsing invalid content
`ssl_verify`
default: True
disable ssl verification, sometimes needed in development
`only_parse_file_extensions`
default: None
set a list of valid file extensions.
see `metadata_parser.PARSE_SAFE_FILES` for an example list
`force_parse_invalid_content_type`
default: False
force parsing invalid content types
by default this will only parse text/html content
`require_public_netloc`
default: True
require a valid `netloc` for the host. if `True`, valid hosts
must be a properly formatted public domain name, IPV4 address
or "localhost"
`force_doctype`
default: False
if set to true, will replace a doctype with 'html'
why? some cms give a bad doctype (like nasa.gov)
which can break lxml/bsd
`requests_timeout`
default: None
if set, proxies the value into `requests.get` as `timeout`
"""
self.metadata = {
'og': {},
'meta': {},
'dc': {},
'page': {},
'twitter': {}
}
if strategy:
self.strategy = strategy
if url is not None:
url = url.strip()
self.url = url
self.url_actual = url
self.ssl_verify = ssl_verify
self.soup = None
self.force_doctype = force_doctype
self.response = None
self.response_headers = {}
self.require_public_netloc = require_public_netloc
self.requests_timeout = requests_timeout
if only_parse_file_extensions is not None:
self.only_parse_file_extensions = only_parse_file_extensions
if html is None:
html = self.fetch_url(
url_data=url_data, url_headers=url_headers,
force_parse=force_parse,
force_parse_invalid_content_type=force_parse_invalid_content_type
)
self.parser(html, force_parse=force_parse)
def is_opengraph_minimum(self):
"""
returns true/false if the page has the minimum amount of opengraph tags
"""
return all([hasattr(self, attr)
for attr in self.og_minimum_requirements])
def fetch_url(
self,
url_data=None, url_headers=None, force_parse=False,
force_parse_invalid_content_type=False
):
"""
fetches the url and returns it.
this was busted out so you could subclass.
"""
# should we even download/parse this?
if not force_parse and self.only_parse_file_extensions is not None:
parsed = urlparse(self.url)
path = parsed.path
if path:
url_fpath = path.split('.')
if len(url_fpath) == 0:
# i have no idea what this file is, it's likely using a
# directory index
pass
elif len(url_fpath) > 1:
url_fext = url_fpath[-1]
if url_fext in self.only_parse_file_extensions:
pass
else:
raise NotParsable("I don't know what this file is")
# borrowing some ideas from
# http://code.google.com/p/feedparser/source/browse/trunk/feedparser/feedparser.py#3701
if not url_headers:
url_headers = {}
# if someone does usertracking with sharethis.com, they get a hashbang
# like this: http://example.com/page#.UHeGb2nuVo8
# that fucks things up.
url = self.url.split('#')[0]
r = None
try:
# requests gives us unicode and the correct encoding, yay
r = requests.get(
url, params=url_data, headers=url_headers,
allow_redirects=True, verify=self.ssl_verify,
timeout=self.requests_timeout, stream=True,
)
content_type = None
if 'content-type' in r.headers:
content_type = r.headers['content-type']
# content type can have a character encoding in it...
content_type = [i.strip() for i in content_type.split(';')]
content_type = content_type[0].lower()
if (
(
(content_type is None)
or
(content_type != 'text/html')
)
and
(not force_parse_invalid_content_type)
):
raise NotParsable("I don't know what type of file this is! "
"content-type:'[%s]" % content_type)
# okay, now we need to read
## TODO
## TODO
## TODO
## TODO
html = r.text
self.response = r
# lowercase all of the HTTP headers for comparisons per RFC 2616
self.response_headers = dict((k.lower(), v)
for k, v in r.headers.items())
self.url_actual = r.url
if r.status_code != 200:
raise NotParsableFetchError(
message="Status Code is not 200",
code=r.status_code
)
except requests.exceptions.RequestException as error:
raise NotParsableFetchError(
message="Error with `requests` library. Inspect the `raised`"
" attribute of this error.",
raised=error
)
return html
def absolute_url(self, link=None):
"""
makes the url absolute, as sometimes people use a relative url. sigh.
"""
url_fallback = self.url_actual or self.url or None
return url_to_absolute_url(
link,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
def parser(self, html, force_parse=False):
"""parses the html
"""
if not isinstance(html, BeautifulSoup):
# clean the html?
if self.force_doctype:
html = REGEX_doctype.sub("<!DOCTYPE html>", html)
try:
doc = BeautifulSoup(html, "lxml")
except:
doc = BeautifulSoup(html, "html.parser")
else:
doc = html
# let's ensure that we have a real document...
if not doc or not doc.html or not doc.html.head:
return
# stash the bs4 doc for further operations
self.soup = doc
ogs = doc.html.head.findAll(
'meta',
attrs={'property': re.compile(r'^og')}
)
for og in ogs:
try:
self.metadata['og'][og['property'][3:]] = og['content'].strip()
except (AttributeError, KeyError):
pass
except:
log.debug("Ran into a serious error parsing `og`")
pass
twitters = doc.html.head.findAll(
'meta',
attrs={'name': re.compile(r'^twitter')}
)
for twitter in twitters:
try:
self.metadata['twitter'][
twitter['name'][8:]] = twitter['value'].strip()
except (AttributeError, KeyError):
pass
# pull the text off the title
try:
_title_text = doc.html.head.title.text
if len(_title_text) > self.LEN_MAX_TITLE:
_title_text = _title_text[:self.LEN_MAX_TITLE]
self.metadata['page']['title'] = _title_text
except AttributeError:
pass
# is there an image_src?
images = doc.findAll(
'link',
attrs={'rel': re.compile("^image_src$", re.I)}
)
if images:
image = images[0]
if image.has_attr("href"):
img_url = image['href'].strip()
self.metadata['page']['image'] = img_url
elif image.has_attr("content"):
img_url = image['content'].strip()
self.metadata['page']['image'] = img_url
else:
pass
# figure out the canonical url
canonicals = doc.findAll(
'link',
attrs={'rel': re.compile("^canonical$", re.I)}
)
if canonicals:
canonical = canonicals[0]
if canonical.has_attr("href"):
link = canonical['href'].strip()
self.metadata['page']['canonical'] = link
elif canonical.has_attr("content"):
link = canonical['content'].strip()
self.metadata['page']['canonical'] = link
else:
pass
# pull out all the metadata
meta = doc.html.head.findAll(name='meta')
for m in meta:
try:
k = None
v = None
attrs = m.attrs
k = None
if 'name' in attrs:
k = 'name'
elif 'property' in attrs:
k = 'property'
elif 'http-equiv' in attrs:
k = 'http-equiv'
if k:
k = attrs[k].strip()
if 'content' in attrs:
v = attrs['content'].strip()
if (len(k) > 3) and (k[:3] == 'dc:'):
self.metadata['dc'][k[3:]] = v
else:
self.metadata['meta'][k] = v
except AttributeError:
pass
def get_metadata(self, field, strategy=None):
"""
looks for the field in various stores. defaults to the core
strategy, though you may specify a certain item. if you search for
'all' it will return a dict of all values.
"""
if strategy:
_strategy = strategy
else:
_strategy = self.strategy
if _strategy == 'all':
rval = {}
for store in self.metadata:
if field in self.metadata[store]:
rval[store] = self.metadata[store][field]
return rval
for store in _strategy:
if store in self.metadata:
if field in self.metadata[store]:
return self.metadata[store][field]
return None
def get_discrete_url(
self,
og_first=True, canonical_first=False, allow_invalid=False
):
"""convenience method.
if `allow_invalid` is True, it will return the raw data.
if `allow_invalid` is False (default), it will try to correct
the data (relative to absolute) or reset to None.
"""
og = self.get_metadata('url', strategy=['og'])
canonical = self.get_metadata('canonical', strategy=['page'])
if not allow_invalid:
# fallback url is used to drop a domain
url_fallback = self.url_actual or self.url or None
if og and not is_url_valid(
og,
require_public_netloc=self.require_public_netloc
):
# try making it absolute
og = url_to_absolute_url(
og,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
if not is_url_valid(
og,
require_public_netloc=self.require_public_netloc
):
# set to NONE if invalid
og = None
if canonical and not is_url_valid(
canonical,
require_public_netloc=self.require_public_netloc
):
# try making it absolute
canonical = url_to_absolute_url(
canonical,
url_fallback=url_fallback,
require_public_netloc=self.require_public_netloc
)
if not is_url_valid(
canonical,
require_public_netloc=self.require_public_netloc
):
# set to NONE if invalid
canonical = None
rval = []
if og_first:
rval = (og, canonical)
elif canonical_first:
rval = (canonical, og)
for i in rval:
if i:
return i
return self.absolute_url()
def get_metadata_link(self, field, strategy=None):
"""sometimes links are bad; this tries to fix them. most useful for meta images"""
# `_value` will be our raw value
_value = self.get_metadata(field, strategy=strategy)
if not _value:
return None
# `value` will be our clean value
# remove whitespace, because some bad blogging platforms add in whitespace by printing elements on multiple lines. d'oh!
value = RE_whitespace.sub('', _value)
# if the url is valid, RETURN IT
if is_url_valid(value, require_public_netloc=self.require_public_netloc):
return value
# fallback url is used to drop a domain
url_fallback = self.url_actual or self.url or None
# try making it absolute
value_fixed = url_to_absolute_url(
value,
url_fallback = url_fallback,
require_public_netloc = self.require_public_netloc
)
if is_url_valid(value_fixed, require_public_netloc=self.require_public_netloc):
return value_fixed
return None
| xethorn/metadata_parser | metadata_parser/__init__.py | __init__.py | py | 25,325 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number":... |
25226116736 | from sklearn import datasets
import pandas as pd
iris = datasets.load_iris()
iris_df = pd.DataFrame(iris.data)
iris_df.columns = iris.feature_names
iris_df['target'] = iris.target
# original target = 0,1,2 int32
print(iris_df.target)
# changing them by using DF.astype(type)
print(iris_df.target.astype(float)) | HawkingLaugh/Data-Processing-Using-Python | Week4/28. inconsistent_data_handling.py | 28. inconsistent_data_handling.py | py | 313 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 5,
"usage_type": "call"
}
] |
34197263982 | import sys, math
from math import pi as pi
import numpy as np
import cv2
from PyQt5.QtCore import QPoint, QRect, QSize, Qt, QPointF, QRectF, pyqtSignal, QTimer
from PyQt5.QtGui import (QBrush, QConicalGradient, QLinearGradient, QPainter, QPainterPath, QPalette, QPen, QPixmap, QPolygon, QRadialGradient, QColor, QTransform, QPolygonF, QKeySequence, QIcon)
from PyQt5.QtWidgets import (QApplication, QProgressBar, QCheckBox, QComboBox, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QSpinBox, QWidget, QPushButton, QSpacerItem, QSizePolicy, QLCDNumber )
from PyQt5 import QtGui, QtCore
from parallelIce.pose3dClient import Pose3DClient
from parallelIce.laserClient import LaserClient
import easyiceconfig as EasyIce
from gui.threadGUI import ThreadGUI
class MainWindow(QWidget):
updGUI=pyqtSignal()
def __init__(self, pose3d, laser1, laser2, laser3, parent=None):
super(MainWindow, self).__init__(parent)
layout = QGridLayout()
self.quesito = quesoWidget(self, pose3d)
self.tiempo = tiempoWidget(self)
self.calidad = calidadWidget(self, laser1, laser2, laser3)
self.distancia = distanciaWidget(self, pose3d)
self.nota = notaWidget(self,pose3d, self.tiempo, self.calidad, self.distancia)
self.logo = logoWidget(self)
layout.addWidget(self.quesito,1,0)
layout.addWidget(self.tiempo,0,0)
layout.addWidget(self.distancia,0,2)
layout.addWidget(self.calidad,1,2)
layout.addWidget(self.nota,0,1)
layout.addWidget(self.logo,2,2)
vSpacer = QSpacerItem(30, 50, QSizePolicy.Ignored, QSizePolicy.Ignored)
layout.addItem(vSpacer,1,0)
self.setFixedSize(940,640);
self.setLayout(layout)
self.updGUI.connect(self.update)
def update(self):
self.quesito.updateG()
self.distancia.updateG()
self.calidad.updateG()
self.nota.updateG()
class logoWidget(QWidget):
    """Static widget that shows the JdeRobot logo scaled to 100x100 px."""
    def __init__(self, winParent):
        super(logoWidget, self).__init__()
        self.winParent=winParent
        # Load with the alpha channel preserved, then scale to 100x100.
        self.logo = cv2.imread("resources/logo_jderobot1.png",cv2.IMREAD_UNCHANGED)
        self.logo = cv2.resize(self.logo, (100, 100))
        image = QtGui.QImage(self.logo.data, self.logo.shape[1], self.logo.shape[0], QtGui.QImage.Format_ARGB32);
        self.pixmap = QtGui.QPixmap.fromImage(image)
        self.height = self.pixmap.height()
        self.width = self.pixmap.width()
        self.mapWidget = QLabel(self)
        self.mapWidget.setPixmap(self.pixmap)
        self.mapWidget.resize(self.width, self.height)
        self.setMinimumSize(100,100)
class calidadWidget(QWidget):
    """Crash-quality widget: counts laser-detected contacts in a progress bar."""
    def __init__(self,winParent, laser1, laser2, laser3):
        super(calidadWidget, self).__init__()
        self.winParent=winParent
        # Front, rear and right laser clients.
        self.laser1 = laser1
        self.laser2 = laser2
        self.laser3 = laser3
        self.numCrash = 0
        # Upper bound used for the (currently unused) crash percentage.
        self.MAX_CRASH = 1000
        vLayout = QVBoxLayout()
        choquesLabel = QLabel("Choques:")
        self.bar = QProgressBar()
        self.bar.setValue(self.numCrash)
        st = "QProgressBar::chunk {background-color: #ff0000;}\n QProgressBar {border: 1px solid grey;border-radius: 2px;text-align: center;background: #eeeeee;}"
        self.bar.setStyleSheet(st)
        self.bar.setTextVisible(False)
        vLayout.addWidget(choquesLabel, 0)
        vLayout.addWidget(self.bar, 0)
        vSpacer = QSpacerItem(30, 80, QSizePolicy.Ignored, QSizePolicy.Ignored)
        vLayout.addItem(vSpacer)
        self.setLayout(vLayout)
    def get_laser_distance(self, laser):
        """Return True if any beam in 1..179 degrees reads 15 cm or less.

        (Despite the name, this returns a boolean crash flag, not a distance.)
        """
        DIST = 15
        maxAngle = 180
        crash = False
        for i in range(0, maxAngle+1):
            # Distance in millimeters, we change to cm
            laserI = float(laser.distanceData[i])/float(10)
            if i != 0 and i != 180:
                if laserI <= DIST:
                    crash = True
        return crash
    def updateG(self):
        """Poll the three lasers and bump the crash counter on any contact."""
        laser_data_Front = self.laser1.getLaserData()
        laser_data_Rear = self.laser2.getLaserData()
        laser_data_Right = self.laser3.getLaserData()
        crashFront = self.get_laser_distance(laser_data_Front)
        crashRear = self.get_laser_distance(laser_data_Rear)
        crashRight = self.get_laser_distance(laser_data_Right)
        if crashFront or crashRear or crashRight:
            self.numCrash = self.numCrash + 1
        # NOTE(review): percentajeCrash is computed but never used; the bar
        # displays the raw counter.
        percentajeCrash = self.numCrash * 100/self.MAX_CRASH
        self.bar.setValue(self.numCrash)
        self.update()
class distanciaWidget(QWidget):
    """Distance widget: car-to-parked-cars and car-to-sidewalk distances.

    All geometry is 2-D in the ground plane: the four corners of the taxi
    (rotated by its yaw) are compared against hard-coded poses of two
    parked cars and the sidewalk line.
    """
    def __init__(self,winParent, pose3d):
        super(distanciaWidget, self).__init__()
        self.winParent=winParent
        self.pose3d = pose3d
        self.distFrontFinal = 0
        self.distRearFinal = 0
        self.distanceSidewalk = 0
        vLayout = QVBoxLayout()
        self.distances()
        distancesLabel = QLabel("Distancias:")
        self.distanceFrontalLabel = QLabel("Distancia frontal: " + str(round(self.distFrontFinal, 3)) + ' m')
        self.distanceRearLabel = QLabel("Distancia trasera: " + str(round(self.distRearFinal, 3)) + ' m')
        self.distanceSidewalkLabel = QLabel("Distancia a la acera: " + str(round(self.distanceSidewalk, 3)) + ' m')
        vLayout.addWidget(distancesLabel, 0)
        vLayout.addWidget(self.distanceFrontalLabel, 0)
        vLayout.addWidget(self.distanceRearLabel, 0)
        vLayout.addWidget(self.distanceSidewalkLabel, 0)
        self.setLayout(vLayout)
    def RTx(self, angle, tx, ty, tz):
        """Homogeneous transform: rotation of `angle` rad around X plus translation."""
        RT = np.matrix([[1, 0, 0, tx], [0, math.cos(angle), -math.sin(angle), ty], [0, math.sin(angle), math.cos(angle), tz], [0,0,0,1]])
        return RT
    def RTy(self, angle, tx, ty, tz):
        """Homogeneous transform: rotation of `angle` rad around Y plus translation."""
        RT = np.matrix([[math.cos(angle), 0, math.sin(angle), tx], [0, 1, 0, ty], [-math.sin(angle), 0, math.cos(angle), tz], [0,0,0,1]])
        return RT
    def RTz(self, angle, tx, ty, tz):
        """Homogeneous transform: rotation of `angle` rad around Z plus translation."""
        RT = np.matrix([[math.cos(angle), -math.sin(angle), 0, tx], [math.sin(angle), math.cos(angle),0, ty], [0, 0, 1, tz], [0,0,0,1]])
        return RT
    def RTCar(self):
        """Car-frame rotation: current yaw around Z, no translation."""
        yaw = self.pose3d.getYaw()
        RTz = self.RTz(yaw, 0, 0, 0)
        return RTz
    def distancePoint2Segment(self, A, B, C):
        """Distance from point C to segment AB (clamped to the endpoints)."""
        # Segment: A[ax,ay] ; B[bx,by]
        # Point: C[cx, cy]
        # Calculate U parameter
        u = self.parameterU(A, B, C)
        if u < 0:
            # Projection falls before A.
            distance = self.distancePoint2Point(A, C)
        elif u > 1:
            # Projection falls past B.
            distance = self.distancePoint2Point(B, C)
        else:
            distance = self.distancePoint2Rect(A, B, C)
        return distance
    def parameterU(self, A, B, C):
        """Normalized projection parameter of C onto line AB (0 at A, 1 at B)."""
        # Point A: [ax, ay]
        # Point B: [bx, by]
        # Point C: [cx, cy]
        # Parameter U of equations: Px = ax + u*(bx-ax); and Py = ay + u*(by-ay)
        u = ((C[0] - A[0])*(B[0] - A[0]) + (C[1] - A[1])*(B[1] - A[1])) / (pow((B[0] - A[0]),2) + pow((B[1] - A[1]),2))
        return u
    def distancePoint2Point(self, Point1, Point2):
        """Euclidean distance between two 2-D points."""
        # Point: 1[x1,y1]
        # Point: 2[x2,y2]
        return math.sqrt(pow((Point2[0]-Point1[0]),2) + pow((Point2[1]-Point1[1]),2))
    def distancePoint2Rect(self, A, B, C):
        """Perpendicular distance from point C to the infinite line through A and B."""
        # Rect: A[ax,ay] ; B[bx,by]
        # Point: C[cx,cy]
        distance = abs((B[0] - A[0])*(C[1] - A[1]) - (B[1] - A[1])*(C[0] - A[0])) / (math.sqrt(pow((B[0]-A[0]),2) + pow((B[1]-A[1]),2)))
        return distance
    def distanceCar2Car(self, pointCarLeft, pointCarRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight):
        """Minimum distance from four car corners to one edge (segment) of another car."""
        # Mide la minima distancia desde los 4 vertices de un coche a la parte delantera o trasera de otro coche (segmento)
        # Segment: pointCarLeft[x,y] ; pointCarRight[x,y]
        # Point 1: pointFrontLeft[x,y]
        # Point 2: pointFrontRight[x,y]
        # Poitn 3: pointRearLeft[x,y]
        # Point 4: pointRearRight[x,y]
        distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontLeft)
        if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontRight) < distance):
            distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointFrontRight)
        if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearLeft) < distance):
            distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearLeft)
        if (self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearRight) < distance):
            distance = self.distancePoint2Segment(pointCarLeft, pointCarRight, pointRearRight)
        return distance
    def distances(self):
        """Recompute front/rear/sidewalk distances from the current pose."""
        carSize = [5.75, 2.5]
        carSizeTaxi = [4, 2]
        #Poses sidewalk
        positionSideWalk_start = [-25, -4.25]
        positionSideWalk_final = [35, -4.25]
        # Poses parked cars (origin poses)
        # Frontal car
        pointCarFrontal_RearLeft = [14 - carSize[0]/2, -3+carSize[1]/2]
        pointCarFrontal_RearRight = [14 - carSize[0]/2, -3-carSize[1]/2]
        pointCarFrontal_FrontLeft = [14 + carSize[0]/2, -3+carSize[1]/2]
        pointCarFrontal_FrontRight = [14 + carSize[0]/2, -3-carSize[1]/2]
        # Rear Car
        pointCarRear_FrontLeft = [0.5 + carSize[0]/2, -3+carSize[1]/2]
        pointCarRear_FrontRight = [0.5 + carSize[0]/2, -3-carSize[1]/2]
        pointCarRear_RearLeft = [0.5 - carSize[0]/2, -3+carSize[1]/2]
        pointCarRear_RearRight = [0.5 - carSize[0]/2, -3-carSize[1]/2]
        # Pose 3D (origin poses)
        xFront = self.pose3d.getX() + carSizeTaxi[0]/2
        xRear = self.pose3d.getX() - carSizeTaxi[0]/2
        yLeft = self.pose3d.getY() + carSizeTaxi[1]/2
        yRight = self.pose3d.getY() - carSizeTaxi[1]/2
        # Final poses (Car's rotation)
        pointFrontLeft = self.RTCar() * np.matrix([[xFront], [yLeft], [1], [1]])
        pointFrontLeft = [pointFrontLeft.flat[0],pointFrontLeft.flat[1]]
        pointFrontRight = self.RTCar() * np.matrix([[xFront], [yRight], [1], [1]])
        pointFrontRight = [pointFrontRight.flat[0], pointFrontRight.flat[1]]
        pointRearLeft = self.RTCar() * np.matrix([[xRear], [yLeft], [1], [1]])
        pointRearLeft = [pointRearLeft.flat[0],pointRearLeft.flat[1]]
        pointRearRight = self.RTCar() * np.matrix([[xRear], [yRight], [1], [1]])
        pointRearRight = [pointRearRight.flat[0],pointRearRight.flat[1]]
        # Distance car -> parked front car
        distFrontFinal_1 = self.distanceCar2Car(pointCarFrontal_RearLeft, pointCarFrontal_RearRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
        # Distance parked front car -> car
        distFrontFinal_2 = self.distanceCar2Car(pointFrontLeft, pointFrontRight, pointCarFrontal_RearLeft, pointCarFrontal_RearRight, pointCarFrontal_FrontLeft , pointCarFrontal_FrontRight)
        # Distance car -> parked rear car
        distRearFinal_1 = self.distanceCar2Car(pointCarRear_FrontLeft, pointCarRear_FrontRight, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
        # Distance parked rear car -> car
        distRearFinal_2 = self.distanceCar2Car(pointRearLeft, pointRearRight, pointCarRear_FrontLeft , pointCarRear_FrontRight, pointCarRear_RearLeft , pointCarRear_RearRight)
        # Minimal distance
        # NOTE(review): despite the comment above, the code keeps the *larger*
        # of the two candidate distances -- confirm whether min was intended.
        if distFrontFinal_1 > distFrontFinal_2:
            self.distFrontFinal = distFrontFinal_1
        else:
            self.distFrontFinal = distFrontFinal_2
        if distRearFinal_1 > distRearFinal_2:
            self.distRearFinal = distRearFinal_1
        else:
            self.distRearFinal = distRearFinal_2
        # Distance car -> sidewalk
        self.distanceSidewalk = self.distanceCar2Car(positionSideWalk_start, positionSideWalk_final, pointFrontLeft, pointFrontRight, pointRearLeft, pointRearRight)
    def updateG(self):
        """Recompute the distances and refresh the labels."""
        self.distances()
        self.distanceFrontalLabel.setText("Distancia frontal: " + str(round(self.distFrontFinal, 3)) + ' m')
        self.distanceRearLabel.setText("Distancia trasera: " + str(round(self.distRearFinal, 3)) + ' m')
        self.distanceSidewalkLabel.setText("Distancia a la acera: " + str(round(self.distanceSidewalk, 3)) + ' m')
        self.update()
class notaWidget(QWidget):
    """Final-mark widget: a button that combines the four sub-scores on demand."""
    def __init__(self,winParent,pose3d, tiempo, calidad, distancia):
        super(notaWidget, self).__init__()
        self.winParent=winParent
        self.pose3d = pose3d
        self.time = tiempo
        self.calidad = calidad
        self.distancia = distancia
        self.hLayout = QHBoxLayout()
        self.button = QPushButton('Show me my mark')
        self.button.clicked.connect(self.notaFinal)
        self.hLayout.addWidget(self.button, 0)
        self.setLayout(self.hLayout)
    def notaFinal(self):
        """Compute the final mark and append it as a label.

        Each sub-score is 0..100 and is weighted by 0.025, so the final
        mark is on a 0..10 scale with equal 25% weights.
        """
        notaAngle = self.testAngle() * 0.025
        notaTime = self.testTime() * 0.025
        notaDist = self.testDistance() * 0.025
        notaCol = self.testCollision() * 0.025
        nota = notaAngle + notaTime + notaDist + notaCol
        notaLabel = QLabel('Nota final: ' + str(nota))
        self.hLayout.addWidget(notaLabel, 0)
    def testAngle(self):
        """Score 0..100 for the final parking angle (yaw + 90; ~90 deg is ideal)."""
        yawRad = self.pose3d.getYaw()
        angle = math.degrees(yawRad) + 90
        if (angle >= 85 and angle <= 105):
            notaAngle = 100
        elif (angle < 85 and angle >= 70 or angle > 105 and angle <= 120):
            notaAngle = 80
        elif (angle < 70 and angle >= 60 or angle > 120 and angle <= 130):
            notaAngle = 50
        else:
            notaAngle = 0
        return notaAngle
    def testTime(self):
        """Score 0..100 against a 170 s reference time (faster caps at 100)."""
        minTime = 170
        myTime = self.time.seconds
        # NOTE(review): raises ZeroDivisionError if the button is pressed
        # during the very first second (seconds == 0) -- confirm.
        notaTime = float(minTime*100)/float(myTime)
        if myTime < 170:
            notaTime = 100
        return notaTime
    def testDistance(self):
        """Average of three 0..100 scores: front gap, rear gap and sidewalk gap."""
        MyDistFront = self.distancia.distFrontFinal
        MyDistRear = self.distancia.distRearFinal
        MyDistSidewalk = self.distancia.distanceSidewalk
        if MyDistFront >= 1.5 and MyDistFront < 3.5:
            notaDistFront = 100
        elif MyDistFront < 1.5 and MyDistFront >= 1:
            notaDistFront = 50
        else:
            notaDistFront = 0
        if MyDistRear >= 1.5 and MyDistRear < 3.5:
            notaDistRear = 100
        elif MyDistRear < 1.5 and MyDistRear >= 1:
            notaDistRear = 50
        else:
            notaDistRear = 0
        if MyDistSidewalk > 0 and MyDistSidewalk <= 0.75:
            notaDistSidewalk = 100
        elif MyDistSidewalk > 0.75 and MyDistSidewalk < 1.5:
            notaDistSidewalk = 50
        else:
            notaDistSidewalk = 0
        notaDist = float(notaDistFront+notaDistRear+notaDistSidewalk)/float(3)
        return notaDist
    def testCollision(self):
        """100 when no crash was counted; otherwise 0 (minCrash is 0)."""
        minCrash = 0
        if self.calidad.numCrash == 0:
            notaCol = 100
        else:
            notaCol = float(minCrash*100)/float(self.calidad.numCrash)
        return notaCol
    def updateG(self):
        self.update()
class tiempoWidget(QWidget):
    """Elapsed-time widget: an LCD counter that ticks once per second."""
    time = pyqtSignal()
    def __init__(self,winParent):
        super(tiempoWidget, self).__init__()
        self.winParent=winParent
        # Elapsed seconds since the GUI started; read by notaWidget.testTime().
        self.seconds = 0
        hLayout = QHBoxLayout()
        tiempoLabel = QLabel("Tiempo")
        self.lcd = QLCDNumber(self)
        self.lcd.setMaximumSize(100,50)
        hLayout.addWidget(tiempoLabel,0)
        hLayout.addWidget(self.lcd, 1)
        hSpacer = QSpacerItem(300, 30, QSizePolicy.Ignored, QSizePolicy.Ignored)
        hLayout.addItem(hSpacer)
        self.setLayout(hLayout)
        # A 1 Hz timer drives the counter.
        timer = QTimer(self)
        timer.start(1000)
        timer.timeout.connect(self.printTime)
        # get the palette
        palette = self.lcd.palette()
        # foreground color
        palette.setColor(palette.WindowText, QColor(85, 85, 255))
        # background color
        palette.setColor(palette.Background, QColor(0, 170, 255))
        # "light" border
        palette.setColor(palette.Light, QColor(255, 0, 0))
        # "dark" border
        palette.setColor(palette.Dark, QColor(0, 255, 0))
        # set the palette
        self.lcd.setPalette(palette)
    def printTime(self):
        """Timer slot: advance the counter and refresh the LCD display."""
        self.seconds += 1
        self.lcd.display(self.seconds)
class quesoWidget(QWidget):
    """'Cheese' dial: colored angular zones plus a needle showing the car's yaw."""
    def __init__(self,winParent, pose3d):
        super(quesoWidget, self).__init__()
        self.winParent=winParent
        # Bounding rectangle of the dial (300x300 px).
        self.rectangle = QRectF(0.0, 0.0, 300.0, 300.0)
        self.pose3d = pose3d
    def drawRedZones(self, painter):
        """Red (bad) zones: 0-45, 135-180 degrees and the whole lower half."""
        self.setStyle(painter, QColor(255,70,70),QColor(255,70,70),1)
        # Qt pie angles are expressed in 1/16ths of a degree.
        startAngle = 0 * 16
        spanAngle = 45 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 135 * 16
        spanAngle = 45 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 180 * 16
        spanAngle = 180 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
    def drawOrangeZones(self, painter):
        """Orange (acceptable) zones: 45-75 and 105-135 degrees."""
        self.setStyle(painter, QColor(255,220,23),QColor(255,220,23),1)
        startAngle = 45 * 16
        spanAngle = 30 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 105 * 16
        spanAngle = 30 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
    def drawGreenZones(self, painter):
        """Green (good) zones: 75-105 degrees, drawn as two 15-degree pies."""
        self.setStyle(painter, QColor(117,240,154),QColor(117,240,154),1)
        startAngle = 75 * 16
        spanAngle = 15 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
        startAngle = 90 * 16
        spanAngle = 15 * 16
        painter.drawPie(self.rectangle, startAngle, spanAngle)
    def drawArrow(self, painter, angle=90):
        """Draw the yaw needle (the `angle` parameter is overwritten with live yaw)."""
        radius = 130
        yawRad = self.pose3d.getYaw()
        angle = -(yawRad + pi/2) # PI/2 to center the needle
        origx = self.rectangle.width() / 2
        origy = self.rectangle.height() / 2
        finx = radius * math.cos(angle) + origx
        finy = radius * math.sin(angle) + origy
        self.setStyle(painter, Qt.black,Qt.black,3)
        painter.drawLine(QPoint(origx,origy), QPoint(finx,finy))
        painter.drawEllipse(145,145, 10, 10)
    def resetPen(self, painter):
        """Restore a plain thin black pen and an empty brush."""
        pen = QPen(Qt.black, 1)
        brush = QBrush()
        painter.setPen(pen)
        painter.setBrush(brush)
    def setStyle(self, painter, fillColor, penColor, stroke):
        """Configure brush/pen colors, stroke width and antialiasing."""
        brush = QBrush()
        pen = QPen(penColor, stroke)
        brush.setColor(fillColor)
        brush.setStyle(Qt.SolidPattern)
        painter.setBrush(brush)
        painter.setPen(pen)
        painter.setRenderHint(QPainter.Antialiasing)
    def paintEvent(self, event):
        """Repaint the dial: zones first, needle on top."""
        painter = QPainter(self)
        self.drawRedZones(painter)
        self.drawOrangeZones(painter)
        self.drawGreenZones(painter)
        self.drawArrow(painter,120)
    def updateG(self):
        self.update()
if __name__ == "__main__":
    app = QApplication(sys.argv)
    # Connect to the ICE proxies published by the Autopark simulation.
    ic = EasyIce.initialize(sys.argv)
    pose3d = Pose3DClient(ic, "Autopark.Pose3D", True)
    laser1 = LaserClient(ic, "Autopark.Laser1", True)
    laser2 = LaserClient(ic, "Autopark.Laser2", True)
    laser3 = LaserClient(ic, "Autopark.Laser3", True)
    myGUI = MainWindow(pose3d, laser1, laser2, laser3)
    myGUI.show()
    # Background thread that periodically emits updGUI to refresh the window.
    t2 = ThreadGUI(myGUI)
    t2.daemon=True
    t2.start()
    sys.exit(app.exec_())
| RoboticsLabURJC/2016-tfg-irene-lope | AutoPark_Practice/referee.py | referee.py | py | 19,643 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 20,
"usage_type": "call"
},
{
"api_n... |
41646398531 | """ Module containing routines to setup the training of policies.
"""
import argparse
from typing import Optional, Sequence
from aizynthfinder.training.utils import Config
from aizynthfinder.training.keras_models import (
train_expansion_keras_model,
train_filter_keras_model,
train_recommender_keras_model,
)
def main(optional_args: Optional[Sequence[str]] = None) -> None:
    """Entry-point for the aizynth_training tool.

    Parses the command line (a configuration file plus the model kind)
    and runs the matching Keras training routine.
    """
    arg_parser = argparse.ArgumentParser("Tool to train a network policy")
    arg_parser.add_argument("config", help="the filename to a configuration file")
    arg_parser.add_argument(
        "model",
        choices=["expansion", "filter", "recommender"],
        help="the model to train",
    )
    args = arg_parser.parse_args(optional_args)
    config = Config(args.config)
    # Dispatch table instead of an if/elif chain; argparse's `choices`
    # guarantees args.model is one of these keys.
    trainers = {
        "expansion": train_expansion_keras_model,
        "filter": train_filter_keras_model,
        "recommender": train_recommender_keras_model,
    }
    trainers[args.model](config)


if __name__ == "__main__":
    main()
| AlanHassen/modelsmatter | aizynthfinder/training/training.py | training.py | py | 1,085 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "aizynthfinder... |
3116557557 |
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as F
from random import randint
import numpy as np
# import subprocess
# import multiprocessing
# import concurrent.futures
from time import time
from math import sqrt
# Network and training hyper-parameters.
CHANNEL = 256       # conv channels in the stem and every residual block
BLOCKNUM = 40       # number of residual blocks in the tower
BOARDSIZE = 8       # Othello board edge length
BATCH = 50          # training mini-batch size
EPOCHS = 20         # training epochs per iteration
DATASIZE = 7200     # replay-buffer capacity (positions)
DATAUSE = 2000      # positions sampled per training run
ROUNDLIMIT = 500    # MCTS simulations per self-play move
PROCESS = 3         # NOTE(review): unused here (multiprocessing is commented out)
OUTPUT_INFO = 1     # 1 = print progress to stdout
class resBlock(nn.Module):
    """Residual block: two 3x3 conv+BN layers with an identity skip connection.

    The channel count `x` is preserved through the block, so the skip
    addition is always shape-compatible.
    """

    def __init__(self, x):
        super().__init__()
        # Attribute names are kept ("resBlock", "relu") so existing
        # checkpoints (state_dict keys) keep loading.
        self.resBlock = nn.Sequential(
            nn.Conv2d(x, x, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(x),
            nn.ReLU(inplace=True),
            nn.Conv2d(x, x, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(x),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return relu(F(x) + x)."""
        return self.relu(self.resBlock(x) + x)
class resCNN(nn.Module):
    """AlphaZero-style network: conv stem, BLOCKNUM residual blocks, two heads.

    Input is a batch of 3x8x8 planes (black discs, white discs, side-to-move
    fill, as built by gen_py/MCTSNode); output is (policy logits over 64
    squares, value in [-1, 1]).
    """
    def __init__(self):
        super(resCNN, self).__init__()
        # Stem: lift the 3 input planes to CHANNEL feature maps.
        self.input = nn.Sequential(
            nn.Conv2d(3, CHANNEL, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(CHANNEL),
            nn.ReLU(inplace=True)
        )
        self.resnet = nn.Sequential()
        for i in range(BLOCKNUM):
            self.resnet.add_module(str(i),resBlock(CHANNEL))
        # Policy head: raw per-square logits (softmax is applied by callers).
        self.ph = nn.Sequential(
            nn.Conv2d(CHANNEL, 2, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(2),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(BOARDSIZE*BOARDSIZE*2, BOARDSIZE*BOARDSIZE),
            # nn.Softmax(dim=1)
        )
        # Value head: scalar squashed to [-1, 1] by Tanh.
        self.vh = nn.Sequential(
            nn.Conv2d(CHANNEL, 1, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(1),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(BOARDSIZE*BOARDSIZE, CHANNEL),
            nn.ReLU(inplace=True),
            nn.Linear(CHANNEL, 1),
            nn.Tanh()
        )
    def forward(self, x):
        """Return (policy_logits, value) for a batch of board encodings."""
        model = self.input(x)
        model = self.resnet(model)
        p = self.ph(model)
        v = self.vh(model)
        return p, v
# Run on GPU when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cnn = resCNN()
# Resume from the latest checkpoint (must exist next to this script).
cnn.load_state_dict(torch.load(r'./rescnn.pth'))
cnn.to(device)
optimizer = Adam(cnn.parameters(), weight_decay=1e-4)
# Replay buffer: board planes, policy targets and game outcomes.
stateData = torch.zeros(DATASIZE, 3, 8, 8, dtype=float)
policyData = torch.zeros(DATASIZE, 64, dtype=float)
valueData = torch.zeros(DATASIZE, 1, dtype=float)
policyLossFunc = nn.CrossEntropyLoss()
valueLossFunc = nn.MSELoss()
def calc(cood):
    """Flatten a (row, col) board coordinate into a 0..63 policy index."""
    row, col = cood
    return row * BOARDSIZE + col
def lossFunction(policyOutput, valueOutput, policyTarget, valueTarget):
    """AlphaZero-style combined loss: policy cross-entropy plus value MSE."""
    return (policyLossFunc(policyOutput, policyTarget)
            + valueLossFunc(valueOutput, valueTarget))
def train():
    """Train `cnn` for EPOCHS epochs on a random DATAUSE-sample of the buffer."""
    cnn.train()
    # `use` marks buffer slots already drawn (sampling without replacement).
    use = torch.zeros(DATASIZE)
    inputData = torch.zeros(DATAUSE,3,8,8)
    policyTargetData = torch.zeros(DATAUSE,64)
    valueTargetData = torch.zeros(DATAUSE,1)
    i = 0
    while i < DATAUSE:
        x = randint(0, DATASIZE - 1)
        if use[x] == 1:
            continue
        inputData[i] = stateData[x]
        policyTargetData[i] = policyData[x]
        valueTargetData[i] = valueData[x]
        use[x] = 1
        i += 1
    optimizer.zero_grad()
    for i in range(EPOCHS):
        policyLossAvg = 0.0
        valueLossAvg = 0.0
        if OUTPUT_INFO:
            print(f'epoch {i+1}:')
        # One optimizer step per mini-batch of BATCH positions.
        for j in range(0, DATAUSE, BATCH):
            input = inputData[j:j+BATCH]
            policyTarget = policyTargetData[j:j+BATCH]
            valueTarget = valueTargetData[j:j+BATCH]
            policyOutput, valueOutput = cnn(input.to(device))
            policyLoss = policyLossFunc(policyOutput, policyTarget.to(device))
            valueLoss = valueLossFunc(valueOutput, valueTarget.to(device))
            loss = policyLoss + valueLoss
            policyLossAvg += float(policyLoss)
            valueLossAvg += float(valueLoss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if OUTPUT_INFO:
            print(f'    policy loss: {policyLossAvg / (DATAUSE / BATCH)}')
            print(f'    value loss: {valueLossAvg / (DATAUSE / BATCH)}')
            print(f'    total loss: {(policyLossAvg + valueLossAvg) / (DATAUSE / BATCH)}')
    # Checkpoint after every training run.
    torch.save(cnn.state_dict(), r'./rescnn.pth')
class GameState:
    """8x8 Othello/Reversi board state.

    Cells hold 1 (black), -1 (white) or 0 (empty); `history` records every
    move made as (row, col) tuples.
    """

    # The eight neighbour directions used for capture/validity scans.
    _DIRECTIONS = [(d, e) for d in (-1, 0, 1) for e in (-1, 0, 1) if (d, e) != (0, 0)]

    def __init__(self):
        self.board = np.zeros((8, 8), dtype=np.int8)
        self.board[3, 3] = self.board[4, 4] = -1   # white starting discs
        self.board[3, 4] = self.board[4, 3] = 1    # black starting discs
        self.history = []

    def copy(self):
        """Return an independent copy (board and history are duplicated)."""
        state = GameState()
        state.board = np.copy(self.board)
        state.history = self.history[:]
        return state

    def makeMove(self, move, player):
        """Place `player`'s disc at `move` and flip all captured discs.

        The move is not validated here; callers are expected to pick moves
        from getValidMoves().
        """
        self.history.append(move)
        self.board[move] = player
        for d, e in self._DIRECTIONS:
            x, y = move[0] + d, move[1] + e
            to_flip = []
            while 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == -player:
                to_flip.append((x, y))
                x += d
                y += e
            # Flip only if the opponent run is capped by one of our discs.
            if 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == player:
                for f in to_flip:
                    self.board[f] = player

    def isValid(self, move, player):
        """Return True if placing at `move` would capture at least one disc."""
        if self.board[move] != 0:
            return False
        for d, e in self._DIRECTIONS:
            x, y = move[0] + d, move[1] + e
            num = 0
            while 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == -player:
                x += d
                y += e
                num += 1
            if num > 0 and 0 <= x < 8 and 0 <= y < 8 and self.board[x, y] == player:
                return True
        return False

    def getValidMoves(self, player):
        """Return all legal moves for `player`, scanned in row-major order."""
        return [(i, j) for i in range(8) for j in range(8)
                if self.isValid((i, j), player)]

    def isTerminal(self):
        """The game is over when neither side has a legal move."""
        return not self.getValidMoves(1) and not self.getValidMoves(-1)

    def getWinner(self):
        """Return 1 (black wins), -1 (white wins) or 0 (draw) by disc count."""
        return int(np.sign(int(self.board.sum())))

    def getScore(self, player):
        """Return the number of discs `player` currently owns."""
        return int(np.count_nonzero(self.board == player))

    def print(self):
        """Pretty-print the board: '#' = black, 'O' = white, '.' = empty."""
        symbols = {1: '#', -1: 'O', 0: '.'}
        print('  ', end='')
        for col in range(8):
            print(col, end=' ')
        print('')
        for row in range(8):
            print(row, end=' ')
            for col in range(8):
                print(symbols[int(self.board[row, col])], end=' ')
            print('')
# Exploration constant of the PUCT selection formula.
PUCT_CONSTANT = 1
class MCTSNode:
    """Node of the network-guided MCTS tree.

    On creation the node evaluates the global `cnn` on its own position and
    caches the move priors (`policyPredict`, softmax over 64 squares) and
    the scalar value estimate (`valuePredict`).
    """
    def __init__(self, state:GameState, player):
        self.state:GameState = state.copy()
        self.parent:MCTSNode = None
        self.children = []
        self.unexploredMoves = state.getValidMoves(player)
        self.player = player   # side to move at this node
        self.n = 0             # visit count
        self.v = 0.0           # accumulated backed-up value
        self.p = 0.0           # prior probability, set by the parent in expand()
        # BUG FIX: the original guarded the network evaluation with
        # `if type == 2:`, which compares the *builtin* `type` to 2 and is
        # always False -- priors stayed zero and valuePredict stayed 0.0.
        # Evaluate the network unconditionally instead.
        input = torch.zeros(3, 8, 8)
        # Plane 0: black discs, plane 1: white discs, plane 2: side to move.
        input[0] = torch.from_numpy((state.board == 1).astype(np.float32))
        input[1] = torch.from_numpy((state.board == -1).astype(np.float32))
        input[2] = float(player)
        input.unsqueeze_(0)
        output = cnn(input.to(device))
        self.policyPredict = F.softmax(output[0][0], dim=-1)
        self.valuePredict = float(output[1][0])
    def expand(self):
        """Create a child for one unexplored move and return it (None if exhausted)."""
        if len(self.unexploredMoves) <= 0:
            return None
        move = self.unexploredMoves.pop()
        newState = self.state.copy()
        newState.makeMove(move, self.player)
        # The opponent moves next unless it has no legal move (pass).
        if len(newState.getValidMoves(-self.player)) > 0:
            child = MCTSNode(newState, -self.player)
        else:
            child = MCTSNode(newState, self.player)
        child.parent = self
        child.p = float(self.policyPredict[calc(move)])
        self.children.append(child)
        return child
    def puct(self, player):
        """Return the PUCT score of this node from `player`'s point of view."""
        Q = self.v / self.n
        U = PUCT_CONSTANT * self.p * sqrt(self.parent.n + 1) / (self.n + 1)
        if player == -1:
            # Flip the exploitation term when scoring for white.
            Q = -Q
        return Q + U
    def select(self, player):
        """Return the child with the highest PUCT score."""
        return max(self.children, key=lambda c: c.puct(player))
    def backpropagate(self, v):
        """Add `v` to this node and every ancestor, incrementing visit counts."""
        self.n += 1
        self.v += v
        if self.parent:
            self.parent.backpropagate(v)
class CNNMCTS:
    """Monte-Carlo tree search guided by the global policy/value network."""
    def __init__(self):
        return
    def CNNMCTSBestMove(self, state, player, timeIterations):
        """Run `timeIterations` simulations from `state`; return the most visited move."""
        rootNode = MCTSNode(state, player)
        for i in range(timeIterations):
            node = rootNode
            # Selection: descend while the node is fully expanded and non-terminal.
            # NOTE(review): select() is always given the *root* player, so Q is
            # evaluated from the root's perspective at every depth -- confirm
            # this is intended rather than per-node minimax.
            while len(node.unexploredMoves) == 0 and node.state.isTerminal() == False:
                if len(node.children) > 0:
                    node = node.select(player)
                else:
                    break
            # Expansion: add one child for an untried move.
            if len(node.unexploredMoves) > 0 and node.state.isTerminal() == False:
                node = node.expand()
            # Backup: network value for non-terminal leaves, exact winner otherwise.
            if node.state.isTerminal() == False:
                node.backpropagate(node.valuePredict)
            else:
                node.backpropagate(node.state.getWinner())
        # Pick the most visited root child; its last history entry is the move.
        bestChild = rootNode.children[0]
        for child in rootNode.children:
            if child.n > bestChild.n:
                bestChild = child
        return bestChild.state.history[-1]
def gen_py():
    """Fill the replay buffers with DATASIZE self-play positions.

    Games are played with CNN-guided MCTS (ROUNDLIMIT simulations per move).
    Moves 5..54 of each game are recorded and, once the game ends, every
    sample of that game is labelled with the final winner.
    """
    MCTS = CNNMCTS()
    cnt = 0
    cnn.eval()
    while cnt < DATASIZE:
        c_state = GameState()
        currentPlayer = 1
        cur = 0
        lst = cnt   # first buffer index used by this game (for winner labelling)
        while c_state.isTerminal() == 0:
            # Pass when the side to move has no legal move.
            if len(c_state.getValidMoves(currentPlayer)) <= 0:
                currentPlayer = -currentPlayer
                continue
            bestMove = MCTS.CNNMCTSBestMove(c_state, currentPlayer, ROUNDLIMIT)
            cur += 1
            if 5 <= cur and cur <= 54 and cnt < DATASIZE:
                # Plane 0: black discs, plane 1: white discs, plane 2: side to move.
                for i in range(8):
                    for j in range(8):
                        if c_state.board[i,j] == 1:
                            stateData[cnt,0,i,j] = 1
                for i in range(8):
                    for j in range(8):
                        if c_state.board[i,j] == -1:
                            stateData[cnt,1,i,j] = 1
                for i in range(8):
                    for j in range(8):
                        stateData[cnt,2,i,j] = currentPlayer
                # NOTE(review): policyData rows are 64-wide but a scalar move
                # index is assigned here, broadcasting the index to all 64
                # entries -- confirm against how train()'s CrossEntropyLoss
                # consumes these targets.
                policyData[cnt] = calc(bestMove)
                cnt += 1
            c_state.makeMove(bestMove, currentPlayer)
            currentPlayer = -currentPlayer
        # Label every sample of this game with the final result.
        valueData[lst:cnt] = c_state.getWinner()
        if OUTPUT_INFO:
            print(f'{cnt} / {DATASIZE}\r', end='')
    if OUTPUT_INFO:
        print('')
if __name__ == '__main__':
    np.set_printoptions(suppress=True, precision=7)
    # multiprocessing.freeze_support()
    times = 0
    # Endless self-play -> training loop; interrupt manually to stop.
    while 1 :
        if OUTPUT_INFO:
            print(f'iteration {times}:')
            print('self-matching:')
        gen_py()
        # gen_cpp()
        # gen_mainProcess() # in train.py
        if OUTPUT_INFO:
            print('train start:')
        train()
        # archivePath = 'D:/Desktop/yanxue/rescnn_archive/rescnn-iteration' + str(times) +'.pth'
        # torch.save(cnn.state_dict(), archivePath)
        # BUG FIX: `times` was never incremented, so every iteration was
        # reported (and would be archived) as iteration 0.
        times += 1
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
35425394354 | from yaml_parser import parse_yaml
from voluptuous import Schema,Object, Range, Coerce, All, Any, Optional, Lower, Invalid
import re
import sys
import argparse
"""
Python YAML validator
"""
# Reusable validator: a list whose members can all be coerced to int.
# NOTE(review): not referenced anywhere in this module.
list_of_ints = All([Coerce(int)], msg='invalid list of ints')
from datetime import datetime
def check_date(datestring):
    """Voluptuous validator: accept date strings formatted as YYYY-MM-DD.

    Raises voluptuous.Invalid for anything strptime cannot parse.
    """
    try:
        fmt='%Y-%m-%d'
        datetime.strptime(datestring, fmt)
        # Dropped the original's dead `Coerce(datetime)` statement: it built
        # a coercer object and immediately discarded it.
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors propagate.
        raise Invalid('expected in Y-m-d')
# Schema for the `simulation` section: quantiles must be ints in 1..100,
# prediction/replenishment select a model, and start/end dates are Y-m-d.
simulation_schema=Schema({
    'quantiles': [All(Coerce(int), Range(1, 100), msg='not a valid quantile')],
    'prediction': {
        'model': str,
        'window': int
    },
    'startdate': check_date,
    'enddate': check_date,
    'replenishment': {
        'model': str
    },
    'input_file' : str
})
# Schema for the `replenishment` section: same fields minus the dates.
replenishment_schema=Schema({
    'quantiles': [All(Coerce(int), Range(1, 100), msg='not a valid quantile')],
    'prediction': {
        'model': str,
        'window': int
    },
    'replenishment': {
        'model': str
    },
    'input_file' : str
})
def test_file(yamlconfig, types):
    """Validate the relevant section of a parsed YAML configuration.

    Args:
        yamlconfig: dict produced by parse_yaml.
        types: which schema to apply ('simulation' or 'replenishment').

    Raises a voluptuous error if the selected section is invalid; silently
    does nothing for any other `types` value.
    """
    if types=='simulation':
        simulation_schema(yamlconfig['simulation'])
    # BUG FIX: the original compared against the misspelling
    # 'replenishemnt', so replenishment configs were never validated.
    if types=='replenishment':
        replenishment_schema(yamlconfig['replenishment'])
if __name__ == "__main__":
    # CLI: -y/--yaml is the file to validate, -t/--types selects the schema.
    parser = argparse.ArgumentParser()
    parser.add_argument("-y","--yaml", help="yaml inputfile to test", type=str)
    parser.add_argument("-t","--types", help="type of yaml", type=str)
    args = parser.parse_args()
    ### Parse YAML to test
    to_test = parse_yaml(args.yaml)
    # Validate the chosen section against its schema.
    test_file(to_test,args.types)
| philippmack/europython2015-pmack | config/validator.py | validator.py | py | 1,633 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "voluptuous.All",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "voluptuous.Coerce",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.... |
2736199577 | import keras
from keras import backend as K
from keras.callbacks import Callback
import numpy as np
class BitsLogger(Callback):
    """Keras callback that logs the "activation bits" of filter_mask layers.

    For every layer whose name contains 'filter_mask' it computes
    -sum(w * log(w)) over the layer's filterProbs, normalised by
    log(nConvs), and records the per-epoch total under `activation_bits`.
    """
    def __init__(self, nConvs=9, **kwargs):
        # 1/log(nConvs) normalises the entropy-like sum computed per epoch.
        self.norm = 1./np.log(float(nConvs))
        self.bits_history=[]
        self.filterLayers=[]
        super(BitsLogger, self).__init__(**kwargs)
    def on_train_begin(self, logs):
        """Collect every 'filter_mask' layer (descending into 'model_1' if nested)."""
        layers = self.model.layers
        for l in layers:
            if l.name == 'model_1':
                layers=l.layers
        for l in layers:
            if "filter_mask" in l.name:
                self.filterLayers.append(l)
    def on_epoch_end(self, epoch, logs={}):
        """Compute, print and log the summed activation bits for this epoch."""
        bitsum=0.
        for l in self.filterLayers:
            weights=K.flatten(l.filterProbs)
            b=-self.norm*K.sum(weights*K.log(weights))
            bitsum += b
        print('    Activation bits: ' + str(K.eval(bitsum)))
        logs['activation_bits'] = K.eval(bitsum)
        self.bits_history.append(K.eval(bitsum))
class EntropyLogger(Callback):
    """Keras callback that logs a normalised log-std "entropy" of filter_mask layers.

    The normalisation range [hmin, hmax] is estimated empirically at train
    begin from the std of uniform random vectors of the same width.
    """
    def __init__(self, **kwargs):
        self.entropy_history=[]
        self.filterLayers=[]
        # NOTE(review): `constant` (the Gaussian differential-entropy term)
        # is computed here but never used in this class.
        self.constant = 0.5*np.log(2*np.pi) + 0.5
        self.hmin=0.
        self.hmax=0.
        self.norm=1.
        super(EntropyLogger, self).__init__(**kwargs)
    def on_train_begin(self, logs):
        """Find filter_mask layers and calibrate the [hmin, hmax] range."""
        layers = self.model.layers
        for l in layers:
            if l.name == 'model_1':
                layers=l.layers
        for l in layers:
            if "filter_mask" in l.name:
                self.filterLayers.append(l)
        nFilters = K.eval(K.shape(self.filterLayers[-1].filterProbs)[-1])
        # Monte-Carlo estimate of the attainable log-std range for nFilters.
        r=np.random.uniform(size=(1000000, nFilters))
        sigma = np.std(r, axis=1)
        self.hmin = 1.05 * np.log(np.amin(sigma, axis=0))
        self.hmax = 0.95 * np.log(np.amax(sigma, axis=0))
        self.norm = 1. / (self.hmax - self.hmin)
    def on_epoch_end(self, epoch, logs={}):
        """Compute, print and log the normalised entropy for this epoch."""
        s=0.
        for l in self.filterLayers:
            weights = K.flatten(l.filterProbs)
            s += self.norm*(K.log(K.std(weights)) - self.hmin)
        print('    entropy: ' + str(K.eval(s)) )
        logs['entropy'] = K.eval(s)
        self.entropy_history.append(K.eval(s))
| twoev/APEMEN | utils/callbacks.py | callbacks.py | py | 2,012 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.callbacks.Callback",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.backend.flatten",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.backend... |
3749581806 | import glob
import platform
import setuptools
import Cython.Build
# By compiling this separately as a C library, we avoid problems
# with passing C++-specific flags when building the extension
lrslib = ('lrslib', {'sources': glob.glob("solvers/lrs/*.c")})

# Cython extension bundling the Gambit C++ core, game representations,
# solvers and selected command-line tool sources.
cppgambit = setuptools.Extension(
    "pygambit.lib.libgambit",
    sources=(
        ["pygambit/lib/libgambit.pyx"] +
        glob.glob("core/*.cc") +
        glob.glob("games/*.cc") +
        glob.glob("games/agg/*.cc") +
        glob.glob("solvers/*/*.cc") +
        ["tools/lp/nfglp.cc",
         "tools/lp/efglp.cc",
         "tools/logit/path.cc",
         "tools/logit/nfglogit.cc",
         "tools/logit/efglogit.cc"]
    ),
    language="c++",
    include_dirs=["."],
    extra_compile_args=(
        # The -std=c++11 flag is passed only on macOS (Darwin).
        ["-std=c++11"] if platform.system() == "Darwin" else []
    )
)


def readme():
    """Return the contents of README.rst, used as the long description."""
    with open("README.rst") as f:
        return f.read()
# Package metadata and build configuration for the pygambit distribution.
setuptools.setup(
    name="pygambit",
    version="16.0.2",
    description="Software tools for game theory",
    long_description=readme(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Scientific/Engineering :: Mathematics"
    ],
    keywords="game theory Nash equilibrium",
    license="GPL2+",
    author="Theodore Turocy",
    author_email="ted.turocy@gmail.com",
    url="http://www.gambit-project.org",
    project_urls={
        'Documentation': 'https://gambitproject.readthedocs.io/',
        'Source': 'https://github.com/gambitproject/gambit',
        'Tracker': 'https://github.com/gambitproject/gambit/issues',
    },
    python_requires=">=3.7",
    install_requires=[
        'lxml', # used for reading/writing GTE files
        'numpy',
        'scipy',
    ],
    # Plain-C lrs library built separately (see `lrslib` above).
    libraries=[lrslib],
    packages=['pygambit', 'pygambit.games', 'pygambit.lib'],
    # Generates C++ from the .pyx at build time.
    ext_modules=Cython.Build.cythonize(cppgambit)
)
| vignesh7056/gambit | src/setup.py | setup.py | py | 2,265 | python | en | code | null | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.Extension",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number"... |
36090200838 | """Some useful functions to deal with GitHub."""
import datetime
from github import Github
from github import UnknownObjectException
import click
class GitHubMux:
"""Class that let's you operate in multiple repos of the same org at the same time."""
def __init__(self, organization, token, exclude):
"""
Instantiate class.
Args:
organization(string): Organization name.
token(string): Token to interact with GitHub API.
exclude(tuple): Tuple with all the repo names that have to excluded from processing.
"""
self.token = token
self.gh = Github(self.token)
self.exclude = exclude
try:
self.org = self.gh.get_organization(organization)
except UnknownObjectException:
raise Exception("Looks like organization `{}` doesn't exist.".format(organization))
def exclude_repo(self, repo):
"""
Exclude a repo.
Args:
repo(string): Repo of the name to exclude
"""
self.exclude = self.exclude + (repo, )
def repos(self):
"""Return repos to process."""
for repo in self.org.get_repos():
if repo.name in self.exclude:
self.exclude_repo
click.secho("Skipping repo `{}`.".format(repo.name), fg="blue")
else:
yield repo
def _set_label_repo(self, repo, name, color):
"""
Create a label if it doesn't exist already.
Args:
repo(Repository): Repo where you want to create the label
name(string): Name of the label
color(string): Color of the label
Return:
(Label) Either the label that was created of the existing one.
"""
try:
label = repo.get_label(name)
if label.color == color:
click.secho("Label `{}` already exists in repo `{}`. ".format(name,
repo.name),
fg='green')
else:
click.secho("Label `{}` already exists in repo `{}` "
"but has a different color. Fixing.".format(name,
repo.name),
fg='yellow')
label.edit(name, color)
except UnknownObjectException:
click.secho("Label `{}` doesn't exist in repo `{}`. Creating.".format(name,
repo.name),
fg='yellow')
label = repo.create_label(name, color)
return label
def set_label(self, name, color):
"""
Create a label in all repos if it doesn't exist.
Args:
name(string): Name of the label
color(string): Color of the label
"""
for repo in self.repos():
self._set_label_repo(repo, name, color)
def _unset_label_repo(self, repo, name):
"""
Delete a label if it exists.
Args:
repo(Repository): Repo where you want to create the label
name(string): Name of the label
"""
try:
label = repo.get_label(name)
click.secho("Label `{}` exists in repo `{}`. Deleting.".format(name,
repo.name),
fg='yellow')
label.delete()
except UnknownObjectException:
click.secho("Label `{}` is already missing in repo `{}`.".format(name,
repo.name),
fg='green')
def unset_label(self, name):
"""
Delete a label in all the repos that it exists.
Args:
name(string): Name of the label
"""
for repo in self.repos():
self._unset_label_repo(repo, name)
def rename_label(self, name, new_name):
"""
Rename an existing label in all the repos that it exists.
Args:
name(str): Current name of the label
new_name(str): New name for the label
"""
for repo in self.repos():
try:
label = repo.get_label(name)
click.secho("Label `{}` exists in repo `{}`. Renaming.".format(name,
repo.name),
fg='yellow')
label.edit(new_name, label.color)
except UnknownObjectException:
click.secho("Couldn't find label `{}` in repo `{}`.".format(name,
repo.name),
fg='green')
def _get_labels_from_repo(self, repo):
"""
Get labels from a repo.
Args:
repo(Repository): Repository to process.
Return:
list(Label): List of Labels of repo.
"""
labels = set()
for label in repo.get_labels():
labels.add((label.name, label.color))
return labels
def synch_from_repo(self, repo):
"""
Synch labels across repos.
Ensure that all repos have exactly the same labels as another repo that holds
the source of truth. If labels exists same color is enforced, if labels don't exist they
are created and if there are more labels than necessary they are deleted.
Args:
repo(str): Name of the repo that holds the truth.
"""
repo = self.org.get_repo(repo)
orig_labels = self._get_labels_from_repo(repo)
for r in self.repos():
if r.name == repo.name:
continue
click.secho("Processing {}".format(r.name), fg="cyan")
r_labels = self._get_labels_from_repo(r)
to_update = orig_labels - r_labels
for l_tuple in to_update:
self._set_label_repo(r, l_tuple[0], l_tuple[1])
# We refresh labels as some might have changed color in the previous step
r_labels = self._get_labels_from_repo(r)
to_delete = r_labels - orig_labels
for l_tuple in to_delete:
self._unset_label_repo(r, l_tuple[0])
def search_issue_by_title(self, title, org, repo):
"""
Search for an issue with `title` in org/repo.
Args:
title(string): Title of the issue
org(string): Organization name the issue has to belong to
repo(string): Repository name the issue has to belong to
Return:
(Issue): that matches the criteria or None.
Raise:
(Exception): If there is more than one match.
"""
query = "{} in:Title repo:{}/{}".format(title, org, repo)
issues = self.gh.search_issues(query)
for i in issues:
if i.title == title:
return i
return None
def move_issue(self, issue_id, src_repo, dst_repo):
"""
Move an issue between different repos.
Original issue is going to be closed while the new one will reference to the original issue
and mention the original reporter.
Args:
issue_id(int): Issue number
src_repo(string): Name of the source repo where the issue lives
dst_repo(string): Name of the repo where you want to move the issue to
"""
src_repo = self.org.get_repo(src_repo)
dst_repo = self.org.get_repo(dst_repo)
issue = src_repo.get_issue(issue_id)
new_body = "Original issue {}/{}#{} created by @{}\n\n{}".format(
src_repo.organization.name,
src_repo.name,
issue.number,
issue.user.login,
issue.body)
issue.edit(state="closed")
new_issue = dst_repo.create_issue(title=issue.title, body=new_body, labels=issue.labels)
click.secho("Issue moved, new ID is #{} - {}".format(new_issue.id, new_issue.url),
fg="yellow")
issue.create_comment("This issue has been 'moved' to {}/{}#{}".format(
dst_repo.organization.name,
dst_repo.name,
new_issue.number))
def spread_issue(self, issue_id, src_repo):
"""
Spread an issue to multiple repos.
Given a issue_id from a source repo it will create issues in the rest of the repos
linking back to the original one.
Args:
issue_id(int): Issue number of the issue you want to spread.
src_repo(string): Repository name where the issue lives.
"""
issue = self.org.get_repo(src_repo).get_issue(issue_id)
self.exclude_repo(issue.repository.name)
body = "See details in the parent issue {}/{}#{}\n\n".format(
issue.repository.organization.name,
issue.repository.name,
issue.number)
for repo in self.repos():
new_issue = self.search_issue_by_title(issue.title, repo.organization.name, repo.name)
if new_issue:
click.secho("Issue already exists, ID is {}/{}#{} - {}".format(
new_issue.repository.organization.name,
new_issue.repository.name,
new_issue.number,
new_issue.url),
fg="green")
else:
new_issue = repo.create_issue(title=issue.title, body=body, labels=issue.labels)
click.secho("Issue created, ID is {}/{}#{} - {}".format(
new_issue.repository.organization.name,
new_issue.repository.name,
new_issue.number,
new_issue.url),
fg="yellow")
def pr_stats(self, days):
"""Gather stats for the past few days."""
stats = {}
summary_user = {}
summary_repo = {}
for repo in self.repos():
stats[repo.name] = {}
summary_repo[repo.name] = {
"count": 0,
"commits": 0,
"additions": 0,
"deletions": 0,
}
for pr in repo.get_pulls(state="all", sort="created", direction="desc"):
if pr.created_at < (datetime.datetime.now() - datetime.timedelta(days=days)):
break
summary_repo[repo.name]["count"] += 1
summary_repo[repo.name]["commits"] += pr.commits
summary_repo[repo.name]["additions"] += pr.additions
summary_repo[repo.name]["deletions"] += pr.deletions
if pr.user.login not in stats[repo.name]:
stats[repo.name][pr.user.login] = {
"count": 1,
"commits": pr.commits,
"additions": pr.additions,
"deletions": pr.deletions,
}
else:
stats[repo.name][pr.user.login]["count"] += 1
stats[repo.name][pr.user.login]["commits"] += pr.commits
stats[repo.name][pr.user.login]["additions"] += pr.additions
stats[repo.name][pr.user.login]["deletions"] += pr.deletions
if pr.user.login not in summary_user:
summary_user[pr.user.login] = {
"count": 1,
"commits": pr.commits,
"additions": pr.additions,
"deletions": pr.deletions,
}
else:
summary_user[pr.user.login]["count"] += 1
summary_user[pr.user.login]["commits"] += pr.commits
summary_user[pr.user.login]["additions"] += pr.additions
summary_user[pr.user.login]["deletions"] += pr.deletions
return {
"stats": stats,
"summary_user": summary_user,
"summary_repo": summary_repo
}
def issue_stats(self, days):
"""Gather stats for the past few days."""
stats = {}
for repo in self.repos():
stats[repo.name] = {"count": 0}
for issue in repo.get_issues(state="closed", sort="updated", direction="desc"):
if issue.updated_at < (datetime.datetime.now() - datetime.timedelta(days=days)):
break
stats[repo.name]["count"] += 1
return {
"stats": stats,
}
| napalm-automation/tooling | gh_tools/github_helpers.py | github_helpers.py | py | 13,728 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "github.Github",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "github.UnknownObjectException",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "click.secho",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "click.secho",... |
43256048913 | import os
import xlsxwriter
# Change basepath if applicable
basepath = "C:\\Users\\AYuen\\Environmental Protection Agency (EPA)\\ECMS - Documents\\newfiles\\"
workbook = xlsxwriter.Workbook(basepath+'fileandid.xlsx')
worksheet = workbook.add_worksheet("Sheet 1")
# Start from the first cell.
# Rows and columns are zero indexed.
row = 0
col = 0
# Get all files in the directory
qq = []
for (root, dirs, files) in os.walk(basepath, topdown=False):
if len(files) > 0:
for file in files:
qq.append(os.path.join(root,file))
print(qq[1])
for item in qq:
rid = item.split('\\')[6]
fname = item.split('\\')[7]
print(f'record id is {rid}')
print(f'file name is {fname}')
worksheet.write(row, col, rid)
worksheet.write(row, col + 1, fname)
row += 1
workbook.close()
| USEPA/Document_Processing_Scripts | getidfilename.py | getidfilename.py | py | 827 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "xlsxwriter.Workbook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
18598274205 | import requests
from data_access.openWeatherMap.client import OpenWeatherMap
from business_logic.services import GetWeatherService
from config import OWM_API_KEY, OWM_BASE_URL
from .server import Request, Response
def get_weather_controller(request: Request) -> Response:
cities = request.params.get('query')[0]
with requests.Session() as session:
weather_api = OpenWeatherMap(session=session, api_key=OWM_API_KEY, base_url=OWM_BASE_URL)
weather_service = GetWeatherService(weather_api_adapter=weather_api)
weather_data_in_cities = weather_service.get_weather_in_cities(cities=cities)
headers = {"Content-Type": "text/html"}
mes = "<html><body><h1><b>Weather Data Table</b></h1><table>"
mes += "<tr><th>city</th><th>temp</th><th>description</th><th>humidity</th></tr>"
for weather_data in weather_data_in_cities:
mes += (f"<tr><td>{weather_data.name}</td><td>{weather_data.main.temp}</td>"
f"<td>{weather_data.weather[0].description}</td><td>{weather_data.main.humidity}</td></tr>")
mes += "</table></body></html>"
return Response(
status="200 OK",
headers=headers,
body=mes
)
def hello_world_controller(request: Request) -> Response:
mes = "<h1>Hello World!</h1>"
headers = {"Content-Type": "text/html"}
return Response(
status="200 OK",
headers=headers,
body=mes
)
urlpatterns = [
('/', get_weather_controller),
('/hello', hello_world_controller)
]
class WebApplication: # Web-Frameworks: Django, Flask, FastAPI
def _get_404_error(self, request: Request) -> Response:
mes = f"<h1>404 ERROR, URL {request.path} NOT FOUND"
headers = {"Content-Type": "text/html"}
return Response(
status="404 NOT FOUND",
headers=headers,
body=mes
)
def __call__(self, request: Request) -> Response:
for url_path, controller in urlpatterns:
if url_path == request.path:
resp = controller(request)
return resp
return self._get_404_error(request=request)
| pyteacher123/py35-onl | weather_app_refactored/presentation/web/application.py | application.py | py | 2,199 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "server.Request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.Session",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "data_access.openWeatherMap.client.OpenWeatherMap",
"line_number": 11,
"usage_type": "call"
},
{
"a... |
14835956764 | import torch
import torchaudio
import numpy as np
import opensmile
from collections import namedtuple
from .scan_data import scan_rootdir, CHANNELS
from .load_data import load_anno_tensor, load_vad_df
from .segment_data import SegmentEgs
class ChunkOpenSmileDataSet:
def __init__(self, rootdir,
channels=CHANNELS,
#transform=torchaudio.transforms.MFCC(n_mfcc=40), # n_mfcc=80, melkwargs={'n_fft': 1280}
feats2anno_rate=1,
chunk_size_s=2,
chunk_hop_s=1, use_vad=True):
""" feats2anno_rate = feats_sr / anno_sr """
self.rootdir = rootdir
self.channels = channels
self.transform = opensmile.Smile(
feature_set=opensmile.FeatureSet.ComParE_2016,
feature_level=opensmile.FeatureLevel.Functionals)
#self.transform = transform
self.feats2anno_rate = feats2anno_rate
self.finfos = scan_rootdir(rootdir, channels)
preloaded_annos = [load_anno_tensor(f.anno[0]) for f in self.finfos]
self.segments = []
Chunk = namedtuple('Chunk', ['start_sec', 'end_sec'])
for f, p_a in zip(self.finfos, preloaded_annos):
if use_vad and f.vad:
for _, row in load_vad_df(f.vad).iterrows():
start = row.start_sec
#row.end_sec
keep_doing=True
while keep_doing:
end = start + chunk_size_s
if end > row.end_sec:
end = row.end_sec
start = max(0, end-chunk_size_s)
keep_doing=False
chunk = Chunk(start, end)
start += chunk_hop_s
self.segments.append(SegmentEgs(f, chunk, p_a))
else:
total = torchaudio.info(f.wav[0]).num_frames//f.wav[1]
for start in range(0, total - chunk_size_s, chunk_hop_s):
chunk = Chunk(start, start + chunk_size_s)
self.segments.append(SegmentEgs(f, chunk, p_a))
print(f"{len(self.segments)} chunks")
def __len__(self):
return len(self.segments)
def total_sec(self):
return sum(s.duration for s in self.segments)
def size(self, index):
return self.segments[index].duration
def __getitem__(self, index):
seq = self.segments[index]
wav_keeper = seq.wav_keeper
feats = self.transform.process_file(wav_keeper.wav_fname,
start=wav_keeper.start_sec,
end = wav_keeper.end_sec).values# 1 X feats
feats = torch.from_numpy(feats).T # feats X 1
anno = seq.anno.mean(dim=-2)
#corr_anno_len = round(feats.shape[-1] / self.feats2anno_rate)
# if abs(anno.shape[0] - corr_anno_len) > 2:
# print(f"WARNING: element {index}, {anno.shape[0]=} ({corr_anno_len=}), {feats.shape[-1]=}, {self.feats2anno_rate=}")
# anno = anno[:corr_anno_len]
# corr_feats_len = round(anno.shape[0] * self.feats2anno_rate)
# feats = feats[:, :corr_feats_len]
return {'feats': feats,
'labels': anno,
'padding': torch.ones(anno.shape[0]),
'index': index}
| medbar/maga_sis | 3/ULM/utils/chunk_opensmile_dataset.py | chunk_opensmile_dataset.py | py | 3,414 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scan_data.CHANNELS",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "opensmile.Smile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "opensmile.FeatureSet",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "opensmil... |
73787039549 | """
Code to explore the PDF and CDF of weight distributions.
We use truncated lognormals to define the distribution of excitatory connections.
We scale that by -8 for inhibitory connections.
We represent the inhibitory connections with a negative number as a convention to be consistent
with the network simulator (NEST), although technically conductances must be positive.
"""
import scipy.interpolate
import scipy.stats as st
import numpy as np
def _approx_pdf_from_cdf(cdf, vmin, vmax, n_samples=10**5):
"""numerically approximate the Probability Density Function from the cumulative"""
x = np.linspace(vmin, vmax, n_samples)
mid = .5 * (x[:-1] + x[1:])
derivative = np.diff(cdf(x)) / np.diff(x)
return scipy.interpolate.interp1d(mid, derivative, fill_value=0., bounds_error=False)
def _approx_inv_cdf_from_cdf(cdf, vmin, vmax, n_samples=10**5):
"""numerically approximate the inverse of a Cumulative Distribution Function"""
x = np.linspace(vmin, vmax, n_samples)
return scipy.interpolate.interp1d(cdf(x), x, fill_value=0., bounds_error=False)
class TruncatedLognormal:
"""
Represents a truncated, and possibly scaled, lognormal distribution.
"""
def __init__(self, loc, scale, shape, vmax, g=1):
self.loc = loc
self.scale = scale
self.shape = shape
self.vmax = vmax
self.g = g
self.base_lognorm = st.lognorm(
loc=self.loc,
scale=self.scale,
s=self.shape)
self.base_lognorm_cdf_vmax = self.base_lognorm.cdf(self.vmax)
self._pdf = _approx_pdf_from_cdf(self.cdf, *self.vrange)
self._icdf = _approx_inv_cdf_from_cdf(self.cdf, *self.vrange)
@property
def vrange(self) -> tuple:
"""truncated range of X"""
vrange = 0, self.vmax * self.g
if self.g < 0:
vrange = vrange[1], vrange[0]
return vrange
def linspace(self, num=50):
"""generate samples linearly on the domain of X"""
return np.linspace(*self.vrange, num=num)
def cdf(self, weight):
"""Cumulative Distribution Function"""
weight_norm = weight / self.g
prob = np.minimum(self.base_lognorm.cdf(weight_norm) / self.base_lognorm_cdf_vmax, 1)
if self.g < 0:
prob = 1 - prob
return prob
def pdf(self, weight):
"""Probability Density Function"""
return self._pdf(weight)
def inv_cdf(self, prob):
"""
Inverse of the Cumulative Distribution Function.
Maps from probability to values.
"""
return self._icdf(prob)
def rev_cdf(self, prob):
"""
Reversed Cumulative Distribution Function.
Cumulative summation is done right-to-left.
"""
return 1 - self.cdf(prob)
def mean(self):
"""Estimated mean from the distribution"""
x = self.linspace(1_000_000)
p = self.pdf(x)
p = p / np.sum(p)
mean = np.sum(x * p)
return mean
def var(self):
"""Estimated var from the distribution"""
mean = self.mean()
x = self.linspace(1_000_000)
p = self.pdf(x)
p = p / np.sum(p)
mean = np.sum(np.square(x - mean) * p)
return mean
def std(self):
"""Estimated std from the distribution"""
return np.sqrt(self.var())
def quantile(self, q):
"""Estimated quantile from the distribution"""
assert 0 <= q <= 1
return self.inv_cdf(q).item()
def median(self):
"""Estimated median from the distribution"""
return self.quantile(.5)
def min(self):
"""Min value of the distribution"""
return self.quantile(0)
def max(self):
"""Max value of the distribution"""
return self.quantile(1)
class ConnDist:
"""Combination of exc and inh weight distributions"""
def __init__(self, e_weights_loc, e_weights_scale, e_weights_shape, e_weights_vmax, g):
assert g < 0
self.exc = TruncatedLognormal(
e_weights_loc,
e_weights_scale,
e_weights_shape,
e_weights_vmax
)
self.inh = TruncatedLognormal(
e_weights_loc,
e_weights_scale,
e_weights_shape,
e_weights_vmax,
g=g,
)
@classmethod
def from_batch(cls, batch):
param_names = ['e_weights_loc', 'e_weights_scale', 'e_weights_vmax', 'e_weights_shape', 'g']
weight_dist_params = batch.reg[param_names].drop_duplicates()
assert len(weight_dist_params) == 1
weight_dist_params = weight_dist_params.iloc[0]
return cls(**weight_dist_params)
| comp-neural-circuits/tctx | tctx/analysis/wdist.py | wdist.py | py | 4,725 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interpolate.interp1d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scip... |
32605878813 | import discord
import youtube_dl
from bot_token import TOKEN
if not TOKEN:
raise ValueError("Please add your token to bot_token.py")
client = discord.Client()
@client.event
async def on_message(message):
if message.author== client.user :
return
elif message.content.startswith("*l"):
msg = f'{message.content[3:]}Hello{message.author.mention}'
await client.send_message(message.channel, msg)
elif message.content.startswith("*chante"):
url= message.content[8:]
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run(TOKEN) | F3YoD/Bot-python | tamer2.py | tamer2.py | py | 669 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bot_token.TOKEN",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "discord.Client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bot_token.TOKEN",
"line_number": 29,
"usage_type": "argument"
}
] |
74142898108 | from django.http import JsonResponse
from django.shortcuts import render
# Create your views here.
from django.views.generic import View
from django_redis import get_redis_connection
from redis import StrictRedis
from apps.goods.models import GoodsSKU
from utils.common import LoginRequiredViewMixin, BaseCartView
class CartAddView(BaseCartView):
def post(self, request, command='add'):
"""添加商品到购物车"""
params = super().post(request, command)
# 接受数据:user_id,sku_id,count
user_id, sku_id, count, sku = params['user_id'], \
params['sku_id'], \
params['count'], \
params['sku']
# print(user_id, sku_id, count)
# print('+'*50)
# 添加商品到购物车,如果redis中已有该商品的id,那么就增加它的数量
strict_redis = get_redis_connection()
# strict_redis = StrictRedis()
key = 'cart_%s' % user_id
val = strict_redis.hget(key, sku_id)
if val:
count += int(val)
# 库存逻辑判断
if count > sku.stock:
return JsonResponse({'code':5, 'errmsg':'库存不足'})
# 操作redis数据库存储商品到购物车
strict_redis.hset(key, sku_id, count)
total_count = 0
vals = strict_redis.hvals(key)
for val in vals:
total_count += int(val)
context = {
'code':0,
'total_count':total_count,
}
return JsonResponse(context)
class CartInfoView(LoginRequiredViewMixin, View):
"""购物车显示界面:需要先登录"""
def get(self, request):
# 查询当前登录用户添加到购物车中的所有商品
strict_redis = get_redis_connection()
key = 'cart_%s' % request.user.id
# 获取购物车中所有商品,返回一个字典,包含sku_id和对应的数量count
cart_dict = strict_redis.hgetall(key)
# 保存购物车中所有的商品对象
skus = []
# 商品总数量
total_count = 0
# 商品总金额
total_amount = 0
for sku_id, count in cart_dict.items():
try:
# 根据sku_id获取sku对象
sku = GoodsSKU.objects.get(id=sku_id)
# 列表中新增一个商品对象
skus.append(sku)
except Exception as e:
print(e)
# sku对象动态新增一个实例属性:count
sku.count = int(count)
# sku对象动态新增一个实例属性:amount
sku.amount = sku.price * sku.count
# 累加购物车中所有商品的数量和总金额
total_count += sku.count
total_amount += sku.amount
context = {
'skus': skus,
'total_count': total_count,
'total_amount': total_amount,
}
return render(request, 'cart.html', context)
class CartUpdateView(LoginRequiredViewMixin, BaseCartView):
def post(self, request, command='update'):
"""修改购物车商品数量"""
# print(CartUpdateView.mro())
# print('-' * 50)
params = super().post(request, command)
sku_id = params['sku_id']
count = params['count']
# print(sku_id)
# print(count)
# print('-' * 50)
# todo:业务处理:保存购物车商品数量
strict_redis = get_redis_connection()
key = 'cart_%s' % request.user.id
strict_redis.hset(key, sku_id, count)
# 响应json
return JsonResponse({'code': 0, 'message': '修改商品数量成功',})
class CartDeleteView(LoginRequiredViewMixin, BaseCartView):
def post(self, request, command='delete'):
"""删除购物车中的商品"""
# 获取请求参数:sku_id
sku_id = super().post(request, command)['sku_id']
# 业务处理:从redis中删除商品
strict_redis = get_redis_connection()
key = 'cart_%s' % request.user.id
strict_redis.hdel(key, sku_id)
# 响应请求
return JsonResponse({'code':0, 'message':'删除成功!'}) | xmstu/dailyfresh2 | dailyfresh/apps/cart/views.py | views.py | py | 4,266 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.common.BaseCartView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django_redis.get_redis_connection",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 39,
"usage_type": "call"
},
{
... |
43529823665 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 11:51:37 2019
@author: javie
"""
import plotly_express as px
from plotly.offline import plot
def pl(df, r, var):
tmp = df[df.randomSeed.isin(r)]
plot(px.line(tmp, height=300 * len(r), x="tick",
y = var,
color="FirmNumID",
line_dash="scenario",
facet_row="randomSeed"
))
# Several variables melting columns
def plMelt(df, r, vars, id_vars=["randomSeed","scenario","tick","FirmNumID"]):
tmp = df[df.randomSeed.isin(r)]
tmp = tmp.melt(id_vars=id_vars, value_vars=vars)
plot(px.line(tmp, height=300 * len(r), x="tick",
y= "value",
color="FirmNumID",
line_dash="scenario",
facet_col="variable",
facet_row="randomSeed"
))
| javiergarciasanchez/businessCycles | businessCycles/exploreData/Python/Graphs_plotly.py | Graphs_plotly.py | py | 871 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "plotly.offline.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plotly_express.line",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plotly.offline.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "plotly_ex... |
14003038546 | from app.custom_queue import CustomQueue
from app.logger import get_logger
from datetime import datetime, timedelta
LOGGER = get_logger(__name__)
QUEUE_MAX_SIZE = 20
class Queues():
def __init__(self):
self.LS = CustomQueue(QUEUE_MAX_SIZE, 'LeftSingle')
self.LT = CustomQueue(QUEUE_MAX_SIZE, 'LeftTriple')
self.RT = CustomQueue(QUEUE_MAX_SIZE, 'RightTriple')
self.RS = CustomQueue(QUEUE_MAX_SIZE, 'RightSingle')
self.starting_time = datetime.now()
self.Total_time = self.starting_time
def add_time(self, queue: CustomQueue, time: timedelta):
self.Total_time += time
queue.time += time
queue.count += 1
def add_to_LS(self, skyer):
self.add_to(self.LS, skyer)
def add_to_LT(self, skyer):
self.add_to(self.LT, skyer)
def add_to_RT(self, skyer):
self.add_to(self.RT, skyer)
def add_to_RS(self, skyer):
self.add_to(self.RS, skyer)
def add_to(self, queue: CustomQueue, skyer):
queue.put(skyer)
LOGGER.debug(f'Esquiador entrou na fila: {queue.name}')
def normalize_time(self):
self.Total_time -= self.starting_time
self.LS.time -= self.starting_time
self.LT.time -= self.starting_time
self.RT.time -= self.starting_time
self.RS.time -= self.starting_time
def report_queue_time(self):
self.normalize_time()
total_count = self.LS.count + self.LT.count + self.RT.count + self.RS.count
if total_count:
LOGGER.info(f'Total time = {self.Total_time/total_count}')
else:
LOGGER.info('ninguem saiu de qualquer fila')
if self.LS.count:
LOGGER.info(f'{self.LS.name} time = {self.LS.time/self.LS.count}')
else:
LOGGER.info(f'ninguem saiu da fila {self.LS.name}')
if self.LT.count:
LOGGER.info(f'{self.LT.name} time = {self.LT.time/self.LT.count}')
else:
LOGGER.info(f'ninguem saiu da fila {self.LT.name}')
if self.RT.count:
LOGGER.info(f'{self.RT.name} time = {self.RT.time/self.RT.count}')
else:
LOGGER.info(f'ninguem saiu da fila {self.RT.name}')
if self.RS.count:
LOGGER.info(f'{self.RS.name} time = {self.RS.time/self.RS.count}')
else:
LOGGER.info(f'ninguem saiu da fila {self.RS.name}')
def queue_sizes(self):
LS_size = self.LS.qsize()
LT_size = self.LT.qsize()
RT_size = self.RT.qsize()
RS_size = self.RS.qsize()
return [LS_size, LT_size, RT_size, RS_size]
def count_queues_lenght(self):
LS_size, LT_size, RT_size, RS_size = self.queue_sizes()
LOGGER.debug(
f"""count_queues_lenght()
{'###'*3}
>Filas agora<
LeftSingle: {LS_size}
LeftTriple: {LT_size}
RightTriple: {RT_size}
RightSingle: {RS_size}
{'###'*3}
""")
| ViniciusLinharesAO/ski-slope-problem-uece-ppc | app/queues.py | queues.py | py | 3,005 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.logger.get_logger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "app.custom_queue.CustomQueue",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.custom_queue.CustomQueue",
"line_number": 12,
"usage_type": "call"
},
{
"ap... |
6066153310 | import pygame
from _draw import *
from _utils import *
class gui():
def __init__(self,
white,
screen,
width,
height,
smallNokiaFont,
hugeNokiaFont,
font,
bigFont,
hugeFont,
smallFont,
nanoFont,
themeColour,
exitButton,
nextButton,
dialogue,
sDialogue,
smsDialogue,
music,
borderSlide,
notificationDialogue,
user_input,
statusButton ,
inventoryButton ,
noteButton ,
nokiaFont ,
nanoNokiaFont ,
smsFont ,
musicFont,
jumboFont ,
gameTime ,
smsScrollDialogue,
squareFont,
squareFontH,
debugSwitch = True,
clicked=False,
):
self.white = white
self.screen = screen
self.width = width
self.height = height
self.smallNokiaFont = smallNokiaFont
self.hugeNokiaFont = hugeNokiaFont
self.font = font
self.bigFont = bigFont
self.hugeFont = hugeFont
self.smallFont = smallFont
self.nanoFont = nanoFont
self.themeColour = themeColour
self.exitButton = exitButton
self.nextButton = nextButton
self.dialogue = dialogue
self.sDialogue = sDialogue
self.smsDialogue = smsDialogue
self.music = music
self.borderSlide = borderSlide
self.notificationDialogue = notificationDialogue
self.user_input = user_input
self.statusButton = statusButton
self.inventoryButton = inventoryButton
self.noteButton = noteButton
self.nokiaFont = nokiaFont
self.nanoNokiaFont = nanoNokiaFont
self.smsFont = smsFont
self.musicFont = musicFont
self.jumboFont = jumboFont
self.gameTime = gameTime
self.smsScrollDialogue = smsScrollDialogue
self.squareFont = squareFont
self.squareFontH = squareFontH
self.debugSwitch = debugSwitch
self.clicked = clicked
self.greenA = (36,65,45)
self.greenB = (82,128,58)
self.greenC = (173,195,63)
self.greenD = (215,233,149)
self.darkGreen = (5,37,23)
self.buttonGreen = (47,75,45)
self.offwhite = (245,245,245)
self.screenDefault = (201,221,126)
self.screenColour = (201,221,126)
self.greenText = (29,153,29)
self.greenBorder = (127,187,73)
self.darkGrey = (44,52,56)
self.lightBlack = (40,41,35)
self.lightGrey = (72,77,79)
# ---------------Images
self.signal = pygame.image.load('pics/phoneLogos/signal.png')
self.bottomNavMock = pygame.image.load('pics/assets/mocks/navBottom.png')
self.bottomNav = pygame.image.load('pics/assets/nav/navBottom.png')
self.nextDayBtn = [pygame.image.load('pics/assets/nav/nextDay1.png'),pygame.image.load('pics/assets/nav/nextDay2.png')]
self.tileBackground = pygame.image.load('pics/assets/backgrounds/tile.png')
self.gradientBackground = pygame.image.load('pics/assets/backgrounds/gradient.png')
self.cubeBackground = pygame.image.load('pics/assets/backgrounds/cube.png')
# -------------widget images
self.widgetNode = [pygame.image.load('pics/assets/widgetNode/widgetNode1.png'),pygame.image.load('pics/assets/widgetNode/widgetNode2.png'),pygame.image.load('pics/assets/widgetNode/widgetNode3.png')]
self.smallActiveWidget = pygame.image.load('pics/assets/widgetNode/smallActiveWidget.png')
self.medActiveWidget = pygame.image.load('pics/assets/widgetNode/medActiveWidget.png')
self.medActiveWidgetLab = pygame.image.load('pics/assets/widgetNode/widgetMedLabel.png')
self.bigActiveWidget = pygame.image.load('pics/assets/widgetNode/bigActiveWidget.png')
# ----- Mech imgs
self.mechBoxMed = impFilesL('mechBoxMed1.png',tDir = 'pics/assets/mechBox/')
self.mechBoxBig = impFilesL('mechBoxBig1.png',tDir = 'pics/assets/mechBox/')
self.mechBoxGreen = impFilesL('mechBoxGreen1.png',tDir = 'pics/assets/mechBox/')
self.mechBoxMedLight = [pygame.image.load('pics/assets/mechBox/mechBoxMedLight1.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight2.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight3.png'),pygame.image.load('pics/assets/mechBox/mechBoxMedLight4.png')]
self.mechBtnMed = [pygame.image.load('pics/assets/buttons/mechBtnMed1.png'),pygame.image.load('pics/assets/buttons/mechBtnMed2.png')]
self.mechPlainBtnMed = [pygame.image.load('pics/assets/buttons/medMechBtn1.png'),pygame.image.load('pics/assets/buttons/medMechBtn2.png')]
self.extendableBox = [pygame.image.load('pics/assets/textBox/extendableDarkGreen1.png'),pygame.image.load('pics/assets/textBox/extendableDarkGreen2.png')]
self.notitfyBtnSmall = [pygame.image.load('pics/assets/buttons/buttonSmall1.png'),pygame.image.load('pics/assets/buttons/buttonSmall2.png')]
self.notitfyBtnMed = [pygame.image.load('pics/assets/buttons/buttonMed1.png'),pygame.image.load('pics/assets/buttons/buttonMed2.png')]
self.signal = pygame.image.load('pics/phoneLogos/signal.png')
self.minis = [pygame.image.load('pics/assets/minis/minibuttons1.png'),pygame.image.load('pics/assets/minis/minibuttons2.png'),pygame.image.load('pics/assets/minis/minibuttons3.png'),pygame.image.load('pics/assets/minis/minibuttons4.png'),pygame.image.load('pics/assets/minis/minibuttons5.png'),pygame.image.load('pics/assets/minis/minibuttons6.png'),pygame.image.load('pics/assets/minis/minibuttons7.png'),pygame.image.load('pics/assets/minis/minibuttons8.png'),pygame.image.load('pics/assets/minis/minibuttons9.png'),pygame.image.load('pics/assets/minis/minibuttons10.png')]
# ------mouse
self.mx = 0
self.my = 0
#buttons
self.sell = impFilesL('sell1.png',tDir = 'pics/assets/buttons/')
self.bank = impFilesL('bank1.png',tDir = 'pics/assets/buttons/')
self.auto = impFilesL('auto1.png',tDir = 'pics/assets/buttons/')
self.selectMe = impFilesL('selectme1.png',tDir = 'pics/assets/buttons/')
self.increment = impFilesL('increment1.png',tDir = 'pics/assets/buttons/')
self.decrement = impFilesL('decrement1.png',tDir = 'pics/assets/buttons/')
self.menuBG = None
self.hideExitButton = False
def border(self,colour=(128,0,0)):
self.bx,self.by = 0.1*self.width,0.1*self.height
self.bw,self.bh = 0.8*self.width,0.8*self.height
rect = pygame.draw.rect(self.screen, colour, [self.bx, self.by,self.bw , self.bh],4)
def mouseCollides(self,mousePos,x,y,w,h):
if mousePos[0] > x and mousePos[0] < x + w:
if mousePos[1] > y and mousePos[1] < y + h:
return(True)
return(False)
def incrementableWidget(self,x,y,text,value,inc=1,cap=100,userInput=None,incrementKey=None,insta=False,instaMessage='Auto On'):
"""+ button and text to increment and return value
"""
textx, texty = x+60,y+10
#---------exit if auto on
if(insta):
drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, instaMessage,textx ,texty, self.greenD)
xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
return(value,xEnd,yEnd)
# --------- display text
displayText = text + ' ' + str(value)
selected = drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
if(userInput.upper() == incrementKey.upper()): selected = True
if(selected):
if(inc<=cap):
value = value + inc
else:
value = value + cap
hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, displayText,textx ,texty, self.greenD)
xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
return(value,xEnd,yEnd)
def incDecWidgetAbsolute(self,x,y,text,value,inc=1,cap=100,userInput="none",incrementKey="notset"):
"""+ button and text to increment and return value
"""
displayText = text + ' ' + str(value)
selected = drawSelectableImage(self.decrement[0],self.decrement[1],(x,y),self,trim=False)
if(userInput.upper() == incrementKey.upper()): selected = True
if(selected):
if((value - inc)>=0):
value = value - inc
else:
value = 0
x = x + self.decrement[0].get_rect().w
plusSelected = drawSelectableImage(self.increment[0],self.increment[1],(x,y),self,trim=False)
if(plusSelected):
if((value + inc)<=cap):
value = value + inc
else:
value = cap
textx, texty = x+60,y+10
hov, tw,ty = drawText(self.screen,self.nanoNokiaFont, displayText,textx ,texty, self.greenD)
xEnd,yEnd = textx + tw, y + self.minis[5].get_rect().h
return(value,xEnd,yEnd)
def debug(self,debugMessage):
if(self.debugSwitch):
print(debugMessage)
def debugDetailed(self,debugMessage):
if(self.debugSwitch=='detailed'):
print(debugMessage)
class notificationDialogue():
def __init__(self):
self.initialised = False
self.origText = ''
self.origSource = ''
self.textArray = []
self.colour = (0,0,0)
self.y = 0
self.senPos = 0
def drawDialogue(self,gui,myfont, text,pos,maxWidth,maxHeight,clicked, colour=(255, 255, 255),skip=False,verticalSep=1.1,maxVerticleLines=80,displayNextButton=False,source=None):
sx,sy = pos[0],pos[1]
x,y = sx,sy
tRemaining = ""
hovered = gui.mouseCollides((gui.mx,gui.my),x,y,maxWidth,maxHeight)
# reset if called by new function
if(self.origText!= text or self.origSource!= source):
self.initialised=False
self.origText = text
if(self.initialised== False):
# format paragraph into array of fitted sentences
self.origText = text
self.origSource = source
self.senPos = 0
dAr,para = [], ""
for word in text.split(' '):
pre = para
para += word + " "
textsurface = myfont.render(para, True, colour)
w = textsurface.get_rect().width
if(w>= maxWidth):
dAr.append(pre)
para = word + " "
dAr.append(para)
self.textArray = dAr
self.initialised = True
hTotal = 0
for sentence in range(0,len(self.textArray)):
textsurface = myfont.render(self.textArray[sentence], True, colour)
h = textsurface.get_rect().height
gui.screen.blit(textsurface,(x,y))
y = y + verticalSep*h
hTotal = hTotal + verticalSep*h
tRemaining = self.textArray[sentence+1:]
# Condition: If lines exceed specified MAX LINES, break here
if((sentence>=maxVerticleLines-1)): break
# Condition: If lines exceed specified HEIGHT
if(hTotal >= maxHeight): break
#if(displayNextButton): nextP = gui.nextButton.display(gui,noBorder=False)
# Condition: If lines remaining and clicked, go next page
if(clicked and hovered and (len(tRemaining)>0)):
self.textArray = tRemaining
| murchie85/bumdee | _gui.py | _gui.py | py | 12,565 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.image.load",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pygame.image... |
42539411350 | from django.shortcuts import render, redirect
from .models import *
import os
from django.conf import settings
from django.http import HttpResponse
import json
# Create your views here.
def cargarInicio(request):
productos = Producto.objects.all()
producto_perros = Producto.objects.filter(categoria_id=1)
producto_gatos = Producto.objects.filter(categoria_id=2)
return render(request,"inicio.html",{"prod" : productos, "prod_dogs":producto_perros, "prod_cats":producto_gatos})
def cargarAgregarProducto(request):
categorias = Categoria.objects.all()
productos = Producto.objects.all()
return render(request, "agregarProducto.html",{"cate":categorias,"prod":productos})
def agregarProducto(request):
#print("AGREGANDO PRODUCTOS A LA BBDD",request.POST)
v_sku = request.POST['txtSku']
v_precio = request.POST['txtPrecio']
v_nombre = request.POST['txtNombre']
v_imagen = request.FILES['txtImagen']
v_descripcion = request.POST['txtDescripcion']
v_stock = request.POST['txtStock']
v_categoria = Categoria.objects.get(id_categoria = request.POST['cmbCategoria'])
Producto.objects.create(sku = v_sku, precio = v_precio, nombre = v_nombre,imagen = v_imagen,descripcion = v_descripcion,stock = v_stock, categoria_id = v_categoria)
return redirect('/agregarProducto')
def cargarEditarProducto(request,sku):
producto = Producto.objects.get(sku = sku)
categorias = Categoria.objects.all()
return render(request,"editarProducto.html",{"prod":producto,"cate":categorias})
def editarProducto(request):
v_sku = request.POST['txtSku']
productoBD = Producto.objects.get(sku = v_sku)
v_precio = request.POST['txtPrecio']
v_nombre = request.POST['txtNombre']
v_descripcion = request.POST['txtDescripcion']
v_stock = request.POST['txtStock']
v_categoria = Categoria.objects.get(id_categoria = request.POST['cmbCategoria'])
try:
v_imagen = request.FILES['txtImagen']
ruta_img = os.path.join(settings.MEDIA_ROOT,str(productoBD.imagen))
os.remove(ruta_img)
except:
v_imagen = productoBD.imagen
productoBD.nombre = v_nombre
productoBD.precio = v_precio
productoBD.imagen = v_imagen
productoBD.descripcion = v_descripcion
productoBD.stock = v_stock
productoBD.categoria_id = v_categoria
productoBD.save()
return redirect('/agregarProducto')
def eliminarProducto(request,sku):
producto = Producto.objects.get(sku = sku)
ruta_img = os.path.join(settings.MEDIA_ROOT,str(producto.imagen))
os.remove(ruta_img)
producto.delete()
return redirect('/agregarProducto')
def carrito(request):
#print("CARRITO",request.body)
productos = json.loads(request.body)
for p in productos:
print("SKU",p['sku'])
print("CANTIDAD",p['cantidad'])
return HttpResponse("OK!") | GuillermoVillacuraTorres/PGY3121-012D | django/apps/Tienda/views.py | views.py | py | 2,897 | python | es | code | null | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 41,
"usage_type": "call"
},
{
"api_nam... |
39255343614 | from django.conf.urls import url
from network.views import views_auth
from network.views import views_app
urlpatterns = [
# Main page
url(r'^home/(?P<msg>.*)$', views_auth.main_page, name="Home"),
# url(r'$', views_auth.main_page, name="Home"),
# Auth urls
url(r'^login/(?P<info>.*)$', views_auth.login_page, name="Login"),
url(r'^logout', views_auth.logout_page, name="Logout"),
url(r'^registration', views_auth.registration_page),
# App urls
url(r'^userpage/(?P<usr_id>.[0-9])', views_app.user_page, name="UserPage"),
url(r'^userpage/wall/new_record', views_app.new_wall_record, name="NewWallRecord"),
url(r'^userpage/wall/new_like', views_app.new_like, name="AddLike"),
url(r'^userpage/wall/new_comment', views_app.new_comment, name="AddComment"),
url(r'^userpage/wall/delete_post', views_app.delete_post, name="DeletePost"),
url(r'^error/', views_app.error_page, name="UserPage"),
url(r'^im/', views_app.mail_page, name="UserMail"),
url(r'^send_msg/(?P<user_id>.[0-9])', views_app.send_msg, name="SendMessage"),
url(r'^friend_request/', views_app.send_friend_request, name="FriendRequest"),
url(r'^friends/', views_app.user_friends, name="Friends"),
url(r'^delete_friend/', views_app.delete_friend, name="Delete Friend"),
url(r'^sent/', views_app.user_sent_msgs, name="Sent msgs"),
url(r'^requests/', views_app.user_requests, name="Friend requests"),
url(r'^accept_request/', views_app.accept_request, name="Accept_request"),
url(r'^decline_request/', views_app.decline_request, name="Decline_request")
]
| Sipleman/Course-work_SocialNetwork | network/urls.py | urls.py | py | 1,612 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "network.views.views_auth.main_page",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "network.views.views_auth",
"line_number": 11,
"usage_type": "name"
},
{
... |
10422164463 | from __future__ import annotations
import traceback
from PySide6 import QtWidgets
from randovania.games.prime2.patcher.claris_randomizer import ClarisRandomizerExportError
def create_box_for_exception(val: Exception) -> QtWidgets.QMessageBox:
box = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Critical,
"An exception was raised",
(
f"An unhandled Exception occurred:\n{val}\n\n"
"When reporting, make sure to paste the entire contents of the following box."
"\nIt has already be copied to your clipboard."
),
QtWidgets.QMessageBox.Ok,
)
from randovania.gui.lib import common_qt_lib
common_qt_lib.set_default_window_icon(box)
detailed_exception = "".join(traceback.format_exception(val))
if isinstance(val, ClarisRandomizerExportError):
detailed_exception += "\n\n"
detailed_exception += val.detailed_text()
box.setDetailedText(detailed_exception)
common_qt_lib.set_clipboard(detailed_exception)
# Expand the detailed text
for button in box.buttons():
if box.buttonRole(button) == QtWidgets.QMessageBox.ActionRole:
button.click()
break
box_layout: QtWidgets.QGridLayout = box.layout()
box_layout.addItem(
QtWidgets.QSpacerItem(600, 0, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding),
box_layout.rowCount(),
0,
1,
box_layout.columnCount(),
)
return box
| randovania/randovania | randovania/gui/lib/error_message_box.py | error_message_box.py | py | 1,513 | python | en | code | 165 | github-code | 6 | [
{
"api_name": "PySide6.QtWidgets.QMessageBox",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PySide6.QtWidgets",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QMessageBox",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
75342220666 | from django.shortcuts import render
from .forms import getData, getTraningInfo
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import folium
import geocoder
# Create your views here.
runs = {}
def calculate_difference(key):
run_date_str = runs[key]["date"][:10] + " 0:0:0"
today = datetime.now()
run_day = datetime.strptime(run_date_str, "%Y-%m-%d %H:%M:%S")
difference = (run_day - today).days
minus = 0
if today.weekday() != 0:
minus += 7 - today.weekday()
return difference - minus
def calculate_distance(key):
distance = ''
i = 0
while runs[key]["distance"][i] != "k":
distance += runs[key]["distance"][i]
i += 1
return int(distance)
def calculate_speed(hour, minutes, distance):
minutes += hour*60
speed = minutes / distance
return speed
def speed_to_str(speed):
minutes = speed // 1
sek = speed - minutes
sek *= 60
if sek < 10:
sek = "0" + str(round(sek))
else:
sek = str(round(sek))
minutes = str(round(minutes))
return minutes + ":" + sek
def basic_introduction(weeks):
plan4 = {
'1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 10 min', 'pt': 'odpoczynek',
'weekend': 'bieg 15 min'},
'2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
'weekend': 'bieg 20 min'},
'3': {'pon': 'odpoczynek', 'wt': 'bieg 20 min', 'sr': 'opdoczynek', 'czw': 'bieg 25 min', 'pt': 'odpoczynek',
'weekend': 'bieg 25 min'},
'4': {'pon': 'odpoczynek', 'wt': 'bieg 25 min', 'sr': 'opdoczynek', 'czw': 'bieg 30 min', 'pt': 'odpoczynek',
'weekend': 'bieg 30 min'}}
plan5 = {
'1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 10 min', 'pt': 'odpoczynek',
'weekend': 'bieg 10 min'},
'2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
'weekend': 'bieg 15 min'},
'3': {'pon': 'odpoczynek', 'wt': 'bieg 20 min', 'sr': 'opdoczynek', 'czw': 'bieg 20 min', 'pt': 'odpoczynek',
'weekend': 'bieg 20 min'},
'4': {'pon': 'odpoczynek', 'wt': 'bieg 25 min', 'sr': 'opdoczynek', 'czw': 'bieg 25 min', 'pt': 'odpoczynek',
'weekend': 'bieg 25 min'},
'5': {'pon': 'odpoczynek', 'wt': 'bieg 30 min', 'sr': 'opdoczynek', 'czw': 'bieg 30 min', 'pt': 'odpoczynek',
'weekend': 'bieg 30 min'}}
plan2 = {
'1': {'pon': 'odpoczynek', 'wt': 'bieg 10 min', 'sr': 'opdoczynek', 'czw': 'bieg 15 min', 'pt': 'odpoczynek',
'weekend': 'bieg 15 min'},
'2': {'pon': 'odpoczynek', 'wt': 'bieg 15 min', 'sr': 'opdoczynek', 'czw': 'bieg 20 min', 'pt': 'odpoczynek',
'weekend': 'bieg 30 min'},
}
if weeks == 4:
return plan4, weeks - 4, 5
elif weeks == 5:
return plan5, weeks - 5, 6
else:
print("tu jestem")
return plan2, 2, 3
def introduction(weeks, actual_week, distance, week, mode, plan, weeks_for_introduction=0):
mins = 0
if mode == "Basic":
# pocztatkowy dystans dla basic
mins = 2
if distance < 11:
if weeks_for_introduction == 0:
# ilosc tygodni na dostosowanie dystansu
weeks_for_introduction = 4
elif distance < 22:
if weeks_for_introduction == 0:
weeks_for_introduction = 10
else:
if weeks_for_introduction == 0:
weeks_for_introduction = 15
# jako ze dystans jest bardzo duzy trening odbywa sie na max 3/4 jego wartosic
distance *= 0.75
if mode == "Medium":
mins = 5
if distance < 22:
if weeks_for_introduction == 0:
weeks_for_introduction = 4
else:
if weeks_for_introduction == 0:
weeks_for_introduction = 10
distance *= 0.75
if mode == "Advance":
mins = 10
if weeks_for_introduction == 0:
weeks_for_introduction = 10
distance *= 0.75
# ilosc kilometrow jaka zwiekszamy co kazdy tydzien
jump = (distance - mins) / (weeks_for_introduction - 1)
# iterowanie przez kazdy tydzien traningu wprowadzajacego
for i in range(actual_week, actual_week + weeks_for_introduction):
plan[str(i)] = {}
weeks -= 1
# iterowanie przez kazdy dzien tygodnia (weekend jako 1 dzien czyli mozna se wybrac sob lub nd)
for day in range(0, len(week)):
if day % 2 == 0:
plan[str(i)][week[day]] = "odpoczynek"
elif (day == 1 or day == 3) and mins > 5:
plan[str(i)][week[day]] = "bieg na " + str(round(mins / 2)) + "km"
else:
plan[str(i)][week[day]] = "bieg na " + str(round(mins)) + "km"
mins += jump
#aktualizowanie aktualnego tygonia
actual_week += weeks_for_introduction
return plan, weeks, actual_week
def full_training(weeks, actual_week, distance, week, mode, plan, speed):
# range (actual_week, actual_week + weeks)
if mode == "Basic":
# min predkosc po introduction ktora jest zwiekszana z klejnymi tygodniami
min_speed = 10
if distance >= 22:
distance *= 0.75
elif mode == "Medium":
min_speed = 8
if distance >= 22:
distance *= 0.75
else:
min_speed = 7
if distance >= 22:
distance *= 0.75
# zwiekszanie predkosci co tydzien o jump
jump = (min_speed - speed) / weeks
for i in range(actual_week, actual_week + weeks):
plan[str(i)] = {}
min_speed -= jump
weeks -= 1
actual_week += 1
for day in range(0, len(week)):
if day % 2 == 0:
plan[str(i)][week[day]] = "odpoczynek"
elif day == 1 and 5 < distance < 11:
plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
speed_to_str(min_speed * 0.7) + " min/km"
elif day == 1 and 5 < distance < 22:
plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
speed_to_str(min_speed * 0.8) + " min/km"
elif day == 1 and 5 < distance:
plan[str(i)][week[day]] = "bieg na " + str(round(distance / 2)) + "km w czasie " + \
speed_to_str(min_speed * 0.9) + " min/km"
elif day == 3 and mode != "Advance":
plan[str(i)][week[day]] = "bieg interwalowy: 5x (bieg 1.5 min na maksimum mozliwosci + " \
"2 min wolnego truchtu) + wybiganie na " + str(round(distance / 2)) + "km"
elif day == 3:
plan[str(i)][week[day]] = "bieg interwalowy: 5x (bieg 1.5 min na maksimum mozliwosci pod gorke + " \
"2 min wolnego truchtu z gorki) + wybiganie na " + \
str(round(distance / 2)) + "km"
else:
plan[str(i)][week[day]] = "bieg na " + str(distance) + "km w czasie " + speed_to_str(min_speed) + \
" min/km"
return plan, weeks, actual_week
def home(request):
global runs
runs = {}
if request.method == "POST":
runs = {}
# pobranie danych z forms po wcisnieciu przycisku
form = getData(request.POST)
# ustawienie zmiennej na global w celu modyfikacji dict runs
if form.is_valid():
# pobieranie danych
city = form.cleaned_data["city"]
date_from_wrong = form.cleaned_data["date_from"]
date_to_wrong = form.cleaned_data["date_to"]
distance_from = form.cleaned_data["distance_from"]
distance_to = form.cleaned_data["distance_to"]
# zamiana daty
if date_from_wrong is not None:
date_from_correct = str(date_from_wrong.year) + "-" + str(date_from_wrong.month) + "-" + \
str(date_from_wrong.day)
else:
date_from_correct = ""
if date_to_wrong is not None:
date_to_correct = str(date_to_wrong.year) + "-" + str(date_to_wrong.month) + "-" + \
str(date_to_wrong.day)
else:
date_to_correct = ""
# wyczyszczenie input-ow
form = getData()
# pobranie danych ze strony
url = "https://run-log.com/events/?terms=" + city + "&date_from=" + date_from_correct + \
"&date_to=" + date_to_correct + "&distance_from=" + str(distance_from) + \
"&distance_to=" + str(distance_to) + "&location_radius=&action="
website = requests.get(url)
result = website.text
doc = BeautifulSoup(result, "html.parser")
table = doc.tbody
trs = table.contents
i = 0
# iterowanie po kazdym elemenecie tabeli z danymi zawodow
for tr in trs:
i += 1
# sprawdzenie czy w tabeli istenieja biegi oraz czy nie sprawdzania jest pusty wiersz
if i % 2 == 0 and i <= 10 and len(tr.contents) >= 10:
run = {}
date, name, distance, shit, location = tr.contents[1::2]
run["date"] = date.text
run["distance"] = distance.text.strip()
run["location"] = location.text
run["number"] = i/2
name = name.a.string
# wyszukiwanie linkow do obrazu dla kazdego miasta w ktorym jest bieg
r = requests.get(
'https://commons.wikimedia.org/w/index.php?search=' + run["location"]
+ '&title=Special:MediaSearch&go=Go&type=image')
result = r.text
doc = BeautifulSoup(result, "html.parser")
images = doc.find('a', {'class': 'sdms-image-result'})
print(images)
if not images:
run["image"] = "#"
else:
r = requests.get(images['href'])
result = r.text
doc = BeautifulSoup(result, "html.parser")
doc2 = doc.find('div', {'class': 'mw-body-content'})
image = doc2.find('img')
run["image"] = image['src']
# w wypadku wystapnie biegu z taka sama nazwa dodanie numerka do nazyw
if name in runs:
runs[name+" ("+str(i/2)[0]+")"] = run
else:
runs[name] = run
else:
form = getData()
return render(request, "runsite/home.html", {"Data": form, "Runs": runs})
def run_plan(request):
# pobranie url storny (zawiera index dictionary z dpowiednimi zawodami)
url = int(request.build_absolute_uri()[22])
key = list(runs.keys())[url-1]
working = 1
# oblicznie ile dni oraz tygodni jest do zawodow
days = calculate_difference(key)
weeks = days//7
week = ['pon', 'wt', 'sr', 'cw', 'pt', 'weekend']
plan = {}
# konwertowanie dystansu ze slownika na typ int (pomijanie metrow)
distance = calculate_distance(key)
# generowanie mapy ze znacznikiem lokalizacji biegu
try:
location = geocoder.location(runs[key]['location'])
lat = location.lat
lng = location.lng
mapa = folium.Map(location=[lat, lng], zoom_start=12)
folium.Marker([lat, lng]).add_to(mapa)
except:
location = geocoder.osm('PL')
lat = location.lat
lng = location.lng
mapa = folium.Map(location=[lat, lng], zoom_start=12)
mapa = mapa._repr_html_()
if request.method == "POST":
working = 1
# pobranie danych z forms po wcisnieciu przycisku
form = getTraningInfo(request.POST)
if form.is_valid():
type_of_training = form.cleaned_data["type"]
time_hours = form.cleaned_data["time_hours"]
if time_hours:
try:
time_hours = int(time_hours)
except ValueError:
working = 0
time_hours = 0
else:
time_hours = 0
time_minutes = form.cleaned_data["time_minutes"]
if time_minutes:
try:
time_minutes = int(time_minutes)
except ValueError:
working = 0
time_minutes = 0
else:
time_minutes = 0
speed = calculate_speed(time_hours, time_minutes, distance)
if time_minutes < 0 or time_hours < 0 or speed < 2.5:
working = 0
form = getTraningInfo()
if type_of_training == "Basic":
speed *= 1.2
if weeks <= 3:
print("nie da sie wygnerowa traningu1")
working = 0
elif weeks <= 20:
#pierwszy tryb (najkrotrze zawody)
if distance < 11:
if weeks < 6:
plan, weeks, actual_week = basic_introduction(weeks)
elif weeks >= 6:
# pamietaj 6 - 2(basic_introduction)
# pamietaj ze full_training() z tych weekow co zostaly po introduction
plan, dif, actual_week = basic_introduction(2)
print(weeks)
# odjecie od pozostalych tygodni juz wykorzystanych
weeks -= dif
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
#drugi tryb (srednio dlugie zaowdy)
elif distance < 22:
if weeks < 12:
print("nie da sie wygenerowac treningu2")
working = 0
elif weeks >= 12:
plan, dif, actual_week = basic_introduction(2)
weeks -= dif
print(weeks)
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
#trzeci tryb(dlugie zawody)
else:
if weeks < 17:
print("nie da sie wygenerowac treningu2")
working = 0
if weeks >= 17:
plan, dif, actual_week = basic_introduction(2)
weeks -= dif
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
# ----------------------------------
else:
if distance < 11:
# wyliczenie na korym tygoniu konczy sie introducion (+2 by uwzglednic basic_introdution)
weeks_for_introduction = round((weeks * 0.2)//1 + 2)
plan, dif, actual_week = basic_introduction(2)
weeks -= dif
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan, weeks_for_introduction)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
elif distance < 22:
weeks_for_introduction = round((weeks * 0.5) // 1 + 2)
plan, dif, actual_week = basic_introduction(2)
weeks -= dif
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan, weeks_for_introduction)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
weeks_for_introduction = round((weeks * 0.75) // 1 + 2)
plan, dif, actual_week = basic_introduction(2)
weeks -= dif
plan, weeks, actual_week = introduction(weeks, actual_week, distance, week,
type_of_training, plan, weeks_for_introduction)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
elif type_of_training == "Medium":
if weeks <= 3:
print("nie da sie wygnerowa traningu1")
working = 0
elif distance < 11:
# dla malego dystansu w trybie medium nie ma introduction
plan, weeks, actual_week = full_training(weeks, 1, distance, week,
type_of_training, {}, speed)
print(plan)
print(weeks)
print(actual_week)
elif weeks <= 20:
if distance < 22:
if weeks < 4:
print("nie da sie wygnerowa traningu2")
working = 0
else:
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {})
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
if weeks < 10:
print("nie da sie wygnerowa traningu2")
working = 0
else:
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {})
print(plan)
print(weeks)
print(actual_week)
print(speed)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
if distance < 22:
weeks_for_introduction = round((weeks * 0.2) // 1)
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {}, weeks_for_introduction)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
weeks_for_introduction = round((weeks * 0.5) // 1)
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {}, weeks_for_introduction)
print(plan)
print(weeks)
print(actual_week)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
speed *= 0.9
if weeks <= 3:
print("nie da sie wygnerowa traningu1")
working = 0
elif distance < 22:
# dla malego dystansu oraz sredniego w trybie advance nie ma introduction
plan, weeks, actual_week = full_training(weeks, 1, distance, week,
type_of_training, {}, speed)
print(plan)
print(weeks)
print(actual_week)
elif weeks < 10:
print("nie da sie wygnerowa traningu3")
working = 0
elif weeks <= 20:
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {})
#print(plan)
print(weeks)
print(actual_week)
print(speed)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
else:
weeks_for_introduction = round((weeks * 0.5) // 1)
plan, weeks, actual_week = introduction(weeks, 1, distance, week,
type_of_training, {}, weeks_for_introduction)
#print(plan)
print(weeks)
print(actual_week)
print(speed)
plan, weeks, actual_week = full_training(weeks, actual_week, distance, week,
type_of_training, plan, speed)
print(plan)
print(weeks)
print(actual_week)
if working == 0:
plan = {}
for name, values in plan.items():
print(name)
print(values)
else:
form = getTraningInfo()
return render(request, "runsite/runPlan.html", {"Forms": form, "Key": key, "Run": runs[key], "Mapa": mapa,
"Plan": plan, "Working": working})
| kaczorwarka/Running-Events-Search-Engine-and-Traning-Plan-Generator | runsite/views.py | views.py | py | 26,067 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "da... |
34493734325 | from typing import Dict, List, Optional, Tuple, Union
from flask import (
abort, g, jsonify, render_template, request, make_response, Response
)
from werkzeug.exceptions import (
BadRequest, Forbidden, HTTPException, InternalServerError, NotFound
)
from plot_weather import (BAD_REQUEST_IMAGE_DATA,
INTERNAL_SERVER_ERROR_IMAGE_DATA, DebugOutRequest,
app, app_logger, app_logger_debug)
from plot_weather.dao.weathercommon import WEATHER_CONF
from plot_weather.dao.weatherdao import WeatherDao
from plot_weather.dao.devicedao import DeviceDao, DeviceRecord
from plot_weather.db.sqlite3conv import DateFormatError, strdate2timestamp
from plot_weather.plotter.plotterweather import (
ImageDateType, gen_plot_image, ImageDateParams, ParamKey
)
from werkzeug.datastructures import Headers, MultiDict
import psycopg2
from psycopg2.pool import SimpleConnectionPool
from psycopg2.extensions import connection
import plot_weather.util.dateutil as date_util
APP_ROOT: str = app.config["APPLICATION_ROOT"]
# エラーメッセージの内容 ※messages.confで定義
MSG_REQUIRED: str = app.config["MSG_REQUIRED"]
MSG_INVALID: str = app.config["MSG_INVALID"]
MSG_NOT_FOUND: str = app.config["MSG_NOT_FOUND"]
# ヘッダー
# トークン ※携帯端末では必須, 一致 ※ない場合は不一致とみなす
# messages.conf で定義済み
# 端末サイズ情報 ※携帯端末では必須, 形式は 幅x高さx密度
MSG_PHONE_IMG: str = "phone image size"
REQUIRED_PHONE_IMG: str = f"401,{MSG_PHONE_IMG} {MSG_REQUIRED}"
INVALID_PHONE_IMG: str = f"402,{MSG_PHONE_IMG} {MSG_INVALID}"
# リクエストパラメータ
PARAM_DEVICE: str = "device_name"
PARAM_START_DAY: str = "start_day"
PARAM_BOFORE_DAYS: str = "before_days"
PARAM_YEAR_MONTH: str = "year_month"
# リクエストパラメータエラー時のコード: 421番台以降
# デバイス名: 必須, 長さチェック (1-20byte), 未登録
DEVICE_LENGTH: int = 20
# デバイスリスト取得クリエスと以外の全てのリクエスト
REQUIRED_DEVICE: str = f"421,{PARAM_DEVICE} {MSG_REQUIRED}"
INVALIDD_DEVICE: str = f"422,{PARAM_DEVICE} {MSG_INVALID}"
DEVICE_NOT_FOUND: str = f"423,{PARAM_DEVICE} {MSG_NOT_FOUND}"
# 期間指定画像取得リクエスト
# (1)検索開始日["start_day"]: 任意 ※未指定ならシステム日付を検索開始日とする
# 日付形式(ISO8601: YYYY-mm-dd), 10文字一致
INVALID_START_DAY: str = f"431,{PARAM_START_DAY} {MSG_INVALID}"
# (2)検索開始日から N日前 (1,2,3,7日): 必須
REQUIRED_BOFORE_DAY: str = f"433,{PARAM_BOFORE_DAYS} {MSG_REQUIRED}"
INVALID_BOFORE_DAY: str = f"434,{PARAM_BOFORE_DAYS} {MSG_INVALID}"
# 月間指定画像取得リクエスト
# 年月: 必須, 形式(YYYY-mm), 7文字一致
REQUIRED_YEAR_MONTH: str = f"435,{PARAM_YEAR_MONTH} {MSG_REQUIRED}"
INVALID_YEAR_MONTH: str = f"436,{PARAM_YEAR_MONTH} {MSG_INVALID}"
# エラーメッセージを格納する辞書オブジェクト定義
MSG_DESCRIPTION: str = "error_message"
# 固定メッセージエラー辞書オブジェクト
ABORT_DICT_UNMATCH_TOKEN: Dict[str, str] = {MSG_DESCRIPTION: app.config["UNMATCH_TOKEN"]}
# 可変メッセージエラー辞書オブジェクト: ""部分を置き換える
ABORT_DICT_BLANK_MESSAGE: Dict[str, str] = {MSG_DESCRIPTION: ""}
def get_connection() -> connection:
if 'db' not in g:
conn_pool: SimpleConnectionPool = app.config["postgreSQL_pool"]
g.db: connection = conn_pool.getconn()
g.db.set_session(readonly=True, autocommit=True)
if app_logger_debug:
app_logger.debug(f"g.db:{g.db}")
return g.db
@app.teardown_appcontext
def close_connection(exception=None) -> None:
db: connection = g.pop('db', None)
if app_logger_debug:
app_logger.debug(f"db:{db}")
if db is not None:
app.config["postgreSQL_pool"].putconn(db)
@app.route(APP_ROOT, methods=["GET"])
def index() -> str:
    """Render today's weather page (first request only).

    :return: HTML page embedding the matplotlib plot as a base64 image.
    """
    if app_logger_debug:
        app_logger.debug(request.path)
    try:
        conn: connection = get_connection()
        # Year-month list for the month selector
        dao = WeatherDao(conn, logger=app_logger)
        yearMonthList: List[str] = dao.getGroupbyMonths(
            device_name=WEATHER_CONF["DEVICE_NAME"],
            start_date=WEATHER_CONF["STA_YEARMONTH"],
        )
        if app_logger_debug:
            app_logger.debug(f"yearMonthList:{yearMonthList}")
        # Today's plot image (base64-encoded PNG)
        image_date_params = ImageDateParams(ImageDateType.TODAY)
        img_base64_encoded: str = gen_plot_image(
            conn, image_date_params=image_date_params, logger=app_logger
        )
    except Exception as exp:
        app_logger.error(exp)
        # BUGFIX: was "InternalServerError.codde", which raised AttributeError
        # on the error path instead of aborting with HTTP 500.
        abort(InternalServerError.code, InternalServerError(original_exception=exp))
    strToday: str = app.config.get("STR_TODAY", "")
    titleSuffix: str = app.config.get("TITLE_SUFFIX", "")
    defaultMainTitle: str = strToday + titleSuffix
    return render_template(
        "showplotweather.html",
        ip_host=app.config["SERVER_NAME"],
        app_root_url=APP_ROOT,
        path_get_today="/gettoday",
        path_get_month="/getmonth/",
        str_today=strToday,
        title_suffix=titleSuffix,
        info_today_update_interval=app.config.get("INFO_TODAY_UPDATE_INTERVAL"),
        default_main_title=defaultMainTitle,
        year_month_list=yearMonthList,
        img_src=img_base64_encoded,
    )
@app.route("/plot_weather/gettoday", methods=["GET"])
def getTodayImage() -> Response:
"""本日データ取得リクエスト(2回以降) JavaScriptからのリクエスト想定
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:image/png;base64,... base64encoded data ...')
"""
if app_logger_debug:
app_logger.debug(request.path)
try:
conn: connection = get_connection()
# 本日データプロット画像取得
image_date_params = ImageDateParams(ImageDateType.TODAY)
img_base64_encoded: str = gen_plot_image(
conn, image_date_params, logger=app_logger
)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
return _createErrorImageResponse(InternalServerError.code)
return _createImageResponse(img_base64_encoded)
@app.route("/plot_weather/getmonth/<yearmonth>", methods=["GET"])
def getMonthImage(yearmonth) -> Response:
"""要求された年月の月間データ取得
:param yearmonth str: 年月 (例) 2022-01
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:image/png;base64,... base64encoded data ...')
"""
if app_logger_debug:
app_logger.debug(request.path)
try:
# リクエストパラメータの妥当性チェック: "YYYY-mm" + "-01"
chk_yyyymmdd = yearmonth + "-01"
# 日付チェック(YYYY-mm-dd): 日付不正の場合例外スロー
strdate2timestamp(chk_yyyymmdd, raise_error=True)
conn: connection = get_connection()
# 指定年月(year_month)データプロット画像取得
image_date_params = ImageDateParams(ImageDateType.YEAR_MONTH)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.YEAR_MONTH] = yearmonth
image_date_params.setParam(param)
img_base64_encoded: str = gen_plot_image(
conn, image_date_params, logger=app_logger
)
except DateFormatError as dfe:
# BAD Request
app_logger.warning(dfe)
return _createErrorImageResponse(BadRequest.code)
except psycopg2.Error as db_err:
# DBエラー
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
# バグ, DBサーバーダウンなど想定
app_logger.error(exp)
return _createErrorImageResponse(InternalServerError.code)
return _createImageResponse(img_base64_encoded)
@app.route("/plot_weather/getlastdataforphone", methods=["GET"])
def getLastDataForPhone() -> Response:
"""最新の気象データを取得する (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
:param: request parameter: device_name="xxxxx"
"""
if app_logger_debug:
app_logger.debug(request.path)
# Debug output request.headers or request.arg or both
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
try:
conn: connection = get_connection()
# 現在時刻時点の最新の気象データ取得
dao = WeatherDao(conn, logger=app_logger)
rec_count: int
row: Optional[Tuple[str, float, float, float, float]]
# デバイス名に対応する最新のレコード取得
row = dao.getLastData(device_name=param_device_name)
if row:
rec_count = 1
measurement_time, temp_out, temp_in, humid, pressure = row
return _responseLastDataForPhone(
measurement_time, temp_out, temp_in, humid, pressure, rec_count)
else:
# デバイス名に対応するレコード無し
rec_count = 0
return _responseLastDataForPhone(None, None, None, None, None, rec_count)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/getfirstregisterdayforphone", methods=["GET"])
def getFirstRegisterDayForPhone() -> Response:
"""デバイスの観測データの初回登録日を取得する (スマートホン専用)
[仕様追加] 2023-09-13
:param: request parameter: device_name="xxxxx"
"""
if app_logger_debug:
app_logger.debug(request.path)
# Debug output request.headers or request.arg or both
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
try:
conn: connection = get_connection()
dao = WeatherDao(conn, logger=app_logger)
# デバイス名に対応する初回登録日取得
first_register_day: Optional[str] = dao.getFisrtRegisterDay(param_device_name)
if app_logger_debug:
app_logger.debug(f"first_register_day[{type(first_register_day)}]: {first_register_day}")
if first_register_day:
return _responseFirstRegisterDayForPhone(first_register_day, 1)
else:
# デバイス名に対応するレコード無し
return _responseFirstRegisterDayForPhone(None, 0)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/gettodayimageforphone", methods=["GET"])
def getTodayImageForPhone() -> Response:
"""本日データ画像取得リクエスト (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
(2) レスポンスにレコード件数を追加 ※0件エラーの抑止
:param: request parameter: device_name="xxxxx"
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:': 'img_src':'image/png;base64,... base64encoded data ...',
'rec_count':xxx)
"""
if app_logger_debug:
app_logger.debug(request.path)
_debugOutRequestObj(request, debugout=DebugOutRequest.HEADERS)
# トークン必須
headers: Headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名必須
param_device_name: str = _checkDeviceName(request.args)
# 表示領域サイズ+密度は必須: 形式(横x縦x密度)
str_img_size: str = _checkPhoneImageSize(headers)
try:
conn: connection = get_connection()
image_date_params = ImageDateParams(ImageDateType.TODAY)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.PHONE_SIZE] = str_img_size
image_date_params.setParam(param)
rec_count: int
img_base64_encoded: str
rec_count, img_base64_encoded = gen_plot_image(
conn, param_device_name, image_date_params, logger=app_logger
)
return _responseImageForPhone(rec_count, img_base64_encoded)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/getbeforedaysimageforphone", methods=["GET"])
def getBeforeDateImageForPhone() -> Response:
"""過去経過日指定データ画像取得リクエスト (スマートホン専用)
[仕様変更] 2023-09-09
(1) リクエストパラメータ追加
device_name: デバイス名 ※必須
start_day: 検索開始日(iso8601形式) ※任意
(2) レスポンスにレコード件数を追加 ※0件エラーの抑止
:param: request parameter: ?device_name=xxxxx&start_day=2023-05-01&before_days=(2|3|7)
:return: jSON形式(matplotlibでプロットした画像データ(形式: png)のbase64エンコード済み文字列)
(出力内容) jSON('data:': 'img_src':'image/png;base64,... base64encoded data ...',
'rec_count':xxx)
"""
if app_logger_debug:
app_logger.debug(request.path)
_debugOutRequestObj(request, debugout=DebugOutRequest.BOTH)
# トークン必須
headers = request.headers
if not _matchToken(headers):
abort(Forbidden.code, ABORT_DICT_UNMATCH_TOKEN)
# デバイス名 ※必須チェック
param_device_name: str = _checkDeviceName(request.args)
# 検索開始日 ※任意、指定されている場合はISO8601形式チェック
str_start_day: Optional[str] = _checkStartDay(request.args)
if str_start_day is None:
# 検索開始日がない場合は当日を設定
str_start_day = date_util.getTodayIsoDate()
# Check before_days query parameter
str_before_days: str = _checkBeforeDays(request.args)
# 表示領域サイズ+密度は必須: 形式(横x縦x密度)
str_img_size: str = _checkPhoneImageSize(headers)
try:
conn: connection = get_connection()
image_date_params = ImageDateParams(ImageDateType.RANGE)
param: Dict[ParamKey, str] = image_date_params.getParam()
param[ParamKey.START_DAY] = str_start_day
param[ParamKey.BEFORE_DAYS] = str_before_days
param[ParamKey.PHONE_SIZE] = str_img_size
image_date_params.setParam(param)
rec_count: int
img_base64_encoded: str
rec_count, img_base64_encoded = gen_plot_image(
conn, param_device_name, image_date_params, logger=app_logger
)
return _responseImageForPhone(rec_count,img_base64_encoded)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
@app.route("/plot_weather/get_devices", methods=["GET"])
def getDevices() -> Response:
"""センサーディバイスリスト取得リクエスト
:return: JSON形式(idを除くセンサーディバイスリスト)
(出力内容) JSON({"data":{"devices":[...]}')
"""
if app_logger_debug:
app_logger.debug(request.path)
devices_with_dict: List[Dict]
try:
conn: connection = get_connection()
dao: DeviceDao = DeviceDao(conn, logger=app_logger)
devices: List[DeviceRecord] = dao.get_devices()
devices_with_dict = DeviceDao.to_dict_without_id(devices)
resp_obj: Dict[str, Dict] = {
"data": {"devices": devices_with_dict},
"status": {"code": 0, "message": "OK"}
}
return _make_respose(resp_obj, 200)
except psycopg2.Error as db_err:
app_logger.error(db_err)
abort(InternalServerError.code, _set_errormessage(f"559,{db_err}"))
except Exception as exp:
app_logger.error(exp)
abort(InternalServerError.code, description=str(exp))
def _debugOutRequestObj(request, debugout=DebugOutRequest.ARGS) -> None:
    """Log the request's args and/or headers for debugging.

    :param request: current Flask request
    :param debugout: which parts to log (ARGS, HEADERS, or BOTH)
    """
    if debugout == DebugOutRequest.ARGS or debugout == DebugOutRequest.BOTH:
        # BUGFIX: the log label was misspelled "reqeust.args"
        app_logger.debug(f"request.args: {request.args}")
    if debugout == DebugOutRequest.HEADERS or debugout == DebugOutRequest.BOTH:
        app_logger.debug(f"request.headers: {request.headers}")
def _matchToken(headers: Headers) -> bool:
    """Check whether the request's token header matches the configured token.

    :param headers: request headers
    :return: True when the token matches, False otherwise.
    """
    expected: str = app.config.get("HEADER_REQUEST_PHONE_TOKEN_VALUE", "!")
    received: Optional[str] = headers.get(
        key=app.config.get("HEADER_REQUEST_PHONE_TOKEN_KEY", "!"),
        type=str,
        default=""
    )
    if received == expected:
        return True
    app_logger.warning("Invalid request token!")
    return False
def _checkPhoneImageSize(headers: Headers) -> str:
    """Validate the phone display-size header ("[width]x[height]x[density]").

    The token check has already passed, so the header is expected to be set;
    a malformed value (an Android-client bug) aborts with BadRequest and a
    JSON error message.
    :param headers: request headers
    :return: the raw "WxHxD" header value
    """
    img_size: str = headers.get(
        app.config.get("HEADER_REQUEST_IMAGE_SIZE_KEY", ""), type=str, default=""
    )
    if app_logger_debug:
        app_logger.debug(f"Phone imgSize: {img_size}")
    if not img_size:
        abort(BadRequest.code, _set_errormessage(REQUIRED_PHONE_IMG))
    try:
        parts: List[str] = img_size.split("x")
        img_wd: int = int(parts[0])
        img_ht: int = int(parts[1])
        density: float = float(parts[2])
        if app_logger_debug:
            app_logger.debug(f"imgWd: {img_wd}, imgHt: {img_ht}, density: {density}")
        return img_size
    except Exception as exp:
        # Only the exception message goes to the log
        app_logger.warning(f"[phone image size] {exp}")
        abort(BadRequest.code, _set_errormessage(INVALID_PHONE_IMG))
def _checkBeforeDays(args: MultiDict) -> str:
    """Validate the before_days query parameter (must be one of 1, 2, 3, 7).

    Presence is checked explicitly first, because args.get(key) alone cannot
    distinguish "missing" from "present but non-numeric" (both yield the
    default), and ImmutableMultiDict does not support pop().
    :return: before_days as a string
    """
    if PARAM_BOFORE_DAYS not in args.keys():
        abort(BadRequest.code, _set_errormessage(REQUIRED_BOFORE_DAY))
    before_days = args.get(PARAM_BOFORE_DAYS, default=-1, type=int)
    if before_days not in (1, 2, 3, 7):
        abort(BadRequest.code, _set_errormessage(INVALID_BOFORE_DAY))
    return str(before_days)
def _checkDeviceName(args: MultiDict) -> str:
    """Validate the device_name request parameter.

    Missing parameter: abort(BadRequest)
    No matching record: abort with a not-found message
    :return: the validated device name
    """
    # Required check
    if PARAM_DEVICE not in args.keys():
        abort(BadRequest.code, _set_errormessage(REQUIRED_DEVICE))
    # Length check: 1 - 20
    param_device_name: str = args.get(PARAM_DEVICE, default="", type=str)
    if not (1 <= len(param_device_name) <= DEVICE_LENGTH):
        abort(BadRequest.code, _set_errormessage(INVALIDD_DEVICE))
    # Existence check against the device table
    if app_logger_debug:
        app_logger.debug("requestParam.device_name: " + param_device_name)
    exists: bool = False
    try:
        conn: connection = get_connection()
        dao: DeviceDao = DeviceDao(conn, logger=app_logger)
        exists = dao.exists(param_device_name)
    except Exception as exp:
        app_logger.error(exp)
        abort(InternalServerError.code, description=str(exp))
    if exists is True:
        return param_device_name
    abort(BadRequest.code, _set_errormessage(DEVICE_NOT_FOUND))
def _checkStartDay(args: MultiDict) -> Optional[str]:
    """Validate the optional start_day parameter (ISO 8601 date).

    No parameter: OK (returns None)
    Parameter present: must be a valid ISO 8601 date, otherwise BadRequest.
    :return: the start day string, or None when the parameter is absent.
    """
    if PARAM_START_DAY not in args.keys():
        return None
    # Format check
    param_start_day: str = args.get(PARAM_START_DAY, default="", type=str)
    if app_logger_debug:
        app_logger.debug(f"start_day: {param_start_day}")
    if date_util.checkIso8601Date(param_start_day) is True:
        return param_start_day
    # Malformed parameter
    abort(BadRequest.code, _set_errormessage(INVALID_START_DAY))
def _createImageResponse(img_src: str) -> Response:
    """Build a success JSON response carrying the plot image (for JavaScript)."""
    payload = {"status": "success", "data": {"img_src": img_src}}
    return _make_respose(payload, 200)
def _createErrorImageResponse(err_code) -> Response:
    """Build an error JSON response with a placeholder image (for JavaScript)."""
    payload = {"status": "error", "code": err_code}
    if err_code == BadRequest.code:
        payload["data"] = {"img_src": BAD_REQUEST_IMAGE_DATA}
    elif err_code == InternalServerError.code:
        payload["data"] = {"img_src": INTERNAL_SERVER_ERROR_IMAGE_DATA}
    return _make_respose(payload, err_code)
def _responseLastDataForPhone(
        mesurement_time: str,
        temp_out: float,
        temp_in: float,
        humid: float,
        pressure: float,
        rec_count: int
) -> Response:
    """Return the latest weather record as JSON (for the phone app).

    NOTE(review): "mesurement_time" is a typo in the parameter name, kept
    because the signature is part of the caller contract.
    """
    data: Dict[str, Union[str, float]] = {
        "measurement_time": mesurement_time,
        "temp_out": temp_out,
        "temp_in": temp_in,
        "humid": humid,
        "pressure": pressure,
        "rec_count": rec_count
    }
    resp_obj: Dict[str, Dict[str, Union[str, float]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": data
    }
    return _make_respose(resp_obj, 200)
def _responseFirstRegisterDayForPhone(
        first_day: Optional[str],
        rec_count: int
) -> Response:
    """Return a device's first data-registration day as JSON (phone app)."""
    data: Dict[str, Union[str, int]] = {
        "first_register_day": first_day,
        "rec_count": rec_count
    }
    resp_obj: Dict[str, Dict[str, Union[str, int]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": data
    }
    return _make_respose(resp_obj, 200)
def _responseImageForPhone(rec_count: int, img_src: str) -> Response:
    """Return a matplotlib-generated image as JSON (phone app).

    [Spec change] 2023-09-09: the record count is included so the client can
    distinguish an empty result from an error.
    """
    data: Dict[str, Union[int, str]] = {
        "img_src": img_src,
        "rec_count": rec_count
    }
    resp_obj: Dict[str, Dict[str, Union[int, str]]] = {
        "status": {"code": 0, "message": "OK"},
        "data": data
    }
    return _make_respose(resp_obj, 200)
def _set_errormessage(message: str) -> Dict:
    """Build the error-message dict passed to abort().

    BUGFIX: previously this mutated and returned the shared module-level
    ABORT_DICT_BLANK_MESSAGE, so concurrent requests could overwrite each
    other's message; a fresh dict is returned instead.
    :param message: "code,description" error text
    :return: {"error_message": message}
    """
    return {MSG_DESCRIPTION: message}
# Request parameter check error.
@app.errorhandler(BadRequest.code)
# Token error.
@app.errorhandler(Forbidden.code)
# Device not found.
@app.errorhandler(NotFound.code)
@app.errorhandler(InternalServerError.code)
def error_handler(error: HTTPException) -> Response:
    """Render every registered HTTP error as a JSON status response."""
    app_logger.warning(f"error_type:{type(error)}, {error}")
    # Bugfix: 2023-09-06
    err_msg: str
    if isinstance(error.description, dict):
        # abort() calls made by this app pass a dict description
        err_msg = error.description["error_message"]
    else:
        # Errors raised by Flask itself carry a plain HTTPException description
        err_msg = error.description
    resp_obj: Dict[str, Dict[str, Union[int, str]]] = {
        "status": {"code": error.code, "message": err_msg}
    }
    return _make_respose(resp_obj, error.code)
def _make_respose(resp_obj: Dict, resp_code) -> Response:
    """Serialize resp_obj as JSON and wrap it in a Flask response.

    NOTE(review): "respose" in the name is a typo, kept because every handler
    in this module calls it by this name.
    """
    resp = make_response(jsonify(resp_obj), resp_code)
    resp.headers["Content-Type"] = "application/json"
    return resp
| pipito-yukio/plot_weather_flaskapp | src/plot_weather/views/app_main.py | app_main.py | py | 26,178 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "plot_weather.app.config",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "plot_weather.app",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "plot_weather.app.config",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_na... |
21051188362 | from django.db import models
from django_countries.fields import CountryField
from product.models import product, product_version
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth import get_user_model
User = get_user_model()
from decimal import Decimal
from django.conf import settings
class Order(models.Model):
    """A customer order with delivery/contact details and payment state."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='order_user')
    full_name = models.CharField(max_length=50)
    address1 = models.CharField(max_length=250)
    address2 = models.CharField(max_length=250)
    city = models.CharField(max_length=100)
    phone = models.CharField(max_length=100)
    post_code = models.CharField(max_length=20)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # NOTE(review): max_digits=5 caps the total at 999.99 — confirm that is enough
    total_paid = models.DecimalField(max_digits=5, decimal_places=2)
    order_key = models.CharField(max_length=200)
    billing_status = models.BooleanField(default=False)
    class Meta:
        # Newest orders first
        ordering = ('-created',)
    def __str__(self):
        return str(self.created)
class OrderItem(models.Model):
    """A single product line (price and quantity) within an Order."""
    order = models.ForeignKey(Order,
                              related_name='items',
                              on_delete=models.CASCADE)
    # NOTE(review): the field name shadows the imported `product` model class
    # inside this class body — it works, but is confusing; consider renaming
    # the imported model (would require a wider refactor).
    product = models.ForeignKey(product,
                                related_name='order_items',
                                on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=5, decimal_places=2)
    quantity = models.PositiveIntegerField(default=1)
    def __str__(self):
        return str(self.id)  # type: ignore
class Basket(models.Model):
    """Placeholder basket model with no fields.

    The commented-out code below is a disabled session-based basket
    implementation. NOTE(review): as a concrete Model with no fields this
    still creates a DB table with only an id column — confirm that is
    intended before shipping a migration.
    """
    ...
# """
# A base Basket class, providing some default behaviors that
# can be inherited or overrided, as necessary.
# """
# def __init__(self, request):
# self.session = request.session
# basket = self.session.get(settings.BASKET_SESSION_ID)
# if settings.BASKET_SESSION_ID not in request.session:
# basket = self.session[settings.BASKET_SESSION_ID] = {}
# self.basket = basket
# def add(self, product, qty):
# """
# Adding and updating the users basket session data
# """
# product_id = str(product.id)
# if product_id in self.basket:
# self.basket[product_id]["qty"] = qty
# else:
# self.basket[product_id] = {"price": str(product.regular_price), "qty": qty}
# self.save()
# def __iter__(self):
# """
# Collect the product_id in the session data to query the database
# and return products
# """
# product_ids = self.basket.keys()
# products = product.objects.filter(id__in=product_ids) # type: ignore
# basket = self.basket.copy()
# for product in products:
# basket[str(product.id)]["product"] = product
# for item in basket.values():
# item["price"] = Decimal(item["price"])
# item["total_price"] = item["price"] * item["qty"]
# yield item
# def __len__(self):
# """
# Get the basket data and count the qty of items
# """
# return sum(item["qty"] for item in self.basket.values())
# def update(self, product, qty):
# """
# Update values in session data
# """
# product_id = str(product)
# if product_id in self.basket:
# self.basket[product_id]["qty"] = qty
# self.save()
# def get_subtotal_price(self):
# return sum(Decimal(item["price"]) * item["qty"] for item in self.basket.values())
# def get_total_price(self):
# subtotal = sum(Decimal(item["price"]) * item["qty"] for item in self.basket.values())
# shipping = Decimal(0.00) if subtotal == 0 else Decimal(11.50)
# return subtotal + Decimal(shipping)
# def delete(self, product):
# """
# Delete item from session data
# """
# product_id = str(product)
# if product_id in self.basket:
# del self.basket[product_id]
# self.save()
# def clear(self):
# # Remove basket from session
# del self.session[settings.BASKET_SESSION_ID]
# self.save()
# def save(self):
# self.session.modified = True
class WishList(models.Model):
    """A product version a user has wish-listed."""
    user = models.ForeignKey(User, on_delete=models.CASCADE,null=True)
    item = models.ForeignKey(product_version, on_delete=models.CASCADE,blank=True, null=True)
    class Meta(object):
        verbose_name = 'WishList'
        verbose_name_plural = 'WishLists'
    def __str__(self):
        return f"{self.user}"
class CheckoutBilling(models.Model):
    """Billing address and contact details captured at checkout.

    NOTE(review): help_text says "Max 255 character" but max_length is 50 —
    confirm which limit is intended.
    """
    first_name = models.CharField(max_length=50,verbose_name='First Name', help_text='Max 255 character')
    last_name = models.CharField(max_length=50,verbose_name='Last Name', help_text='Max 255 character')
    company = models.TextField(verbose_name='Company')
    email = models.EmailField(verbose_name='Email Address')
    address = models.TextField(verbose_name='Street Address')
    country = CountryField(max_length=255, verbose_name='Country')
    telephone = models.CharField(max_length=25 ,verbose_name='Telephone')
    fax = models.CharField(max_length=50, verbose_name='Fax')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    class Meta:
        verbose_name = "Checkout Billing"
        verbose_name_plural = "Checkout Billings"
    def __str__(self):
        return self.first_name
class Checkout(models.Model):
    """Minimal checkout record holding only a delivery address and timestamps."""
    address=models.CharField(max_length=100)
    created_at=models.DateField(auto_now_add=True)
    updated_at=models.DateField(auto_now=True)
    def __str__(self):
        return self.address
class CheckoutShipping(models.Model):
    """Shipping address and contact details captured at checkout.

    Mirrors CheckoutBilling field-for-field; NOTE(review): a shared abstract
    base model would remove the duplication.
    """
    first_name = models.CharField(max_length=50,verbose_name='First Name', help_text='Max 255 character')
    last_name = models.CharField(max_length=50,verbose_name='Last Name', help_text='Max 255 character')
    company = models.TextField(verbose_name='Company')
    email = models.EmailField(verbose_name='Email Address')
    address = models.TextField(verbose_name='Street Address')
    country = CountryField(max_length=255, verbose_name='Country')
    telephone = models.CharField(max_length=25 ,verbose_name='Telephone')
    fax = models.CharField(max_length=50, verbose_name='Fax')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    class Meta:
        verbose_name = "Checkout Shipping"
        verbose_name_plural = "Checkout Shipping"
    def __str__(self):
        return self.first_name
class ShoppingCart(models.Model):
    """A shopping-cart line with pricing, coupon, and destination details.

    NOTE(review): unit_price/qty/subtotal are stored as CharField — numeric
    fields (DecimalField/IntegerField) would allow arithmetic and validation;
    confirm before a schema migration.
    """
    product_name=models.CharField(max_length=200)
    img = models.ImageField(upload_to = "images/")
    unit_price=models.CharField(max_length=10)
    qty=models.CharField(max_length=20)
    subtotal=models.CharField(max_length=25)
    coupon=models.CharField(max_length=20)
    zip_code=models.CharField(max_length=20)
    state=models.TextField()
    country=models.TextField(blank=False)
    class Meta:
        verbose_name = "Shopping Cart"
        verbose_name_plural = "Shopping Cart"
    def __str__(self):
        return self.product_name
| Shafag42/SuperB_E-commerce | order/models.py | models.py | py | 7,334 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
1360579310 | import pandas as pd
import pathlib as pl
import numpy as np
import RootPath
from abc import abstractmethod
from Utils.Data.Features.RawFeatures import *
from Utils.Data.Dictionary.MappingDictionary import *
def map_column_single_value(series, dictionary):
    """Translate every value of *series* through *dictionary* and return the
    result as a single-column int32 DataFrame."""
    translated = series.map(dictionary)
    return pd.DataFrame(translated.astype(np.int32))
def map_column_array(series, dictionary):
    """Translate each tab-separated string of *series* into an int32 numpy
    array of dictionary ids (pd.NA entries become None); returned as a
    single-column DataFrame."""
    def translate(value):
        if value is pd.NA:
            return None
        return np.array([dictionary[token] for token in value.split('\t')], dtype=np.int32)
    return pd.DataFrame(series.map(translate))
class MappedFeaturePickle(Feature):
    """
    Abstract class representing a dictionary that works with pickle file.

    Stores a mapped feature as a gzip-compressed pickle under
    <ROOT_PATH>/<dataset_id>/mapped/<feature_name>.pck.gz; a CSV path is
    kept alongside for (currently disabled) backups.
    """
    def __init__(self, feature_name: str, dataset_id: str):
        super().__init__(feature_name, dataset_id)
        self.pck_path = pl.Path(f"{Feature.ROOT_PATH}/{self.dataset_id}/mapped/{self.feature_name}.pck.gz")
        self.csv_path = pl.Path(f"{Feature.ROOT_PATH}/{self.dataset_id}/mapped/{self.feature_name}.csv.gz")
    def has_feature(self):
        # True once the pickle file has been created on disk
        return self.pck_path.is_file()
    def load_feature(self):
        """Load the feature DataFrame from disk (it must already exist)."""
        assert self.has_feature(), f"The feature {self.feature_name} does not exists. Create it first."
        df = pd.read_pickle(self.pck_path, compression="gzip")
        # Renaming the column for consistency purpose
        df.columns = [self.feature_name]
        return df
    @abstractmethod
    def create_feature(self):
        # Implemented by subclasses: compute the mapped feature and save it
        pass
    def save_feature(self, dataframe: pd.DataFrame):
        """Persist the single-column DataFrame, renaming its column first."""
        # Changing column name
        dataframe.columns = [self.feature_name]
        self.pck_path.parent.mkdir(parents=True, exist_ok=True)
        dataframe.to_pickle(self.pck_path, compression='gzip')
        # For backup reason
        # self.csv_path.parent.mkdir(parents=True, exist_ok=True)
        # dataframe.to_csv(self.csv_path, compression='gzip', index=True)
class MappedFeatureTweetLanguage(MappedFeaturePickle):
    """Tweet language mapped to an integer id via the language dictionary."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_language", dataset_id)
    def create_feature(self):
        # Map the raw language values to integer ids and persist the result
        feature = RawFeatureTweetLanguage(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingLanguageDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureGroupedTweetLanguage(MappedFeaturePickle):
    """Tweet language ids collapsed into coarse language groups, then re-mapped
    to a compact, contiguous id space (ids assigned in order of first appearance).
    """
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_grouped_tweet_language", dataset_id)
        # group id -> compact id, filled lazily by remap_language_id
        self.group_id_dict = {}
        self.current_mapping = 0
    def get_grouped_id(self, language_id):
        """Collapse related/rare language ids into one representative id."""
        # English mixed with other languages
        if language_id == 16 or language_id == 18 or language_id == 20:
            return 16
        # [UNK]
        elif language_id == 26 or language_id == 56 or language_id == 57 or language_id == 58 or language_id == 59 or language_id == 61:
            return 26
        # unidentified
        elif language_id == 28 or language_id == 36 or language_id == 37 or language_id == 43 or language_id == 45 or language_id == 46:
            return 28
        # Persian / Pashto
        elif language_id == 25 or language_id == 44 or language_id == 41:
            return 25
        # Indian languages
        elif language_id == 8 or language_id == 32 or language_id == 34 or language_id == 35 or language_id == 47 or language_id == 48 or language_id == 49 or language_id == 50 or language_id == 52 or language_id == 53 or language_id == 54 or language_id == 60 or language_id == 62:
            return 8
        # Eastern European languages
        elif language_id == 14 or language_id == 23 or language_id == 24 or language_id == 55:
            return 14
        # Northern European languages
        elif language_id == 21 or language_id == 31 or language_id == 38 or language_id == 39:
            return 21
        # Central European / Balkan languages
        elif language_id == 29 or language_id == 40 or language_id == 42:
            return 29
        # others (Vietnamese, Burmese, Armenian, Georgian, Uyghur)
        elif language_id == 30 or language_id == 51 or language_id == 63 or language_id == 64 or language_id == 65:
            return 30
        else:
            return language_id
    def remap_language_id(self, group_id):
        """Return a stable compact id for *group_id*, allocating one on first sight."""
        if group_id not in self.group_id_dict:
            self.group_id_dict[group_id] = self.current_mapping
            self.current_mapping += 1
        return self.group_id_dict[group_id]
    def create_feature(self):
        feature = MappedFeatureTweetLanguage(self.dataset_id)
        dataframe = feature.load_or_create()
        grouped_series = dataframe["mapped_feature_tweet_language"].map(self.get_grouped_id)
        # BUGFIX: the compact remapping is applied to the *grouped* ids.
        # Previously the grouped values were computed but the remap ran on the
        # raw language ids, so the grouping had no effect on the saved feature.
        mapped_dataframe = pd.DataFrame(grouped_series.map(self.remap_language_id))
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetId(MappedFeaturePickle):
    """Tweet id mapped to a dense integer id via the tweet-id dictionary."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_id", dataset_id)
    def create_feature(self):
        # Map each raw tweet id to its dictionary integer and persist
        feature = RawFeatureTweetId(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingTweetIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureCreatorId(MappedFeaturePickle):
    """Tweet creator's user id mapped via the shared user-id dictionary."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_creator_id", dataset_id)
    def create_feature(self):
        # Creators and engagers share MappingUserIdDictionary, so their ids align
        feature = RawFeatureCreatorId(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingUserIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureEngagerId(MappedFeaturePickle):
    """Engaging user's id mapped via the shared user-id dictionary."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_engager_id", dataset_id)
    def create_feature(self):
        # Creators and engagers share MappingUserIdDictionary, so their ids align
        feature = RawFeatureEngagerId(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingUserIdDictionary().load_or_create()
        mapped_dataframe = map_column_single_value(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetHashtags(MappedFeaturePickle):
    """Tab-separated hashtag lists mapped to int32 arrays of hashtag ids."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_hashtags", dataset_id)
    def create_feature(self):
        # Multi-valued column: each cell becomes an array of dictionary ids
        feature = RawFeatureTweetHashtags(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingHashtagDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetLinks(MappedFeaturePickle):
    """Tab-separated link lists mapped to int32 arrays of link ids."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_links", dataset_id)
    def create_feature(self):
        # Multi-valued column: each cell becomes an array of dictionary ids
        feature = RawFeatureTweetLinks(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingLinkDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetDomains(MappedFeaturePickle):
    """Tab-separated domain lists mapped to int32 arrays of domain ids."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_domains", dataset_id)
    def create_feature(self):
        # Multi-valued column: each cell becomes an array of dictionary ids
        feature = RawFeatureTweetDomains(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingDomainDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
class MappedFeatureTweetMedia(MappedFeaturePickle):
    """Tab-separated media-type lists mapped to int32 arrays of media ids."""
    def __init__(self, dataset_id: str):
        super().__init__("mapped_feature_tweet_media", dataset_id)
    def create_feature(self):
        # Multi-valued column: each cell becomes an array of dictionary ids
        feature = RawFeatureTweetMedia(self.dataset_id)
        dataframe = feature.load_or_create()
        dictionary = MappingMediaDictionary().load_or_create()
        mapped_dataframe = map_column_array(dataframe[feature.feature_name], dictionary)
        self.save_feature(mapped_dataframe)
| MaurizioFD/recsys-challenge-2020-twitter | Utils/Data/Features/MappedFeatures.py | MappedFeatures.py | py | 8,608 | python | en | code | 39 | github-code | 6 | [
{
"api_name": "numpy.int32",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.NA",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"l... |
9179526990 | import os
import stat
import string
from absl.testing import absltest
from src.test.py.bazel import test_base
# pylint: disable=g-import-not-at-top
if os.name == 'nt':
import win32api
class LauncherTest(test_base.TestBase):
def _buildJavaTargets(self, bazel_bin, binary_suffix):
self.RunBazel(['build', '//foo'])
main_binary = os.path.join(bazel_bin, 'foo/foo%s' % binary_suffix)
self.assertTrue(os.path.isfile(main_binary))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix)))
if self.IsWindows():
self.assertTrue(os.path.isfile(main_binary))
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix
),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/foo.runfiles/_main/bar/bar.txt')
)
)
_, stdout, _ = self.RunProgram([main_binary])
self.assertEqual(len(stdout), 4)
self.assertEqual(stdout[0], 'hello java')
if self.IsWindows():
self.assertRegexpMatches(
stdout[1], r'java_runfiles=.*foo\\foo%s.runfiles' % binary_suffix)
self.assertEqual(stdout[2], 'runfiles_manifest_only=1')
self.assertRegexpMatches(
stdout[3], r'^runfiles_manifest_file=[a-zA-Z]:[/\\].*MANIFEST$')
else:
self.assertRegexpMatches(stdout[1], r'java_runfiles=.*/foo/foo.runfiles')
self.assertEqual(stdout[2], 'runfiles_manifest_only=')
self.assertRegexpMatches(stdout[3], r'^runfiles_manifest_file.*MANIFEST$')
def _buildShBinaryTargets(self, bazel_bin, bin1_suffix):
self.RunBazel(['build', '//foo:bin1.sh'])
bin1 = os.path.join(bazel_bin, 'foo', 'bin1.sh%s' % bin1_suffix)
self.assertTrue(os.path.exists(bin1))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/bin1.sh%s.runfiles' % bin1_suffix)))
self.RunBazel(['build', '//foo:bin2.cmd'])
bin2 = os.path.join(bazel_bin, 'foo/bin2.cmd')
self.assertTrue(os.path.exists(bin2))
self.assertTrue(
os.path.isdir(os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles')))
exit_code, _, stderr = self.RunBazel(
['build', '//foo:bin3.bat'], allow_failure=True
)
if self.IsWindows():
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('target name extension should match source file extension',
os.linesep.join(stderr))
else:
bin3 = os.path.join(bazel_bin, 'foo', 'bin3.bat')
self.assertTrue(os.path.exists(bin3))
self.assertTrue(
os.path.isdir(os.path.join(bazel_bin, 'foo/bin3.bat.runfiles')))
if self.IsWindows():
self.assertTrue(os.path.isfile(bin1))
self.assertTrue(os.path.isfile(bin2))
else:
self.assertTrue(os.path.islink(bin1))
self.assertTrue(os.path.islink(bin2))
self.assertTrue(os.path.islink(bin3))
if self.IsWindows():
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/bin1.sh%s.runfiles/MANIFEST' % bin1_suffix
),
'_main/bar/bar.txt',
)
self.AssertRunfilesManifestContains(
os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/MANIFEST'),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin1.sh.runfiles/_main/bar/bar.txt')
)
)
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin2.cmd.runfiles/_main/bar/bar.txt')
)
)
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/bin3.bat.runfiles/_main/bar/bar.txt')
)
)
_, stdout, _ = self.RunProgram([bin1])
self.assertEqual(len(stdout), 3)
self.assertEqual(stdout[0], 'hello shell')
if self.IsWindows():
self.assertEqual(stdout[1], 'runfiles_manifest_only=1')
self.assertRegexpMatches(
stdout[2],
(r'^runfiles_manifest_file='
r'[a-zA-Z]:/.*/foo/bin1.sh%s.runfiles/MANIFEST$' % bin1_suffix))
else:
# TODO(laszlocsomor): Find out whether the runfiles-related envvars should
# be set on Linux (e.g. $RUNFILES, $RUNFILES_MANIFEST_FILE). Currently
# they aren't, and that may be a bug. If it's indeed a bug, fix that bug
# and update this test.
self.assertEqual(stdout[1], 'runfiles_manifest_only=')
self.assertEqual(stdout[2], 'runfiles_manifest_file=')
if self.IsWindows():
exit_code, stdout, stderr = self.RunProgram([bin2])
self.AssertExitCode(exit_code, 0, stderr)
self.assertEqual(stdout[0], 'hello batch')
def _buildPyTargets(self, bazel_bin, binary_suffix):
# Verify that the build of our py_binary succeeds.
self.RunBazel(['build', '//foo:foo'])
# Verify that generated files exist.
foo_bin = os.path.join(bazel_bin, 'foo', 'foo%s' % binary_suffix)
self.assertTrue(os.path.isfile(foo_bin))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, 'foo/foo%s.runfiles' % binary_suffix)))
# Verify contents of runfiles (manifest).
if self.IsWindows():
self.AssertRunfilesManifestContains(
os.path.join(
bazel_bin, 'foo/foo%s.runfiles/MANIFEST' % binary_suffix
),
'_main/bar/bar.txt',
)
else:
self.assertTrue(
os.path.islink(
os.path.join(bazel_bin, 'foo/foo.runfiles/_main/bar/bar.txt')
)
)
# Try to run the built py_binary.
_, stdout, _ = self.RunProgram([foo_bin])
self.assertEqual(stdout[0], 'Hello World!')
# Try to use the py_binary as an executable in a Starlark rule.
self.RunBazel(['build', '//foo:hello'])
# Verify that the Starlark action generated the right output.
hello_path = os.path.join(bazel_bin, 'foo', 'hello.txt')
self.assertTrue(os.path.isfile(hello_path))
with open(hello_path, 'r') as f:
self.assertEqual(f.read(), 'Hello World!')
# Verify that running py_test succeeds.
self.RunBazel(['test', '//foo:test'])
def _buildAndCheckArgumentPassing(self, package, target_name):
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//%s:%s' % (package, target_name)])
bin_suffix = '.exe' if self.IsWindows() else ''
bin1 = os.path.join(bazel_bin, package, '%s%s' % (target_name, bin_suffix))
self.assertTrue(os.path.exists(bin1))
self.assertTrue(
os.path.isdir(
os.path.join(bazel_bin, '%s/%s%s.runfiles' % (package, target_name,
bin_suffix))))
arguments = ['a', 'a b', '"b"', 'C:\\a\\b\\', '"C:\\a b\\c\\"']
_, stdout, _ = self.RunProgram([bin1] + arguments)
self.assertEqual(stdout, arguments)
def testJavaBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "foo",',
' srcs = ["Main.java"],',
' main_class = "Main",',
' data = ["//bar:bar.txt"],',
')',
])
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("hello java");',
' System.out.println("java_runfiles=" + ',
' System.getenv("JAVA_RUNFILES"));',
' System.out.println("runfiles_manifest_only=" + ',
' System.getenv("RUNFILES_MANIFEST_ONLY"));',
' System.out.println("runfiles_manifest_file=" + ',
' System.getenv("RUNFILES_MANIFEST_FILE"));',
' }',
'}',
])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildJavaTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testJavaBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "bin",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
])
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' for (String arg : args) {',
' System.out.println(arg);',
' }'
' }',
'}',
])
self._buildAndCheckArgumentPassing('foo', 'bin')
def testShBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'foo/BUILD',
[
# On Linux/MacOS, all sh_binary rules generate an output file with
# the same name as the rule, and this is a symlink to the file in
# `srcs`. (Bazel allows only one file in `sh_binary.srcs`.)
# On Windows, if the srcs's extension is one of ".exe", ".cmd", or
# ".bat", then Bazel requires the rule's name has the same
# extension, and the output file will be a copy of the source file.
'sh_binary(',
' name = "bin1.sh",',
' srcs = ["foo.sh"],',
' data = ["//bar:bar.txt"],',
')',
'sh_binary(',
' name = "bin2.cmd",', # name's extension matches that of srcs[0]
' srcs = ["foo.cmd"],',
' data = ["//bar:bar.txt"],',
')',
'sh_binary(',
' name = "bin3.bat",', # name's extension doesn't match srcs[0]'s
' srcs = ["foo.cmd"],',
' data = ["//bar:bar.txt"],',
')',
])
foo_sh = self.ScratchFile('foo/foo.sh', [
'#!/bin/bash',
'echo hello shell',
'echo runfiles_manifest_only=${RUNFILES_MANIFEST_ONLY:-}',
'echo runfiles_manifest_file=${RUNFILES_MANIFEST_FILE:-}',
])
foo_cmd = self.ScratchFile('foo/foo.cmd', ['@echo hello batch'])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
os.chmod(foo_cmd, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildShBinaryTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testShBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'sh_binary(',
' name = "bin",',
' srcs = ["bin.sh"],',
')',
])
foo_sh = self.ScratchFile('foo/bin.sh', [
'#!/bin/bash',
'# Store arguments in a array',
'args=("$@")',
'# Get the number of arguments',
'N=${#args[@]}',
'# Echo each argument',
'for (( i=0;i<$N;i++)); do',
' echo ${args[${i}]}',
'done',
])
os.chmod(foo_sh, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
self._buildAndCheckArgumentPassing('foo', 'bin')
def testPyBinaryLauncher(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'foo/foo.bzl',
[
'def _impl(ctx):',
' ctx.actions.run(',
' arguments=[ctx.outputs.out.path],',
' outputs=[ctx.outputs.out],',
' executable=ctx.executable._hello_world,',
' use_default_shell_env=True)',
'',
'helloworld = rule(',
' implementation=_impl,',
' attrs={',
' "srcs": attr.label_list(allow_files=True),',
' "out": attr.output(mandatory=True),',
' "_hello_world": attr.label(executable=True, cfg="exec",',
' allow_files=True,',
' default=Label("//foo:foo"))',
' }',
')',
],
)
self.ScratchFile('foo/BUILD', [
'load(":foo.bzl", "helloworld")', '', 'py_binary(', ' name = "foo",',
' srcs = ["foo.py"],', ' data = ["//bar:bar.txt"],', ')', '',
'py_test(', ' name = "test",', ' srcs = ["test.py"],', ')', '',
'helloworld(', ' name = "hello",', ' out = "hello.txt",', ')'
])
foo_py = self.ScratchFile('foo/foo.py', [
'#!/usr/bin/env python3',
'import sys',
'if len(sys.argv) == 2:',
' with open(sys.argv[1], "w") as f:',
' f.write("Hello World!")',
'else:',
' print("Hello World!")',
])
test_py = self.ScratchFile('foo/test.py', [
'#!/usr/bin/env python3',
'import unittest',
'class MyTest(unittest.TestCase):',
' def test_dummy(self):',
' pass',
'if __name__ == \'__main__\':',
' unittest.main()',
])
self.ScratchFile('bar/BUILD', ['exports_files(["bar.txt"])'])
self.ScratchFile('bar/bar.txt', ['hello'])
os.chmod(foo_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
os.chmod(test_py, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self._buildPyTargets(bazel_bin, '.exe' if self.IsWindows() else '')
def testPyBinaryArgumentPassing(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'py_binary(',
' name = "bin",',
' srcs = ["bin.py"],',
')',
])
self.ScratchFile('foo/bin.py', [
'import sys',
'for arg in sys.argv[1:]:',
' print(arg)',
])
self._buildAndCheckArgumentPassing('foo', 'bin')
def testPyBinaryLauncherWithDifferentArgv0(self):
"""Test for https://github.com/bazelbuild/bazel/issues/14343."""
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'py_binary(',
' name = "bin",',
' srcs = ["bin.py"],',
')',
])
self.ScratchFile('foo/bin.py', ['print("Hello world")'])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
# Verify that the build of our py_binary succeeds.
self.RunBazel(['build', '//foo:bin'])
# Try to run the built py_binary.
binary_suffix = '.exe' if self.IsWindows() else ''
foo_bin = os.path.join(bazel_bin, 'foo', 'bin%s' % binary_suffix)
args = [r'C:\Invalid.exe' if self.IsWindows() else '/invalid']
_, stdout, _ = self.RunProgram(args, executable=foo_bin)
self.assertEqual(stdout[0], 'Hello world')
def testWindowsJavaExeLauncher(self):
# Skip this test on non-Windows platforms
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'java_binary(',
' name = "foo",',
' srcs = ["Main.java"],',
' main_class = "Main",',
' jvm_flags = ["--flag1", "--flag2"],',
' data = ["advice-1.jar", "advice-2.jar"],',
')',
])
self.ScratchFile('foo/advice-1.jar')
self.ScratchFile('foo/advice-2.jar')
self.ScratchFile('foo/Main.java', [
'public class Main {',
' public static void main(String[] args) {',
' System.out.println("helloworld");',
' }',
'}',
])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//foo:foo'])
binary = os.path.join(bazel_bin, 'foo', 'foo.exe')
self.assertTrue(os.path.exists(binary))
# Add this flag to make launcher print the command it generated instead of
# launching the real program.
print_cmd = '--print_launcher_command'
_, stdout, _ = self.RunProgram([binary, '--debug', print_cmd])
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005',
stdout)
_, stdout, _ = self.RunProgram(
[binary, '--debug', print_cmd],
env_add={'DEFAULT_JVM_DEBUG_PORT': '12345'},
)
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=12345',
stdout)
_, stdout, _ = self.RunProgram(
[binary, '--debug=12345', print_cmd],
env_add={
'DEFAULT_JVM_DEBUG_SUSPEND': 'n',
'PERSISTENT_TEST_RUNNER': 'true',
},
)
self.assertIn(
'-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=12345'
',quiet=y', stdout)
_, stdout, _ = self.RunProgram([binary, '--main_advice=MyMain', print_cmd])
self.assertIn('MyMain', stdout)
_, stdout, _ = self.RunProgram([
binary,
'--main_advice_classpath=foo/advice-1.jar;foo/advice-2.jar',
print_cmd,
])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('foo/advice-1.jar', classpath)
self.assertIn('foo/advice-2.jar', classpath)
_, stdout, _ = self.RunProgram(
[binary, '--main_advice_classpath=C:\\foo\\bar', print_cmd]
)
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('C:\\foo\\bar', classpath)
_, stdout, _ = self.RunProgram(
[binary, '--jvm_flag="--some_path="./a b/c""', print_cmd]
)
self.assertIn('"--some_path=\\"./a b/c\\""', stdout)
_, stdout, _ = self.RunProgram(
[binary, '--jvm_flags="--path1=a --path2=b"', print_cmd]
)
self.assertIn('--path1=a', stdout)
self.assertIn('--path2=b', stdout)
_, stdout, _ = self.RunProgram(
[binary, print_cmd], env_add={'JVM_FLAGS': '--foo --bar'}
)
self.assertIn('--flag1', stdout)
self.assertIn('--flag2', stdout)
self.assertIn('--foo', stdout)
self.assertIn('--bar', stdout)
exit_code, stdout, stderr = self.RunProgram(
[binary, '--singlejar', print_cmd], allow_failure=True
)
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('foo_deploy.jar does not exist', ''.join(stderr))
self.RunBazel(['build', '//foo:foo_deploy.jar'])
_, stdout, _ = self.RunProgram([binary, '--singlejar', print_cmd])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertIn('foo_deploy.jar', classpath)
_, stdout, _ = self.RunProgram([binary, '--print_javabin'])
self.assertIn('local_jdk/bin/java.exe', ''.join(stdout))
my_tmp_dir = self.ScratchDir('my/temp/dir')
_, stdout, _ = self.RunProgram(
[binary, print_cmd], env_add={'TEST_TMPDIR': my_tmp_dir}
)
self.assertIn('-Djava.io.tmpdir=%s' % my_tmp_dir, stdout)
_, stdout, _ = self.RunProgram([binary, '--classpath_limit=0', print_cmd])
self.assertIn('-classpath', stdout)
classpath = stdout[stdout.index('-classpath') + 1]
self.assertRegexpMatches(classpath, r'foo-[A-Za-z0-9]+-classpath.jar$')
def testWindowsNativeLauncherInNonEnglishPath(self):
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('bin/BUILD', [
'java_binary(',
' name = "bin_java",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
'sh_binary(',
' name = "bin_sh",',
' srcs = ["main.sh"],',
')',
])
self.ScratchFile('bin/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("helloworld");',
' }',
'}',
])
self.ScratchFile('bin/main.sh', [
'echo "helloworld"',
])
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//bin/...'])
for f in [
'bin_java.exe',
'bin_java.exe.runfiles_manifest',
'bin_sh.exe',
'bin_sh',
'bin_sh.exe.runfiles_manifest',
]:
self.CopyFile(os.path.join(bazel_bin, 'bin', f),
os.path.join(u'./\u6d4b\u8bd5', f))
unicode_binary_path = u'./\u6d4b\u8bd5/bin_java.exe'
_, stdout, _ = self.RunProgram([unicode_binary_path])
self.assertEqual('helloworld', ''.join(stdout))
unicode_binary_path = u'./\u6d4b\u8bd5/bin_sh.exe'
_, stdout, _ = self.RunProgram([unicode_binary_path])
self.assertEqual('helloworld', ''.join(stdout))
def testWindowsNativeLauncherInLongPath(self):
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'bin/BUILD',
[
'java_binary(',
' name = "not_short_bin_java",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
'sh_binary(',
' name = "not_short_bin_sh",',
' srcs = ["main.sh"],',
')',
'py_binary(',
' name = "not_short_bin_py",',
' srcs = ["not_short_bin_py.py"],',
')',
],
)
self.ScratchFile('bin/Main.java', [
'public class Main {',
' public static void main(String[] args) {'
' System.out.println("helloworld");',
' }',
'}',
])
self.ScratchFile('bin/main.sh', [
'echo "helloworld"',
])
self.ScratchFile(
'bin/not_short_bin_py.py',
[
'print("helloworld")',
],
)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
exit_code, _, stderr = self.RunBazel(['build', '//bin/...'])
self.AssertExitCode(exit_code, 0, stderr)
# Create a directory with a path longer than 260
long_dir_path = './' + '/'.join(
[(c * 8 + '.' + c * 3) for c in string.ascii_lowercase])
# The 'not_short_' prefix ensures that the basenames are not already 8.3
# short paths. Due to the long directory path, the basename will thus be
# replaced with a short path such as "not_sh~1.exe" below.
for f in [
'not_short_bin_java.exe',
'not_short_bin_java.exe.runfiles_manifest',
'not_short_bin_sh.exe',
'not_short_bin_sh',
'not_short_bin_sh.exe.runfiles_manifest',
'not_short_bin_py.exe',
'not_short_bin_py.zip',
'not_short_bin_py.exe.runfiles_manifest',
]:
self.CopyFile(
os.path.join(bazel_bin, 'bin', f), os.path.join(long_dir_path, f))
long_binary_path = os.path.abspath(
long_dir_path + '/not_short_bin_java.exe'
)
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
long_binary_path = os.path.abspath(long_dir_path + '/not_short_bin_sh.exe')
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
long_binary_path = os.path.abspath(long_dir_path + '/not_short_bin_py.exe')
# subprocess doesn't support long path without shell=True
_, stdout, _ = self.RunProgram([long_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
# Make sure we can launch the binary with a shortened Windows 8dot3 path
short_binary_path = win32api.GetShortPathName(long_binary_path)
self.assertIn('~', os.path.basename(short_binary_path))
_, stdout, _ = self.RunProgram([short_binary_path], shell=True)
self.assertEqual('helloworld', ''.join(stdout))
def testWindowsNativeLauncherInvalidArgv0(self):
if not self.IsWindows():
return
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'bin/BUILD',
[
'java_binary(',
' name = "bin_java",',
' srcs = ["Main.java"],',
' main_class = "Main",',
')',
'sh_binary(',
' name = "bin_sh",',
' srcs = ["main.sh"],',
')',
'py_binary(',
' name = "bin_py",',
' srcs = ["bin_py.py"],',
')',
],
)
self.ScratchFile(
'bin/Main.java',
[
'public class Main {',
(
' public static void main(String[] args) {'
' System.out.println("helloworld");'
),
' }',
'}',
],
)
self.ScratchFile(
'bin/main.sh',
[
'echo "helloworld"',
],
)
self.ScratchFile(
'bin/bin_py.py',
[
'print("helloworld")',
],
)
_, stdout, _ = self.RunBazel(['info', 'bazel-bin'])
bazel_bin = stdout[0]
self.RunBazel(['build', '//bin/...'])
_, stdout, _ = self.RunProgram(
['C:\\Invalid'],
executable=os.path.join(bazel_bin, 'bin', 'bin_java.exe'),
)
self.assertEqual('helloworld', ''.join(stdout))
_, stdout, _ = self.RunProgram(
['C:\\Invalid'], executable=os.path.join(bazel_bin, 'bin', 'bin_sh.exe')
)
self.assertEqual('helloworld', ''.join(stdout))
_, stdout, _ = self.RunProgram(
['C:\\Invalid'], executable=os.path.join(bazel_bin, 'bin', 'bin_py.exe')
)
self.assertEqual('helloworld', ''.join(stdout))
def AssertRunfilesManifestContains(self, manifest, entry):
with open(manifest, 'r') as f:
for l in f:
tokens = l.strip().split(' ', 1)
if len(tokens) == 2 and tokens[0] == entry:
return
self.fail('Runfiles manifest "%s" did not contain "%s"' % (manifest, entry))
if __name__ == '__main__':
absltest.main()
| bazelbuild/bazel | src/test/py/bazel/launcher_test.py | launcher_test.py | py | 26,523 | python | en | code | 21,632 | github-code | 6 | [
{
"api_name": "os.name",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "src.test.py.bazel.test_base.TestBase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "src.test.py.bazel.test_base",
"line_number": 12,
"usage_type": "name"
},
{
... |
10242082365 | # -*- coding: utf-8 -*-
"""The Simulator takes in a :obj:`seagull.Board`, and runs a simulation given a
set number of iterations and a rule. For each iteration, the rule is applied to
the Board in order to evolve the lifeforms. After the simulation, run
statistics are returned.
.. code-block:: python
import seagull as sg
board = sg.Board()
board.add(Blinker(), loc=(0,0))
# Initialize a simulator
sim = sg.Simulator(board)
stats = sim.run(sg.rules.conway_classic, iters=1000)
You can always get the history of the whole simulation by calling the
`get_history()` method. The length of the history will always be equal to
:code:`iters + 1` since we include the initial state
.. note::
Running a simulation does not change the :code:`state` attribute of the
board. Internally, the simulator makes a copy of that layout and updates
that instead. This is to avoid unintended behaviour when running
simulations again and again.
Various statistics such as entropy, peak cell coverage, and the like are
returned as a dictionary. This gives us an idea on the characteristics of the
simulation experiment.
.. note::
Some statistics are highly-dependent on the size of the board and the
number of iterations. For example, peak cell coverage (pertaining to the
max. amount of active cells during the whole run) depends on board size. If
you have better ideas for computing these statistics, please open-up an
Issue!
The :code:`run()` method only computes the progress of the board for the whole
simulation, but it does not animate it yet. To create an animation, call the
:code:`animate()` method:
.. code-block:: python
sim.animate()
This returns a :obj:`matplotlib.animation.FuncAnimation` that you can turn into
an interactive animation in your notebook or exported as a GIF.
.. note::
When exporting to GIF, it is required to have the ffmpeg backend installed.
"""
# Import standard library
from typing import Callable, Union
# Import modules
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
from matplotlib import animation
from .board import Board
from .utils import statistics as stats
class Simulator:
def __init__(self, board: Board):
"""Initialize the class
Parameters
----------
board : seagull.Board
The board to run the simulation on
"""
self.board = board
self.history = [] # type: list
self.stats = {} # type: dict
def run(self, rule: Callable, iters: int, **kwargs) -> dict:
"""Run the simulation for a given number of iterations
Parameters
----------
rule : callable
Callable that takes in an array and returns an array of the same
shape.
iters : int
Number of iterations to run the simulation.
Returns
-------
dict
Computed statistics for the simulation run
"""
layout = self.board.state.copy()
# Append the initial state
self.history.append(layout)
# Run simulation
for i in range(iters):
layout = rule(layout, **kwargs)
self.history.append(layout)
self.stats = self.compute_statistics(self.get_history())
return self.stats
def compute_statistics(self, history: Union[list, np.ndarray]) -> dict:
"""Compute various statistics for the board
Parameters
----------
history : list or numpy.ndarray
The simulation history
Returns
-------
dict
Compute statistics
"""
logger.info("Computing simulation statistics...")
sim_stats = {
"peak_cell_coverage": np.max(
[stats.cell_coverage(h) for h in history]
),
"avg_cell_coverage": np.mean(
[stats.cell_coverage(h) for h in history]
),
"avg_shannon_entropy": np.mean(
[stats.shannon_entropy(h) for h in history]
),
"peak_shannon_entropy": np.max(
[stats.shannon_entropy(h) for h in history]
),
}
return sim_stats
def get_history(self, exclude_init=False) -> np.ndarray:
"""Get the simulation history
Parameters
----------
exclude_init: bool
If True, then excludes the initial state in the history
Returns
-------
numpy.ndarray
Simulation history of shape :code:`(iters+1, board.size[0],
board.size[1])`
"""
history = self.history[1:] if exclude_init else self.history
return np.asarray(history)
def animate(self, figsize=(5, 5), interval=100) -> animation.FuncAnimation:
"""Animate the resulting simulation
Parameters
----------
figsize : tuple
Size of the output figure
interval : int
Interval for transitioning between frames
Returns
-------
matplotlib.animation.FuncAnimation
Animation generated from the run
"""
if not self.history:
msg = "The run() argument must be executed first"
logger.error(msg)
raise ValueError(msg)
logger.info("Rendering animation...")
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
X_blank = np.zeros(self.board.size, dtype=bool)
im = ax.imshow(X_blank, cmap=plt.cm.binary, interpolation="nearest")
im.set_clim(-0.05, 1)
def _animate(i, history):
current_pos = history[i]
im.set_data(current_pos)
return (im,)
def _init():
im.set_data(X_blank)
return (im,)
history = self.get_history()
anim = animation.FuncAnimation(
fig,
func=_animate,
frames=range(history.shape[0]),
init_func=_init,
interval=interval,
fargs=(history,),
blit=True,
)
return anim
| ljvmiranda921/seagull | seagull/simulator.py | simulator.py | py | 6,209 | python | en | code | 167 | github-code | 6 | [
{
"api_name": "board.Board",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_n... |
1592231392 | import asyncio
from flask import Blueprint, abort, flash, redirect, render_template, request, jsonify, url_for, Response
from werkzeug.utils import secure_filename
import socket
from flask_socketio import SocketIO, emit
from app import app, db, socketio
import os
import time
HOST = "127.0.1.1"
WEBSOCKET_PORT = 9999
CHUNK_SIZE = 4096 # Define o tamanho do pacote. Pode ser ajustado conforme necessário.
# Lista de endereços IP dos servidores para armazenamento de réplicas
REPLICA_SERVERS = [HOST, HOST, HOST] #ips locais mockados
#REPLICA_SERVERS = ["192.168.1.2", "192.168.1.3", "192.168.1.4"] # IPs das máquinas das réplicas
main = Blueprint('main', __name__)
MIME_TYPES = {
"mp4": "video/mp4",
"avi": "video/x-msvideo",
"mkv": "video/x-matroska",
"flv": "video/x-flv"
}
class StreamingError(Exception):
"""Exceção personalizada para erros de streaming."""
pass
class Video(db.Model):
__tablename__ = 'video'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
filename = db.Column(db.String(150), unique=True, nullable=False)
description = db.Column(db.String(500), nullable=True)
with app.app_context():
db.create_all()
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
def upload_to_replica(filename, file_content):
for server_ip in REPLICA_SERVERS:
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((server_ip, WEBSOCKET_PORT))
# Enviar comando UPLOAD
header = f"UPLOAD"
client.send(header.encode())
# Enviar tamanho do arquivo como uma string de tamanho 10
client.send(str(len(file_content)).encode().zfill(10))
# Enviar tamanho do nome do arquivo
client.send(str(len(filename)).encode().zfill(10))
# Enviar nome do arquivo
client.send(filename.encode())
# Enviar os dados do arquivo
client.sendall(file_content)
client.close()
except Exception as e:
print(f"Erro ao enviar para servidor {server_ip}: {e}")
@main.route('/upload', methods=['POST'])
def upload_file():
if 'file' not in request.files:
return jsonify({"error": "No file provided"}), 400
file = request.files['file']
if file.filename == '':
return jsonify({"error": "No file selected"}), 400
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
new_video = Video(filename=filename)
db.session.add(new_video)
db.session.commit()
with open(file_path, 'rb') as f:
file_content = f.read()
upload_to_replica(filename, file_content)
return "File uploaded successfully! You can now upload another file."
return jsonify({"error": "Invalid file type"}), 400
@main.route('/', methods=['GET'])
def show_upload():
return render_template('upload.html')
@main.route('/videos', methods=['GET'])
def list_videos():
videos = Video.query.all()
return render_template('video_list.html', videos=videos)
from websockets import connect as ws_connect
@main.route('/play/<int:video_id>', methods=['GET'])
def play_video(video_id):
video = Video.query.get(video_id)
video_name = video.filename
# Adicionando failover para o streaming de vídeo
for _ in range(3): # Tenta até 3 vezes, uma para cada réplica
try:
return stream_video(video_name)
except StreamingError:
continue # Se ocorrer um erro, tenta a próxima réplica
return "Não foi possível reproduzir o vídeo."
def stream_video(video_name):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((HOST, 9999))
header = f"STREAM"
client.send(header.encode())
client.send(str(len(video_name)).zfill(10).encode())
client.send(video_name.encode())
def generate():
while True:
chunk = client.recv(CHUNK_SIZE)
if not chunk:
break
yield chunk
ext = video_name.split('.')[-1]
mime_type = MIME_TYPES.get(ext, "video/mp4")
return Response(generate(), content_type=mime_type)
except ConnectionError:
# Esta exceção pode ser lançada se houver um problema de conexão de rede
raise StreamingError("Erro de conexão durante o streaming do vídeo")
@main.route('/delete_video/<int:video_id>', methods=['POST'])
def delete_video(video_id):
video = Video.query.get(video_id)
if video:
db.session.delete(video)
db.session.commit()
return redirect(url_for('main.list_videos'))
else:
# Caso o vídeo não seja encontrado no banco de dados
flash('Vídeo não encontrado', 'error')
return redirect(url_for('main.list_videos'))
if __name__ == '__main__':
app.run(debug=True)
| isaacbrasil/My-youtube-flask | app/blueprints/client.py | client.py | py | 5,180 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.db.Model",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "app.db.Column",
"line_nu... |
35049082181 | """
3D convolutions using GPU accelereration for Theano (using conv2d)
https://github.com/jaberg/TheanoConv3d2d
"""
import theano
from theano.gradient import DisconnectedType
from theano.gof import Op, Apply
from theano import tensor
import theano.sandbox.cuda as cuda
def get_diagonal_subtensor_view(x, i0, i1):
    """Return a diagonal-stripe *view* (no copy) of ``x`` over axes i0/i1.

    Helper for DiagonalSubtensor and IncDiagonalSubtensor.  Axis ``i0``
    is shortened by ``x.shape[i1] - 1`` and the stride along ``i1`` is
    sheared so that element ``[r, c]`` of the view (restricting to the
    two relevant axes) aliases ``x[r + (shape[i1] - 1) - c, c]``.
    """
    if x.shape[i0] < x.shape[i1]:
        raise NotImplementedError('is this allowed?')
    # Drop the first shape[i1]-1 entries along axis i0.
    selector = [slice(None)] * x.ndim
    selector[i0] = slice(x.shape[i1] - 1, None, None)
    stripe = x[tuple(selector)]
    # Shear the view: stepping along i1 also steps backwards along i0.
    new_strides = list(stripe.strides)
    new_strides[i1] -= new_strides[i0]
    stripe.strides = new_strides
    return stripe
class DiagonalSubtensor(Op):
    """Return a form of nd diagonal subtensor.
    :param x: n-d tensor
    :param i0: axis index in x
    :param i1: axis index in x
    :note: Work on the GPU.
    ``x`` is some n-dimensional tensor, but this Op only deals with a
    matrix-shaped slice, using axes i0 and i1. Without loss of
    generality, suppose that ``i0`` picks out our ``row`` dimension,
    and i1 the ``column`` dimension.
    So the relevant part of ``x`` is some matrix ``u``. Suppose it has 6 rows
    and 4 columns::
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
    The view returned by this function is also a matrix. It's a thick,
    diagonal ``stripe`` across u that discards the lower left triangle
    and the upper right triangle:
        [ x 0 0 0 ]
        [ x x 0 0 ]
        [ x x x 0 ]
        [ 0 x x x ]
        [ 0 0 x x ]
        [ 0 0 0 x ]
    In this case the return value would be this view of shape 3x4. The
    returned view has the same number of dimensions as the input
    ``x``, and the only difference is that the shape along dimension
    ``i0`` has been reduced by ``shape[i1] - 1`` because of the
    triangles that got chopped out.
    The NotImplementedError is meant to catch the case where shape[i0]
    is too small for the stripe to reach across the matrix, in which
    case it's not clear what this function should do. Maybe always
    raise an error. I'd look back to the call site in the Conv3D to
    see what's necessary at that point.
    """
    def __str__(self):
        # Distinguish the destructive variant in graph printouts.
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__
    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Output 0 is a view of input 0 (Theano aliasing declaration).
            self.view_map = {0: [0]}
    def __eq__(self, other):
        # Ops compare equal iff same class and same inplace flag, so the
        # graph optimizer can merge identical nodes.
        return type(self) == type(other) and self.inplace == other.inplace
    def __hash__(self):
        # Must stay consistent with __eq__.
        return hash((type(self), self.inplace))
    def make_node(self, x, i0, i1):
        # Wrap the axis indices as symbolic tensors; output has x's type.
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1], [x.type()])
    def perform(self, node, inputs, output_storage):
        # Numeric implementation: build the strided view, copy unless inplace.
        xview = get_diagonal_subtensor_view(*inputs)
        if self.inplace:
            output_storage[0][0] = xview
        else:
            output_storage[0][0] = xview.copy()
    def grad(self, inputs, g_outputs):
        # Gradient scatters g_outputs back into a zero tensor shaped like x;
        # the axis arguments are non-differentiable (disconnected).
        z = tensor.zeros_like(inputs[0])
        gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
        return [gx, DisconnectedType()(), DisconnectedType()()]
    def connection_pattern(self, node):
        # Only input 0 (x) influences the output; i0/i1 are structural.
        rval = [[True], [False], [False]]
        return rval
# Module-level non-inplace instance used as the public entry point.
diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op):
    """
    The gradient of DiagonalSubtensor: increments the diagonal-stripe
    view of ``x`` (over axes i0/i1) by ``amt`` and returns the result.
    """
    def __str__(self):
        # Distinguish the destructive variant in graph printouts.
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__
    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Output 0 destroys input 0 (Theano aliasing declaration).
            self.destroy_map = {0: [0]}
    def __eq__(self, other):
        # Equal iff same class and same inplace flag (enables node merging).
        return type(self) == type(other) and self.inplace == other.inplace
    def __hash__(self):
        # Must stay consistent with __eq__.
        return hash((type(self), self.inplace))
    def make_node(self, x, i0, i1, amt):
        # Wrap the axis indices as symbolic tensors; output has x's type.
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1, amt], [x.type()])
    def perform(self, node, inputs, output_storage):
        x, i0, i1, amt = inputs
        if not self.inplace:
            # Work on a copy so the caller's array is untouched.
            x = x.copy()
        # Adding through the strided view accumulates amt into the stripe.
        xview = get_diagonal_subtensor_view(x, i0, i1)
        xview += amt
        output_storage[0][0] = x
    def grad(self, inputs, g_outputs):
        # d/dx passes the output gradient straight through; d/damt is the
        # stripe of the output gradient; axis args are disconnected.
        x, i0, i1, amt = inputs
        gy = g_outputs[0]
        return [gy, DisconnectedType()(), DisconnectedType()(),
                diagonal_subtensor(gy, i0, i1)]
    def connection_pattern(self, node):
        # x and amt influence the output; i0/i1 are structural.
        rval = [[True], [False], [False], [True]]
        return rval
# Module-level non-inplace instance used as the public entry point.
inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(signals, filters,
           signals_shape=None, filters_shape=None,
           border_mode='valid'):
    """Convolve spatio-temporal filters with a movie.

    Implemented by folding the time axis into the batch axis, running a
    single 2d convolution, then summing along a diagonal of the
    (Ts, Tf) sub-matrix to realize the temporal convolution.

    :param signals: timeseries of images whose pixels have color channels.
        shape: [Ns, Ts, C, Hs, Ws]
    :param filters: spatio-temporal filters
        shape: [Nf, Tf, C, Hf, Wf]
    :param signals_shape: None or a tuple/list with the shape of signals
    :param filters_shape: None or a tuple/list with the shape of filters
    :param border_mode: The only one tested is 'valid'.
    :note: Work on the GPU.
    """
    if isinstance(border_mode, str):
        # A single mode string applies to (time, height, width).
        border_mode = (border_mode, border_mode, border_mode)
    _signals_shape_5d = signals.shape if signals_shape is None else signals_shape
    _filters_shape_5d = filters.shape if filters_shape is None else filters_shape
    # Collapse (batch, time) into one axis so conv2d can process all frames.
    _signals_shape_4d = (
        _signals_shape_5d[0] * _signals_shape_5d[1],
        _signals_shape_5d[2],
        _signals_shape_5d[3],
        _signals_shape_5d[4],
    )
    _filters_shape_4d = (
        _filters_shape_5d[0] * _filters_shape_5d[1],
        _filters_shape_5d[2],
        _filters_shape_5d[3],
        _filters_shape_5d[4],
    )
    if border_mode[1] != border_mode[2]:
        raise NotImplementedError('height and width bordermodes must match')
    # Only pass static shapes to conv2d when the caller supplied them.
    conv2d_signal_shape = _signals_shape_4d
    conv2d_filter_shape = _filters_shape_4d
    if signals_shape is None:
        conv2d_signal_shape = None
    if filters_shape is None:
        conv2d_filter_shape = None
    out_4d = tensor.nnet.conv2d(
        signals.reshape(_signals_shape_4d),
        filters.reshape(_filters_shape_4d),
        image_shape=conv2d_signal_shape,
        filter_shape=conv2d_filter_shape,
        border_mode = border_mode[1]) # ignoring border_mode[2]
    # reshape the output to restore its original size
    # shape = Ns, Ts, Nf, Tf, W-Wf+1, H-Hf+1
    if border_mode[1] == 'valid':
        out_tmp = out_4d.reshape((
            _signals_shape_5d[0],  # Ns
            _signals_shape_5d[1],  # Ts
            _filters_shape_5d[0],  # Nf
            _filters_shape_5d[1],  # Tf
            _signals_shape_5d[3] - _filters_shape_5d[3] + 1,
            _signals_shape_5d[4] - _filters_shape_5d[4] + 1,
        ))
    elif border_mode[1] == 'full':
        out_tmp = out_4d.reshape((
            _signals_shape_5d[0],  # Ns
            _signals_shape_5d[1],  # Ts
            _filters_shape_5d[0],  # Nf
            _filters_shape_5d[1],  # Tf
            _signals_shape_5d[3] + _filters_shape_5d[3] - 1,
            _signals_shape_5d[4] + _filters_shape_5d[4] - 1,
        ))
    elif border_mode[1] == 'same':
        raise NotImplementedError()
    else:
        raise ValueError('invalid border mode', border_mode[1])
    # now sum out along the Tf to get the output
    # but we have to sum on a diagonal through the Tf and Ts submatrix.
    if border_mode[0] == 'valid':
        out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
    elif border_mode[0] in ('full', 'same'):
        # NOTE(review): this reshapes the raw conv2d output straight back to
        # the *input* signal shape and skips the diagonal Tf summation done
        # by the 'valid' path; looks suspect (spatial dims differ for
        # 'full') -- verify before relying on these modes.
        out_5d = out_4d.reshape((_signals_shape_5d))
        # raise NotImplementedError('sequence border mode', border_mode[0])
    else:
        raise ValueError('invalid border mode', border_mode[1])
    return out_5d
def make_gpu_optimizer(op, to_gpu):
    """This function create optimizer that move some inputs to the GPU
    for op that work on both CPU and GPU.
    The op object is created by calling op(), so good default value
    are needed.
    We suppose the same op work with CPU and GPU inputs.
    :param op: the op that support GPU inputs
    :param to_gpu: a list of op inputs that are moved to the GPU.
    """
    @theano.gof.local_optimizer([])
    def local_to_gpu(node):
        """
        op(host_from_gpu()) -> host_from_gpu(op)
        gpu_from_host(op) -> op(gpu_from_host)
        """
        if isinstance(node.op, op):
            #op(host_from_gpu()) -> host_from_gpu(op)
            #If any of the input that go on the GPU are on the GPU,
            #move the op to the gpu.
            if any(node.inputs[idx].owner and
                   isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
                   for idx in to_gpu):
                new_inp = list(node.inputs)
                for idx in to_gpu:
                    # Re-wrap the selected inputs as GPU variables.
                    new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
                # Rebuild the op (fresh instance via op()) on GPU inputs and
                # transfer its result back to the host.
                return [cuda.host_from_gpu(op()(*new_inp))]
        if node.op == cuda.gpu_from_host:
            #gpu_from_host(op) -> op(gpu_from_host)
            host_input = node.inputs[0]
            if host_input.owner and isinstance(host_input.owner.op,
                                               op):
                op_node = host_input.owner
                new_inp = list(op_node.inputs)
                for idx in to_gpu:
                    new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
                return [op()(*new_inp)]
        # No rewrite applies to this node.
        return False
    # Give the optimizer a distinct name and register it with Theano's
    # CUDA optimization database.
    local_to_gpu.__name__ = "local_to_gpu_" + op.__name__
    cuda.opt.register_opt()(local_to_gpu)
# Register the GPU-transfer optimizations only when CUDA is available;
# input 0 (and input 3 for the inc op) are the tensors moved to the GPU.
if cuda.cuda_available:
    make_gpu_optimizer(DiagonalSubtensor, [0])
    make_gpu_optimizer(IncDiagonalSubtensor, [0, 3])
{
"api_name": "theano.gof.Op",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "theano.tensor.as_tensor_variable",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "theano.tensor",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "theano.t... |
70337573629 | import math
import copy
import numpy as np
import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import slowfast.models.losses as losses
import slowfast.models.optimizer as optim
import slowfast.utils.checkpoint as cu
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
import slowfast.utils.metrics as metrics
import slowfast.utils.misc as misc
import slowfast.visualization.tensorboard_vis as tb
from slowfast.datasets import loader
from slowfast.datasets.mixup import MixUp
from slowfast.models import build_model
from slowfast.utils.meters import EpochTimer, TrainMeter, ValMeter, AdaMeter
# Module-level logger, named after this module.
logger = logging.get_logger(__name__)
def train_epoch(
    train_loaders,
    model,
    optimizers,
    scaler,
    train_meter,
    cur_epoch,
    cfg,
    writer=None,
):
    """
    Perform the video training for one epoch.
    Args:
        train_loaders (list of loader): source and target video training loader.
        model (model): the video model to train.
        optimizers (list of optim): backbone and classifier optimizers used
            to update the model's parameters.
        scaler (GradScaler): mixed-precision gradient scaler.
            NOTE(review): `scaler` is accepted but never used below; backward
            and step run un-scaled even under autocast -- confirm intended.
        train_meter (TrainMeter): training meters to log the training performance.
        cur_epoch (int): current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # Unpack the loaders: [source, target-unlabeled] plus an optional
    # target-labeled loader in the semi-supervised setting.
    source_loader = train_loaders[0]
    target_unl_loader = train_loaders[1]
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        target_lab_loader = train_loaders[2]
    # Separate optimizers for the feature backbone (f) and classifier (c).
    optimizer_f, optimizer_c = optimizers[0], optimizers[1]
    # Enable train mode.
    model.train()
    train_meter.iter_tic()
    data_size = len(source_loader)
    target_unl_iter = iter(target_unl_loader)
    target_unl_size = len(target_unl_loader)
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        target_lab_iter = iter(target_lab_loader)
        target_lab_size = len(target_lab_loader)
    # The source loader drives the epoch; target iterators are re-created
    # whenever they would be exhausted.
    for cur_iter, (inputs_source, labels_source, _, _) in enumerate(source_loader):
        # Load the data.
        if cur_iter%target_unl_size==0:
            target_unl_iter = iter(target_unl_loader)
        inputs_target_unl, labels_target_unl, _, _ = next(target_unl_iter)
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            if cur_iter%target_lab_size==0:
                target_lab_iter = iter(target_lab_loader)
            inputs_target_lab, labels_target_lab, _, _ = next(target_lab_iter)
        # Transfer the data to the current GPU device.
        for i in range(len(inputs_source)):
            inputs_source[i] = inputs_source[i].cuda(non_blocking=True)
            inputs_target_unl[i] = inputs_target_unl[i].cuda(non_blocking=True)
        labels_source = labels_source.cuda()
        labels_target_unl = labels_target_unl.cuda()
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            for i in range(len(inputs_source)):
                inputs_target_lab[i] = inputs_target_lab[i].cuda(non_blocking=True)
            labels_target_lab = labels_target_lab.cuda()
        # Update the learning rate (fractional epoch schedule) on both
        # optimizers.
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer_f, lr)
        optim.set_lr(optimizer_c, lr)
        train_meter.data_toc()
        # Each batch carries two augmented views: index 0 = strong
        # augmentation, index 1 = weak augmentation.
        source_weak = inputs_source[1]
        source_strong = inputs_source[0]
        target_unl_weak = inputs_target_unl[1]
        target_unl_strong = inputs_target_unl[0]
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            target_lab_weak = inputs_target_lab[1]
            target_lab_strong = inputs_target_lab[0]
        # Assemble the labeled batch (source, plus labeled target when
        # semi-supervised) and the unlabeled target batch.
        if not cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            lab_inputs = [source_strong]
            lab_labels = labels_source
            unl_inputs = [target_unl_weak]
            unl_labels = labels_target_unl
        else:
            lab_inputs = [torch.cat((source_strong, target_lab_strong), dim=0)]
            lab_labels = torch.cat((labels_source, labels_target_lab), dim=0)
            unl_inputs = [target_unl_weak]
            unl_labels = labels_target_unl
        with torch.cuda.amp.autocast(enabled=cfg.TRAIN.MIXED_PRECISION):
            # Step A train all networks to minimize loss on source domain
            # (cross-entropy on the labeled batch; updates backbone and
            # classifier).
            optimizer_f.zero_grad()
            optimizer_c.zero_grad()
            lab_preds, lab_feats = model(lab_inputs)
            criterion = nn.CrossEntropyLoss()
            loss_s = criterion(lab_preds, lab_labels)
            loss_s.backward()
            optimizer_f.step()
            optimizer_c.step()
            # Step B train classifier to maximize discrepancy: the entropy
            # term on unlabeled target data is combined with reverse=True,
            # which presumably applies gradient reversal inside the model
            # (MME-style minimax) -- confirm against the model implementation.
            optimizer_f.zero_grad()
            optimizer_c.zero_grad()
            unl_preds, unl_feats = model(unl_inputs, reverse=True)
            new_preds = F.softmax(unl_preds, dim=1)
            loss_h = cfg.MME.LAMBDA * torch.mean(
                torch.sum(new_preds * (torch.log(new_preds + 1e-5)), 1))
            loss_h.backward()
            optimizer_f.step()
            optimizer_c.step()
            # Snapshot the classifier weights as class prototypes.
            # NOTE(review): `model.module` assumes a DataParallel/DDP
            # wrapper; this would fail on an unwrapped (CPU / 1-GPU) model.
            prototypes = model.module.head.weight.clone().detach()
        # Compute the errors.
        num_topks_correct = metrics.topks_correct(lab_preds, lab_labels, (1, 5))
        top1_err, top5_err = [
            (1.0 - x / lab_preds.size(0)) * 100.0 for x in num_topks_correct
        ]
        # Gather all the predictions across all the devices.
        if cfg.NUM_GPUS > 1:
            loss_s, loss_h, top1_err, top5_err = du.all_reduce(
                [loss_s, loss_h, top1_err, top5_err]
            )
        # Copy the stats from GPU to CPU (sync point).
        loss_s, loss_h, top1_err, top5_err = (
            loss_s.item(),
            loss_h.item(),
            top1_err.item(),
            top5_err.item()
        )
        batch_size = inputs_source[0].size(0)*max(cfg.NUM_GPUS, 1)
        # Update and log stats.
        train_meter.update_stats(
            top1_err,
            top5_err,
            loss_s,
            lr,
            batch_size,
        )
        # write to tensorboard format if available.
        if writer is not None:
            dict2write = {
                "Train/loss_s": loss_s,
                "Train/loss_h": -loss_h,
                "Train/lr": lr,
                "Train/Top1_err": top1_err,
                "Train/Top5_err": top5_err,
            }
            writer.add_scalars(dict2write, global_step=data_size * cur_epoch + cur_iter)
            # Periodically log confusion matrices over the accumulated
            # labeled/unlabeled predictions.
            if cfg.TENSORBOARD.DIST_VIS.ENABLE and (data_size * cur_epoch + cur_iter)%cfg.TENSORBOARD.DIST_VIS.LOG_PERIOD==1:
                writer.add_confusion_matrix(
                    torch.argmax(torch.cat(train_meter.all_source_weak, dim=0), dim=1),
                    torch.cat(train_meter.all_source_labels, dim=0),
                    tag="Confusion/Labeled",
                    global_step=data_size * cur_epoch + cur_iter
                )
                writer.add_confusion_matrix(
                    torch.argmax(torch.cat(train_meter.all_target_weak, dim=0), dim=1),
                    torch.cat(train_meter.all_target_labels, dim=0),
                    tag="Confusion/Unlabeled",
                    global_step=data_size * cur_epoch + cur_iter
                )
            # Periodically log sample videos with their predictions.
            if cfg.TENSORBOARD.SAMPLE_VIS.ENABLE and (data_size * cur_epoch + cur_iter)%cfg.TENSORBOARD.SAMPLE_VIS.LOG_PERIOD==0:
                writer.add_video_pred(
                    lab_inputs[0],
                    torch.argmax(lab_preds, dim=1),
                    lab_labels,
                    tag="Sample/Source",
                    global_step = data_size * cur_epoch + cur_iter,
                )
                writer.add_video_pred(
                    unl_inputs[0],
                    torch.argmax(unl_preds, dim=1),
                    unl_labels,
                    tag="Sample/Target",
                    global_step = data_size * cur_epoch + cur_iter,
                )
        train_meter.iter_toc()  # measure allreduce for this meter
        train_meter.update_predictions(
            lab_preds, lab_feats, lab_labels,
            unl_preds, unl_feats, unl_labels, prototypes,
        )
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        torch.cuda.synchronize()
        train_meter.iter_tic()
        # Free per-iteration tensors before the next batch.
        del inputs_source, inputs_target_unl, labels_source, labels_target_unl
        if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
            del inputs_target_lab, labels_target_lab
        # in case of fragmented memory
        torch.cuda.empty_cache()
    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.TENSORBOARD.EPOCH_LOG.ENABLE:
            writer.writer.add_scalars(
                "Error/Top1_err",
                {"Train": train_meter.num_top1_mis / train_meter.num_samples}, global_step=cur_epoch
            )
            writer.writer.add_scalars(
                "Error/Top5_err",
                {"Train": train_meter.num_top5_mis / train_meter.num_samples}, global_step=cur_epoch
            )
        if cfg.TENSORBOARD.CONFUSION_MATRIX.ENABLE:
            # Move accumulated predictions to CPU before plotting.
            all_preds = [pred.clone().detach() for pred in train_meter.all_source_strong]
            all_labels = [label.clone().detach() for label in train_meter.all_source_labels]
            all_preds = [pred.cpu() for pred in all_preds]
            all_labels = [label.cpu() for label in all_labels]
            writer.plot_eval(
                preds=all_preds,
                labels=all_labels,
                global_step=cur_epoch,
                tag="Confusion/Train"
            )
    train_meter.reset()
@torch.no_grad()
def eval_epoch(
    val_loader, model, val_meter, cur_epoch, cfg, writer=None
):
    """
    Evaluate the model on the val set.
    Args:
        val_loader (loader): data loader to provide validation data.
        model (model): model to evaluate the performance.
        val_meter (ValMeter): meter instance to record and calculate the metrics.
        cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to writer Tensorboard log.
    """
    # Evaluation mode enabled. The running stats would not be updated.
    model.eval()
    val_meter.iter_tic()
    for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
        if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
            if isinstance(inputs, (list,)):
                for i in range(len(inputs)):
                    inputs[i] = inputs[i].cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            labels = labels.cuda()
            # Move any tensors nested in the metadata dict as well.
            for key, val in meta.items():
                if isinstance(val, (list,)):
                    for i in range(len(val)):
                        val[i] = val[i].cuda(non_blocking=True)
                else:
                    meta[key] = val.cuda(non_blocking=True)
        val_meter.data_toc()
        # Forward pass; the model returns (predictions, features).
        preds, _ = model(inputs)
        if cfg.DATA.MULTI_LABEL:
            # Multi-label: just gather raw predictions across devices.
            if cfg.NUM_GPUS > 1:
                preds, labels = du.all_gather([preds, labels])
        else:
            # Compute the errors.
            num_topks_correct = metrics.topks_correct(preds, labels, (1, 5))
            # Combine the errors across the GPUs.
            top1_err, top5_err = [
                (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
            ]
            if cfg.NUM_GPUS > 1:
                top1_err, top5_err = du.all_reduce([top1_err, top5_err])
            # Copy the errors from GPU to CPU (sync point).
            top1_err, top5_err = top1_err.item(), top5_err.item()
            val_meter.iter_toc()
            # Update and log stats.
            val_meter.update_stats(
                top1_err,
                top5_err,
                inputs[0].size(0)
                * max(
                    cfg.NUM_GPUS, 1
                ),  # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
            )
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
                    global_step=len(val_loader) * cur_epoch + cur_iter,
                )
                # Periodically log sample videos with their predictions.
                if cfg.TENSORBOARD.SAMPLE_VIS.ENABLE and (len(val_loader) * cur_epoch + cur_iter)%cfg.TENSORBOARD.SAMPLE_VIS.LOG_PERIOD==0:
                    writer.add_video_pred(
                        inputs[0],
                        torch.argmax(preds, dim=1),
                        labels,
                        tag="Sample/Val",
                        global_step = len(val_loader) * cur_epoch + cur_iter,
                    )
        val_meter.update_predictions(preds, labels)
        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()
    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.TENSORBOARD.EPOCH_LOG.ENABLE:
            writer.writer.add_scalars(
                "Error/Top1_err",
                {"Val": val_meter.num_top1_mis / val_meter.num_samples}, global_step=cur_epoch
            )
            writer.writer.add_scalars(
                "Error/Top5_err",
                {"Val": val_meter.num_top5_mis / val_meter.num_samples}, global_step=cur_epoch
            )
        if cfg.TENSORBOARD.CONFUSION_MATRIX.ENABLE:
            # Move accumulated predictions to CPU before plotting.
            all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
            all_labels = [
                label.clone().detach() for label in val_meter.all_labels
            ]
            if cfg.NUM_GPUS:
                all_preds = [pred.cpu() for pred in all_preds]
                all_labels = [label.cpu() for label in all_labels]
            writer.plot_eval(
                preds=all_preds,
                labels=all_labels,
                global_step=cur_epoch,
                tag="Confusion/Val"
            )
    val_meter.reset()
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
    """
    Recompute precise batch-norm statistics for ``model``.

    Args:
        loader (loader): data loader to provide training data.
        model (model): model whose BN running stats are refreshed.
        num_iters (int): number of iterations to compute and update the bn stats.
        use_gpu (bool): whether to use GPU or not.
    """
    def _inputs():
        # Yield only the inputs of each batch, moved to GPU when requested.
        for batch in loader:
            inputs = batch[0]
            if use_gpu:
                if isinstance(inputs, list):
                    inputs = [x.cuda(non_blocking=True) for x in inputs]
                else:
                    inputs = inputs.cuda(non_blocking=True)
            yield inputs
    # Update the bn stats.
    update_bn_stats(model, _inputs(), num_iters)
def train(cfg):
    """
    Train a video model for many epochs on train set and evaluate it on val set.
    Args:
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
    """
    # Set up environment.
    du.init_distributed_training(cfg)
    # Set random seed from configs.
    np.random.seed(cfg.RNG_SEED)
    torch.manual_seed(cfg.RNG_SEED)
    # Setup logging format.
    logging.setup_logging(cfg.OUTPUT_DIR)
    # Print config.
    logger.info("Train with config:")
    logger.info(pprint.pformat(cfg))
    # Build the video model and print model statistics.
    # MME hyper-parameters are forwarded into the backbone config here.
    cfg.EXTRACT.ENABLE = True
    cfg.SWIN.TEMP = cfg.MME.TEMP
    cfg.SWIN.ETA = cfg.MME.ETA
    model = build_model(cfg)
    if du.is_master_proc() and cfg.LOG_MODEL_INFO:
        misc.log_model_info(model, cfg, use_train_input=True)
    # Construct the optimizer.
    # Split the model into backbone (everything but "head") and classifier
    # ("head") so each gets its own optimizer.
    sub_modules = []
    if cfg.NUM_GPUS > 1:
        for name, sub_module in model.module.named_modules():
            if name!="head":
                sub_modules.append(sub_module)
    else:
        for name, sub_module in model.named_modules():
            if name!="head":
                sub_modules.append(sub_module)
    backbone = nn.Sequential(*sub_modules)
    # NOTE(review): `model.module` assumes a DataParallel/DDP wrapper; on
    # the NUM_GPUS <= 1 path above the model is accessed without `.module`,
    # so this line looks inconsistent for single-device runs -- verify.
    classifier = model.module.get_submodule("head")
    optimizer_f = optim.construct_optimizer(backbone, cfg)
    optimizer_c = optim.construct_optimizer(classifier, cfg)
    optimizers = [optimizer_f, optimizer_c]
    # Create a GradScaler for mixed precision training
    scaler = torch.cuda.amp.GradScaler(enabled=cfg.TRAIN.MIXED_PRECISION)
    # Load a checkpoint to resume training if applicable.
    # NOTE(review): only optimizer_f is restored from the checkpoint;
    # optimizer_c state is not resumed -- confirm intended.
    start_epoch = cu.load_train_checkpoint(cfg, model, optimizer_f,
                                           scaler if cfg.TRAIN.MIXED_PRECISION else None)
    # Create the video train and val loaders.
    # Source/target datasets are derived from the base cfg by swapping the
    # IMDB file lists; target loaders get batch sizes scaled by ALPHA/BETA.
    if cfg.ADAPTATION.SEMI_SUPERVISED.ENABLE:
        source_cfg = copy.deepcopy(cfg)
        source_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE
        source_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.TARGET
        source_loader = loader.construct_loader(source_cfg, "train")
        val_loader = loader.construct_loader(source_cfg, "val")
        target_lab_cfg = copy.deepcopy(cfg)
        target_lab_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_lab_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_lab_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.ALPHA * source_cfg.TRAIN.BATCH_SIZE)
        target_lab_loader = loader.construct_loader(target_lab_cfg, "lab")
        target_unl_cfg = copy.deepcopy(cfg)
        target_unl_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_unl_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_unl_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.BETA * source_cfg.TRAIN.BATCH_SIZE)
        target_unl_loader = loader.construct_loader(target_unl_cfg, "unl")
        # Precise-BN loader sees both domains.
        bn_cfg = copy.deepcopy(cfg)
        bn_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE + cfg.ADAPTATION.TARGET
        bn_cfg.ADAMATCH.ENABLE = False
        precise_bn_loader = (
            loader.construct_loader(bn_cfg, "train", is_precise_bn=True)
            if cfg.BN.USE_PRECISE_STATS
            else None
        )
        train_loaders = [source_loader, target_unl_loader, target_lab_loader]
    else:
        source_cfg = copy.deepcopy(cfg)
        source_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE
        source_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.TARGET
        source_loader = loader.construct_loader(source_cfg, "train")
        val_loader = loader.construct_loader(source_cfg, "val")
        target_unl_cfg = copy.deepcopy(cfg)
        target_unl_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.TARGET
        target_unl_cfg.DATA.IMDB_FILES.VAL = cfg.ADAPTATION.SOURCE
        target_unl_cfg.TRAIN.BATCH_SIZE = int(cfg.ADAPTATION.BETA * source_cfg.TRAIN.BATCH_SIZE)
        target_unl_loader = loader.construct_loader(target_unl_cfg, "train")
        # Precise-BN loader sees both domains.
        bn_cfg = copy.deepcopy(cfg)
        bn_cfg.DATA.IMDB_FILES.TRAIN = cfg.ADAPTATION.SOURCE + cfg.ADAPTATION.TARGET
        bn_cfg.ADAMATCH.ENABLE = False
        precise_bn_loader = (
            loader.construct_loader(bn_cfg, "train", is_precise_bn=True)
            if cfg.BN.USE_PRECISE_STATS
            else None
        )
        train_loaders = [source_loader, target_unl_loader]
    # Create meters.
    train_meter = AdaMeter(len(train_loaders[0]), cfg)
    val_meter = ValMeter(len(val_loader), cfg)
    # set up writer for logging to Tensorboard format.
    if cfg.TENSORBOARD.ENABLE and du.is_master_proc(
        cfg.NUM_GPUS * cfg.NUM_SHARDS
    ):
        writer = tb.TensorboardWriter(cfg)
    else:
        writer = None
    # Perform the training loop.
    logger.info("Start epoch: {}".format(start_epoch + 1))
    epoch_timer = EpochTimer()
    for cur_epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):
        # Shuffle the dataset.
        for train_loader in train_loaders:
            loader.shuffle_dataset(train_loader, cur_epoch)
        # Train for one epoch.
        epoch_timer.epoch_tic()
        train_epoch(
            train_loaders,
            model,
            optimizers,
            scaler,
            train_meter,
            cur_epoch,
            cfg,
            writer,
        )
        epoch_timer.epoch_toc()
        logger.info(
            f"Epoch {cur_epoch} takes {epoch_timer.last_epoch_time():.2f}s. Epochs "
            f"from {start_epoch} to {cur_epoch} take "
            f"{epoch_timer.avg_epoch_time():.2f}s in average and "
            f"{epoch_timer.median_epoch_time():.2f}s in median."
        )
        logger.info(
            f"For epoch {cur_epoch}, each iteraction takes "
            f"{epoch_timer.last_epoch_time()/len(train_loaders[0]):.2f}s in average. "
            f"From epoch {start_epoch} to {cur_epoch}, each iteraction takes "
            f"{epoch_timer.avg_epoch_time()/len(train_loaders[0]):.2f}s in average."
        )
        is_checkp_epoch = cu.is_checkpoint_epoch(
            cfg,
            cur_epoch,
            None
        )
        is_eval_epoch = misc.is_eval_epoch(
            cfg,
            cur_epoch,
            None
        )
        # Compute precise BN stats.
        if (
            (is_checkp_epoch or is_eval_epoch)
            and cfg.BN.USE_PRECISE_STATS
            and len(get_bn_modules(model)) > 0
        ):
            calculate_and_update_precise_bn(
                precise_bn_loader,
                model,
                min(cfg.BN.NUM_BATCHES_PRECISE, len(precise_bn_loader)),
                cfg.NUM_GPUS > 0,
            )
        _ = misc.aggregate_sub_bn_stats(model)
        # Save a checkpoint.
        if is_checkp_epoch:
            cu.save_checkpoint(
                cfg.OUTPUT_DIR,
                model,
                optimizer_f,
                cur_epoch,
                cfg,
                scaler if cfg.TRAIN.MIXED_PRECISION else None,
            )
        # Evaluate the model on validation set.
        if is_eval_epoch:
            eval_epoch(
                val_loader,
                model,
                val_meter,
                cur_epoch,
                cfg,
                writer,
            )
    if writer is not None:
        writer.close()
    # Deliberate hard exit once the full schedule has run.
    raise SystemExit('Training Ends')
| alimottaghi/slowfast | tools/train_mme.py | train_mme.py | py | 22,644 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "slowfast.utils.logging.get_logger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "slowfast.utils.logging",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "slowfast.models.optimizer.get_epoch_lr",
"line_number": 88,
"usage_type": "call"... |
14731423365 | from datetime import datetime
import pandas as pd
import pydash as _
from bs4 import BeautifulSoup
from Base import NSEBase
class NSE(NSEBase):
"""
A class to interact with NSE (National Stock Exchange) API.
Attributes:
valid_pcr_fields : list of valid fields for put-call ratio calculation
Methods:
__init__ : Initialize the NSE class
get_option_chain : Get the option chain for a given ticker
get_raw_option_chain : Get the raw option chain data for a given ticker
get_options_expiry : Get the next expiry date for a given ticker
get_all_derivatives_enabled_stocks : Get the list of equities available for derivatives trading
get_equity_future_trade_info : Get the trade information of active future contracts for a given ticker
get_equity_options_trade_info : Get the trade information of equity options for a given ticker
_mapped_index_ticker_for_futures : Get the mapped index ticker for index futures
get_index_futures_data : Get the data for index futures of a given index or ticker
get_currency_futures : Get the data for currency futures
get_commodity_futures : Get the data for commodity futures
get_pcr : Get the put-call ratio for a given ticker and expiry date
"""
def __init__(self) -> None:
"""
The __init__ function is called when the class is instantiated.
It sets up the session and headers for all subsequent requests.
:param self: Represent the instance of the class
:return: Nothing
"""
super().__init__()
self.valid_pcr_fields = ['oi', 'volume']
# ----------------------------------------------------------------------------------------------------------------
# Utility Functions
def get_option_chain(self, ticker: str, is_index: bool = True, expiry: datetime = None) -> pd.DataFrame:
"""
The get_option_chain function takes a ticker as input and returns the option chain for that ticker. The
function uses the try_n_times_get_response function to get a response from NSE's API, which is then converted
into a DataFrame using pd.json_normalize.
:param self: Represent the instance of the class
:param ticker: Specify the stock ticker for which we want to get the option chain its also called symbol in
NSE
:param is_index: (optional) Boolean value Specifies the given ticker is an index or not
:param expiry: (optional) It takes the `expiry date` in the datetime format of the options contracts,
default is very next expiry day
:return: A dataframe with option chain
"""
params = {'symbol': ticker}
if is_index:
url = f'{self._base_url}/api/option-chain-indices'
else:
url = f'{self._base_url}/api/option-chain-equities'
response = self.hit_and_get_data(url, params=params)
if expiry is None:
df = pd.DataFrame(pd.json_normalize(_.get(response, 'filtered.data', {}), sep='_')).set_index('strikePrice')
else:
df = pd.DataFrame(pd.json_normalize(_.get(response, 'records.data', {}), sep='_')).set_index('strikePrice')
df = df[df['expiryDate'] == expiry.strftime('%d-%b-%Y')]
return df
def get_raw_option_chain(self, ticker: str, is_index: bool = True) -> dict:
"""
The get_option_chain function takes a ticker as input and returns the option chain for that ticker.
The function uses the try_n_times_get_response function to get a response from NSE's API, which is
then converted into a DataFrame using pd.json_normalize.
:param is_index: Boolean value Specifies the given ticker is an index or not
:param self: Represent the instance of the class
:param ticker: Specify the stock ticker for which we want to get the option chain
:return: A dataframe with option chain data
"""
params = {'symbol': ticker}
if is_index:
url = f'{self._base_url}/api/option-chain-indices'
else:
url = f'{self._base_url}/api/option-chain-equities'
response = self.hit_and_get_data(url, params=params)
return response
def get_options_expiry(self, ticker: str, is_index: bool = False) -> datetime:
"""
The get_expiry function takes in a ticker and returns the next expiry date for that ticker.
The function uses the NSE API to get all expiry dates for a given ticker, sorts them in ascending order,
and then returns the nth element of this sorted list.
:param self: Represent the instance of the class
:param ticker: Specify the ticker / symbol for which we want to get the expiry date
:param is_index: Boolean value Specifies the given ticker is an index or not
:return: The very next expiry date
"""
params = {'symbol': ticker}
if is_index:
url = f'{self._base_url}/api/option-chain-indices'
else:
url = f'{self._base_url}/api/option-chain-equities'
response = self.hit_and_get_data(url, params=params)
dates = sorted([datetime.strptime(date_str, "%d-%b-%Y") for date_str in
response.get('records', {}).get('expiryDates', [])])
return dates
# ----------------------------------------------------------------------------------------------------------------_
# Equity Futures
def get_all_derivatives_enabled_stocks(self) -> list:
"""
The get_all_derivatives_enabled_stocks provides the list of Equities available for derivative trading
:param self: Represent the instance of the class
:return: List of all Equities tickers / symbols for which derivative trading is allowed
"""
response = self.hit_and_get_data(f'{self._base_url}/api/master-quote')
return response
def get_equity_future_trade_info(self, ticker: str) -> pd.DataFrame:
"""
The get_equity_future_trade_info provides all active future contracts trade information including its price
details
:param self: Represent the instance of the class
:param ticker: Specify the ticker / symbol for which we want to get the expiry date
:return: A DataFrame of trade info data of Equity Future contracts
"""
params = {'symbol': ticker}
response = self.hit_and_get_data(f'{self._base_url}/api/quote-derivative', params=params)
future_data = []
for fno_data in response.get('stocks', []):
if fno_data.get('metadata', {}).get('instrumentType') == 'Stock Futures':
future_data.append(fno_data)
df = pd.DataFrame(pd.json_normalize(future_data, sep='_'))
df['ticker'] = response.get('info', {}).get('symbol', '')
df['companyName'] = response.get('info', {}).get('companyName', '')
df['industry'] = response.get('info', {}).get('industry', '')
df['fut_timestamp'] = response.get('fut_timestamp', '')
return df
# ----------------------------------------------------------------------------------------------------------------
# Equity Options
def get_equity_options_trade_info(self, ticker: str) -> pd.DataFrame:
"""
Gets equity options trade information for a given ticker.
:param ticker: Ticker symbol of the equity options trade.
:return: DataFrame containing the trade information.
"""
params = {'symbol': ticker}
response = self.hit_and_get_data(f'{self._base_url}/api/quote-derivative', params=params)
future_data = []
for fno_data in response.get('stocks', []):
if fno_data.get('metadata', {}).get('instrumentType') == 'Stock Options':
future_data.append(fno_data)
df = pd.DataFrame(pd.json_normalize(future_data, sep='_'))
df['ticker'] = response.get('info', {}).get('symbol', '')
df['companyName'] = response.get('info', {}).get('companyName', '')
df['industry'] = response.get('info', {}).get('industry', '')
df['opt_timestamp'] = response.get('opt_timestamp', '')
return df
# ----------------------------------------------------------------------------------------------------------------
# Index Futures
def _mapped_index_ticker_for_futures(self) -> dict:
"""
Mapped index ticker will give dict of available options with its corresponding ticker value
:param self: Represent the instance of the class
:return: A dict obj with all FUTURES mappings
"""
response = self.session.get(f'{self._base_url}//market-data/equity-derivatives-watch',
headers=self.headers)
soup = BeautifulSoup(response.text, features="html5lib")
all_derivative_options = soup.find_all('option', attrs={"rel": "derivative"})
mapped_index_ticker = {}
for i in all_derivative_options:
mapped_index_ticker[i.get_text().lower()] = i['value']
return mapped_index_ticker
def get_index_futures_data(self, index_or_ticker: str) -> pd.DataFrame:
"""
Fetches index futures data.
:param self: Represent the instance of the class
:param index_or_ticker: Name or ticker symbol of the index.
:return: DataFrame containing the FUTURES data
"""
index_or_ticker = index_or_ticker.lower()
mapped_tickers = {}
try:
mapped_tickers = self._mapped_index_ticker_for_futures()
except Exception as err:
print(
f'Exception in fetching mapped ticker for this index try to pass actual ticker in the next call, '
f'Exact error : {err}')
if index_or_ticker in mapped_tickers.keys():
ticker_to_used = mapped_tickers[index_or_ticker]
else:
ticker_to_used = index_or_ticker
params = {'index': ticker_to_used}
response = self.hit_and_get_data(f'{self._base_url}/api/liveEquity-derivatives', params=params)
df = pd.DataFrame(response.get('data', []))
return df
# ----------------------------------------------------------------------------------------------------------------
# Currency
def get_currency_futures(self) -> pd.DataFrame:
"""
Fetches currency futures data.
:param self: Represent the instance of the class
:return: DataFrame containing the currency futures data
"""
params = {'index': 'live_market_currency', 'key': 'INR'}
response = self.hit_and_get_data(
f'{self._base_url}/api/liveCurrency-derivatives', params=params)
df = pd.DataFrame(response.get('data', []))
return df
# ----------------------------------------------------------------------------------------------------------------
# Commodity
def get_commodity_futures(self) -> pd.DataFrame:
"""
Fetches commodity futures data.
:param self: Represent the instance of the class
:return: Pd.DataFrame: DataFrame containing the currency futures data
"""
response = self.hit_and_get_data(f'{self._base_url}/api/liveCommodity-derivatives')
df = pd.DataFrame(response.get('data', []))
return df
def get_pcr(self, ticker: str, is_index: bool = True, on_field: str = 'OI', expiry: datetime = None) -> float:
"""
Calculate the put-call ratio (PCR) for a given ticker.
:param self: Represent the instance of the class
:param ticker: The ticker symbol.
:param is_index: Boolean value Specifies the given ticker is an index or not
:param expiry: The expiry date of the option contract. Defaults to None.
:param on_field: The field to calculate PCR on. `Volume` or `oi` (open-interest) Default to 'OI'.
:return: The calculated PCR value
"""
on_field = on_field.lower()
if on_field not in self.valid_pcr_fields:
print(f'Un-supported filed is passed only these are the fields available : {self.valid_pcr_fields}')
return 0
if expiry is None:
df = self.get_option_chain(ticker, is_index=is_index)
else:
df = self.get_option_chain(ticker, is_index=is_index, expiry=expiry)
if df.shape[0] == 0:
print('Your filters lead to empty DataSet check all params, expiry, etc; returning 0 as default')
return 0
if on_field == 'oi':
put_oi = df['PE_openInterest'].sum()
call_oi = df['CE_openInterest'].sum()
return put_oi / call_oi
else:
put_vol = df['PE_totalTradedVolume'].sum()
call_vol = df['CE_totalTradedVolume'].sum()
return put_vol / call_vol | Sampad-Hegde/Bharat-SM-Data | Bharat_sm_data/Derivatives/NSE.py | NSE.py | py | 13,281 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "Base.NSEBase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize... |
17388621437 | # Import necessary Tkinter and sqlite3 libraries.
import tkinter as tk
import sqlite3
from sqlite3 import Error
from PIL import Image, ImageTk
import tkinter.messagebox as messagebox
# Making things object oriented, define a class.
class School_Data:
    '''Tkinter GUI for maintaining a small school student database.

    Presents Home / Add / Search / Extra screens inside a single window and
    keeps one sqlite3 connection open for the lifetime of the application.
    '''
    def __init__(self):
        # Build the window, open the DB connection, show the home screen and
        # block in mainloop(); the connection is closed only after the GUI exits.
        self.root = tk.Tk()
        self.root.geometry('1200x700')
        self.connection = self.create_connection()
        self.home()
        self.root.mainloop()
        self.connection.close()
    def home(self):
        '''Render the home screen: menubar, title, banner image and nav buttons.'''
        # Clear the screen and display the home screen
        self.clear_screen()
        # Create a menubar with two menus File and Action
        # From the File Menu the application can be closed
        # From the Action menu a message can be displayed.
        self.menubar = tk.Menu(self.root)
        self.filemenu = tk.Menu(self.menubar, tearoff=0)
        self.filemenu.add_command(label='Close', command=self.close)
        self.filemenu.add_separator()
        # 'exit' here is the site builtin, so this quits the whole interpreter.
        self.filemenu.add_command(label='Close without question', command=exit)
        self.actionmenu = tk.Menu(self.menubar, tearoff=0)
        self.actionmenu.add_command(label='Show Message', command=self.show_message)
        self.menubar.add_cascade(menu = self.filemenu, label='File')
        self.menubar.add_cascade(menu = self.actionmenu, label='Action')
        self.root.config(menu = self.menubar)
        # Create a label for the application title
        self.label = tk.Label(self.root, text="Sample School Data", font=("Calibri", 24))
        self.label.pack(padx=20, pady=20)
        # Load and display an image (path is relative to the working directory)
        image = Image.open("school_image.jpg")
        image = image.resize((800,300))
        self.photo = ImageTk.PhotoImage(image)
        image_label = tk.Label(self.root, image=self.photo)
        image_label.pack(padx=10, pady=10)
        # Create a frame for the buttons
        self.homeframe = tk.Frame(self.root)
        self.homeframe.pack(padx=20, pady=20)
        # Add buttons for Add, Search, and Extra functionality
        self.add_button_in_frame(self.homeframe,"Add",0,0, self.add)
        self.add_button_in_frame(self.homeframe,"Search",0,1, self.search)
        self.add_button_in_frame(self.homeframe,"Extra",0,2, self.extra)
    def add_button_in_frame(self, parent, text, row, col, *commands):
        """
        Create a button and place it in a frame within the parent widget.
        Args:
            parent (tk.Widget): The parent widget.
            text (str): The text to display on the button.
            row (int): The row number within the parent's grid layout.
            col (int): The column number within the parent's grid layout.
            *commands (callable): The command(s) to associate with the button.
        Returns:
            tk.Button: The created button.
        NOTE(review): config() overwrites 'command' on every pass, so only the
        LAST callable in *commands is ever wired to the button.
        """
        button = tk.Button(parent, text=text, font=("Arial", 14))
        button.grid(row=row, column=col)
        for cmd in commands:
            button.config(command = lambda c=cmd: c())
        return button
    def add_button(self, text, command):
        """
        Create a button and place it in the root window with standard padding.
        Args:
            text (str): The text to display on the button.
            command (callable): The command(s) to associate with the button.
        """
        button = tk.Button(self.root, text=text, font=("Arial", 14), command=command)
        button.pack(padx=10, pady=10)
    def add(self):
        """
        Displays the screen for adding a new entry.
        """
        self.clear_screen()
        # Create a label for the add screen title
        self.label = tk.Label(self.root, text="Add a new Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        self.addframe = tk.Frame(self.root)
        self.addframe.pack(padx=10, pady=10)
        # Create input fields for name, age, and class (stored as self.Name etc.)
        self.create_label_and_entry(self.addframe, "Name", 0, "Name", "")
        self.create_label_and_entry(self.addframe, "Age", 1, "Age", "")
        self.create_label_and_entry(self.addframe, "Class", 2, "Class", "")
        self.addbtnframe = tk.Frame(self.root)
        self.addbtnframe.pack(padx=10, pady=10)
        # Add buttons to add the entry and return to the home screen
        self.add_button_in_frame(self.addbtnframe,"Add",0,1, self.connection_add)
        self.add_button_in_frame(self.addbtnframe,"Home",0,2, self.home)
    # Method to connect to database and pass the entries to save
    def connection_add(self):
        """
        Add the new entry to the SQLite database.
        Creates the table on first use; refuses to insert if any field is blank.
        """
        try:
            data_entry = '''CREATE TABLE IF NOT EXISTS Stud_Data (name TEXT, age INT, class INT)'''
            self.connection.execute(data_entry,)
            data_insert = '''INSERT INTO Stud_Data (name, age, class) VALUES (?,?,?)'''
            data_insert_tuple = (
                self.Name.get('1.0', 'end-1c'),
                self.Age.get('1.0', 'end-1c'),
                self.Class.get('1.0', 'end-1c')
            )
            # If any space is left blank, prompt user to enter all details else, execute the data entry
            # and display respective messages.
            if '' in data_insert_tuple:
                messagebox.showinfo(title='Error', message='Kindly fill in all the details')
            else:
                cursor = self.connection.cursor()
                cursor.execute(data_insert, data_insert_tuple)
                self.connection.commit()
                messagebox.showinfo(title='Congratulations!', message='Entry added Successfully!')
                self.clear_text(self.addframe)
        except Error as e:
            print(e)
    def search(self):
        """
        Displays the screen for searching an entry.
        """
        self.clear_screen()
        # Create a label for the search screen title
        self.label = tk.Label(self.root, text="Search an Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame for search input field
        self.searchframe = tk.Frame(self.root)
        self.searchframe.pack(padx=10, pady=10)
        self.attribute = tk.Label(self.searchframe, text="Search by", font=("Arial", 14))
        self.attribute.grid(row=0, column=0)
        # Define a variable to store the attribute name selected by user by which user wants to search
        self.sel_string = tk.StringVar()
        # Define option menu to select Name, Age or Class and store value in variable
        self.attribute_sel = tk.OptionMenu(self.searchframe, self.sel_string, *["Name", "Age", "Class"])
        self.attribute_sel.grid(row=1, column=0)
        # Text input by user which will be searched in the database
        self.search_value = tk.Text(self.searchframe, height=1, font=("Arial", 12))
        self.search_value.grid(row=1, column=1)
        # Add buttons to search the entry and return to the home screen
        self.add_button("Search", self.connection_search)
        self.add_button("Home", self.home)
    def connection_search(self):
        """
        Search for entries in the SQLite database.
        Results (if any) are handed to disp_search_results and kept on self.info.
        """
        try:
            # Search user given text input in user selected attribute column of database.
            # The column name is interpolated via .format(); safe only because it
            # comes from the fixed OptionMenu values (Name/Age/Class), not free text.
            search_column = self.sel_string.get()
            search_querry = "SELECT * FROM Stud_Data WHERE {} = ?".format(search_column)
            cursor = self.connection.cursor()
            # if text input is left blank, prompt user to enter a text
            # else store search results from database in global variable self.info
            if self.search_value.get('1.0', 'end-1c') == '':
                messagebox.showinfo(title='Error!', message='Kindly enter value for search')
            else:
                cursor.execute(search_querry, (self.search_value.get('1.0', 'end-1c'),))
                self.info = cursor.fetchall()
                self.disp_search_results(self.info)
                self.connection.commit()
        except Error as e:
            print(e)
    def disp_search_results(self, info):
        '''Displays all the results of search command in database
        Args:
            info: list of all the rows from database that correspond to user search
        '''
        # Clear any previously displayed search results
        self.clear_search_results()
        # Create label for results of search
        self.label = tk.Label(self.root, text="Search Results", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame to display all matching results
        self.dispframe = tk.Frame(self.root)
        self.dispframe.pack(fill = 'y')
        # Create a variable to store the value of radiobutton (1-based row index)
        self.rbvar = tk.StringVar()
        # if no matching result is found, display No Results found!
        # else display results
        if len(info) == 0:
            self.label_nor = tk.Label(self.root, text="No Results found!", font=("Arial", 16))
            self.label_nor.pack(padx=20, pady=20)
        # Create radiobutton for each row of result
        # if a row is selected, option to edit or delete the row pops up
        else:
            for i, row in enumerate(info, start=1):
                self.rb = tk.Radiobutton(self.dispframe, variable=self.rbvar, value = i, command=self.enable_options)
                self.rb.grid(row=i, column=0)
                for j, val in enumerate(row):
                    label = tk.Label(self.dispframe, text=val, relief=tk.RAISED, width=15, font=("Arial", 14))
                    label.grid(row=i, column=j+1, sticky= tk.W + tk.E)
    def enable_options(self):
        '''Method to display Edit and Delete buttons only on selection of a row
        NOTE(review): the duplicate check scans only root's DIRECT children for a
        Button labelled 'Edit', but the buttons are nested inside searchbtnframe,
        so "present" likely never becomes True — confirm whether duplicate
        frames stack up on repeated selections.
        '''
        present = False
        for widget in self.root.winfo_children():
            if isinstance(widget, tk.Button) and (widget.cget('text') == 'Edit'):
                present = True
        if present == False:
            # If buttons not already present, create frame for buttons
            self.searchbtnframe = tk.Frame(self.root)
            self.searchbtnframe.pack(padx=10,pady=10)
            self.add_button_in_frame(self.searchbtnframe, 'Edit', 0,0, self.edit)
            self.add_button_in_frame(self.searchbtnframe, 'Delete', 0,1, self.delete_entry)
    def edit(self):
        ''' Edit the selected row in database'''
        # Extracting details of selected row (currently unused here; the actual
        # values are re-read from self.info when Update is pressed)
        selected_row = int(self.rbvar.get()) -1
        (name, age, classl) = self.info[selected_row]
        # Clear screen for Edit screen
        self.clear_screen()
        # Create label for Edit screen
        self.label = tk.Label(self.root, text="Update an Entry", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        # Create frame for text entries that should replace the existing entry
        self.editframe = tk.Frame(self.root)
        self.editframe.pack(padx=10, pady=10)
        self.create_label_and_entry(self.editframe, "Name", 0, "Name", "")
        self.create_label_and_entry(self.editframe, "Age", 1, "Age", "")
        self.create_label_and_entry(self.editframe, "Class", 2, "Class", "")
        # Create a frame for buttons to execute the edit function or cancel the process
        self.editbtnframe = tk.Frame(self.root)
        self.editbtnframe.pack(padx=10, pady=10)
        self.add_button_in_frame(self.editbtnframe,"Update",0,1, lambda: self.edit_entry(self.info[int(self.rbvar.get()) - 1]))
        # NOTE(review): clear_text requires a frame argument; wiring it directly
        # here means clicking Cancel raises TypeError. Probably intended:
        # lambda: self.clear_text(self.editframe)
        self.add_button_in_frame(self.editbtnframe,"Cancel",0,2, self.clear_text)
        self.add_button_in_frame(self.editbtnframe,"Back",0,3, self.search)
        self.add_button_in_frame(self.editbtnframe,"Home",0,4, self.home)
    def edit_entry(self, entry):
        ''' Method to execute the edit in Sqlite database
        Args:
            entry: the original (name, age, class) tuple used in the WHERE clause
        '''
        edit_query = '''UPDATE Stud_Data SET name=?, age=?, class=? WHERE name=? AND age=? AND class=?'''
        data_edit_tuple = (self.Name.get('1.0', 'end-1c'), self.Age.get('1.0', 'end-1c'), self.Class.get('1.0', 'end-1c'))
        # If any field is left blank, prompt user to fill all details
        if '' in data_edit_tuple:
            messagebox.showinfo(title='Error', message='Kindly fill in all the details')
        else:
            cursor = self.connection.cursor()
            cursor.execute(edit_query,
                           (self.Name.get('1.0', 'end-1c'),
                            self.Age.get('1.0', 'end-1c'),
                            self.Class.get('1.0', 'end-1c'),
                            entry[0], entry[1], entry[2]))
            self.connection.commit()
            messagebox.showinfo(title='Congratulations!', message='Entry updated Successfully!')
            # Clear the text fields after operation
            self.clear_text(self.editframe)
    def delete_entry(self):
        '''Delete the selected entry'''
        # Confirm if user really wants to delete the entry
        sure = messagebox.askyesnocancel(title='Delete?', message='''Are you sure you want to delete this entry?''')
        if sure == True:
            cursor = self.connection.cursor()
            selected_row = int(self.rbvar.get()) -1
            (name, age, classl) = self.info[selected_row]
            delete_query = '''DELETE from Stud_Data WHERE
                            name = ? AND age = ? AND class = ?'''
            cursor.execute(delete_query, (name, age, classl))
            self.connection.commit()
            messagebox.showinfo(title="Success", message="Entry deleted successfully!")
            self.connection_search()
    def create_label_and_entry(self, parent, text, row, entry_name, default_value):
        """
        Create a label and a one-line Text entry inside *parent* and store the
        entry widget on self under *entry_name*.
        Args:
            parent (tk.Widget): The parent widget.
            text (str): The text to display on the label.
            row (int): The row number within the parent's grid layout.
            entry_name (str): Attribute name the Text widget is stored under on
                self (e.g. "Name" -> self.Name).
            default_value (str): Initial content of the entry field.
        """
        label = tk.Label(parent, text=text, font=("Arial", 14))
        label.grid(sticky=tk.W + tk.E)
        entry = tk.Text(parent, height=1, font=("Arial", 12))
        entry.bind("<KeyPress>", self.shortcut)
        entry.insert("1.0", default_value)
        entry.grid(row=row, column=1, sticky=tk.W + tk.E)
        setattr(self, entry_name, entry)
    def clear_text(self, frame):
        ''' Method to clear text fields if present on the screen
        Args:
            frame: the frame whose Text children should be emptied
        '''
        text_entry = [widget for widget in frame.winfo_children() if isinstance(widget, tk.Text)]
        for element in text_entry:
            element.delete('1.0', 'end')
    def create_connection(self):
        '''Method to create connection with the Sqlite database
        NOTE(review): the DB path is hardcoded to one user's machine, and on
        failure this returns None (the caller then fails later with
        AttributeError rather than a clear error).
        '''
        try:
            connection = sqlite3.connect(r"c:\Users\rsahu\Documents\git_files\Repo1\data.db")
            return connection
        except Error as e:
            print(e)
    def clear_search_results(self):
        ''' Method to refresh and clear previously displyed results in case of new search or deleted entry'''
        for widget in self.root.winfo_children():
            if isinstance(widget, tk.Frame) and widget != self.searchframe:
                widget.destroy()
            elif isinstance(widget, tk.Label) and widget.cget('text') == 'Search Results':
                widget.destroy()
    def shortcut(self, event):
        ''' Method to enable function through shortcut keys
        NOTE(review): Return always triggers connection_add, even when the
        focused Text widget belongs to the Edit screen — confirm intent.
        '''
        #print(event.keysym, event.state)
        if event.keysym == 'Return':
            self.connection_add()
        if event.keysym == 'Tab':
            current_widget = event.widget
            current_widget.tk_focusNext().focus()
            return 'break'
    def extra(self):
        """
        Displays the screen for extra functionality (placeholder).
        """
        self.clear_screen()
        # Create a label for the extra screen title
        self.label = tk.Label(self.root, text="Extra Functionality", font=("Arial", 20))
        self.label.pack(padx=20, pady=20)
        self.extrabtnframe = tk.Frame(self.root)
        self.extrabtnframe.pack(padx=10, pady=10)
        # Add button to go back to the home screen
        self.add_button_in_frame(self.extrabtnframe, "Back", 0, 0, self.home)
    def clear_screen(self):
        '''Method to clear screen of widgets on the window'''
        for widget in self.root.winfo_children():
            widget.destroy()
    def show_message(self):
        '''Method to show message when asked from Actionmenu'''
        messagebox.showinfo(title='Information', message='This is a sample GUI for entry of data of students in a school')
    def close(self):
        '''Method to kill the application window'''
        if messagebox.askyesno(title="Quit?", message='Do you really want to quit?'):
            self.root.destroy()
# Instantiate the School_Data class to start the application.
if __name__ == '__main__':
School_Data() | rohan-sahuji/Repo1 | Tkinter_GUI.py | Tkinter_GUI.py | py | 17,391 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.Tk",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tkinter.Menu",
"line_number"... |
44042814684 | import pandas as pd
import re
import graphlab as gl
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from nltk.stem.wordnet import WordNetLemmatizer
from helper import *
class Registries(object):
    '''Wraps a scraped baby-registry CSV: cleaning, NMF-based item
    categorisation and conversion to GraphLab SFrames for recommenders.

    NOTE(review): string .decode() on str and the graphlab dependency suggest
    this targets Python 2; several latent defects are flagged inline below.
    '''
    def __init__(self, filepath):
        # Path to the raw CSV; data is loaded lazily by preprocess_registries_data.
        self.filepath = filepath
        self.data = None
    def preprocess_registries_data(self):
        '''Load the CSV and derive cleaned columns (name, url, id, color, size).'''
        self.data = pd.read_csv(self.filepath)
        # product_details / product_att are stringified lists -> split them back
        self.data['product_details'] = [x.strip('[]').split(',') for x in self.data['product_details']]
        self.data['product_att'] = [x.strip('[]').split(',') for x in self.data['product_att']]
        # NOTE(review): str.decode() only exists in Python 2 — this line breaks on Python 3.
        self.data['product_name'] = [p[0].strip('u\'').decode('unicode_escape').encode('ascii','ignore') for p in self.data.product_details]
        self.data['product_url'] = [x[-1].strip(' u\'') for x in self.data.product_details]
        # Numeric product id parsed out of the URL path; '' rows are dropped below.
        self.data['product_id'] = [int(re.search(r'/(\d+)\?',x).group(1)) if x!='' else '' for x in self.data.product_url]
        self.data = self.data[self.data.product_id != ''] # convert to integer for graphlab models
        self.data['color'] = [x[0].strip(' u\'') for x in self.data.product_att]
        # Coarse 3-way color bucket used as an item feature.
        self.data['color_scheme'] = ['NEUTRAL' if type(x) is float else 'BLUE' if 'BLUE' in x.split() else 'PINK' if 'PINK' in x.split() else 'NEUTRAL' for x in self.data.color]
        self.data['size_others'] = [x[1].strip(' u\'') if type(x) is str else '' for x in self.data.product_att]
        # self.data['price'] = self.data.price.astype(float).fillna(0.0)
        self.data = self.data.drop(['product_details', 'product_att'], axis=1)
        return self.data
    def load_registry_data(self, data):
        '''Inject an already-preprocessed DataFrame instead of reading the CSV.'''
        self.data = data
    def create_registry_df(self):
        # Create registries dataframe (implicit rating of 1 per requested item)
        self.registries = self.data[['id', 'product_id']]
        self.registries['requested'] = 1
        return self.registries
    def create_items_df(self):
        '''Build the item-side DataFrame and attach an NMF-derived category.'''
        self.items = self.data[['product_id', 'product_name','color', 'color_scheme', 'size_others','price']]
        self.get_item_category_with_NMF()
        return self.items
    def tfidf_item_desc(self):
        '''TF-IDF vectorise item name+size text; returns the doc-term matrix.
        NOTE(review): vectorizer/doc_term_mat are stored as attributes ON the
        DataFrame (self.items.vectorizer) — pandas does not persist such
        attributes through copies; consider storing them on self instead.
        '''
        self.items['desc'] = [x+' '+y for x, y in zip(self.items.product_name, self.items.size_others)]
        corpus = self.items['desc'].values
        wordnet = WordNetLemmatizer()
        # NOTE(review): docs_wordnet is computed but never used below — the
        # vectorizer is fit on the raw corpus, not the lemmatized one.
        docs_wordnet = [[wordnet.lemmatize(word) for word in re.split('\W+', words)] for words in corpus]
        stop_words = ['baby', 'child', 'infant', 'newborn', 'in', 'with', 'of', '+', '&', 'and', 'by']
        self.items.vectorizer = TfidfVectorizer(stop_words=stop_words)
        self.items.doc_term_mat = self.items.vectorizer.fit_transform(corpus)
        # feature_words = self.items.vectorizer.get_feature_names()
        return self.items.doc_term_mat
    def get_item_category_with_NMF(self, num_category=4):
        '''Assign each item its strongest NMF topic as a 'category' column.'''
        self.items.doc_term_mat = self.tfidf_item_desc()
        nmf = NMF(n_components=num_category)
        W_sklearn = nmf.fit_transform(self.items.doc_term_mat)
        # NOTE(review): H_sklearn is unused.
        H_sklearn = nmf.components_
        # np presumably comes from `from helper import *` — verify.
        items_cat_ind = np.argsort(W_sklearn, axis=1)
        self.items['category'] = items_cat_ind[:,-1] # get the top category
        return self.items
    def get_item_pairwise_dist(self, metric='cosine'):
        '''Pairwise distance matrix over item TF-IDF vectors.
        NOTE(review): if pairwise_distances is sklearn's, the second positional
        argument is Y, not metric — metric must be passed as a keyword.
        '''
        tfidf_arr = self.items.doc_term_mat.toarray()
        dist_mat = pairwise_distances(tfidf_arr, metric)
        return dist_mat
    def dummify(df,column_name, drop_first = False):
        # NOTE(review): defined inside the class without `self` and invoked as a
        # bare name in to_SFrame -> NameError at runtime; should be a
        # @staticmethod called via self.dummify. Also drop_first is ignored
        # (get_dummies is hardcoded to drop_first=False).
        dummies = pd.get_dummies(df[column_name], prefix = column_name, drop_first = False)
        df = df.drop(column_name, axis = 1)
        return pd.concat([df,dummies], axis = 1)
    def to_SFrame(self, categorical_cols):
        '''
        categorical_cols: list of column names for categorical variables
        '''
        items_gl = self.items.dropna()
        reg_gl = self.registries.dropna()
        for col in categorical_cols:
            # NOTE(review): bare `dummify` is not in scope here — see dummify above.
            items_gl = dummify(items_gl, col)
        items_gl = gl.SFrame(items_gl)
        reg_gl = gl.SFrame(reg_gl)
        return reg_gl, items_gl
    def train_test_split(self, test_proportion = 0.2):
        # NOTE(review): `self.to_SFrame` is not called (missing parentheses and
        # the required categorical_cols argument), so unpacking the bound
        # method raises TypeError. Also test_proportion is accepted but 0.2 is
        # hardcoded in the call below.
        reg_gl, _ = self.to_SFrame
        train, test = gl.recommender.util.random_split_by_user(dataset = reg_gl,
                                                               user_id = 'id',
                                                               item_id = 'product_id',
                                                               max_num_users = 1000,
                                                               item_test_proportion = 0.2,
                                                               random_seed = 100)
        return train, test
| vynguyent/Expecting-the-unexpected | Model/registries.py | registries.py | py | 4,677 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nltk.stem.wordnet.WordNetLemmatizer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.spli... |
# Coding Math Episode 2
# Display a sine wave plotted point-by-point with pygame.
import pygame
import math
import numpy as np
pygame.init()
RED = pygame.color.THECOLORS['red']
screen = pygame.display.set_mode((800, 600))
screen_rect = screen.get_rect()
print(f"Size of the screen ({screen_rect.width}, {screen_rect.height})")
screen_fonts = pygame.font.SysFont("monospace", 12)
label = screen_fonts.render("Press key up or down to change the period...",
                            1, (255,255,0))
pygame.display.set_caption("Episode 2")
main_loop = True
# Scale factor applied to BOTH axes: x = angle * amplifier, y = sin(angle) * amplifier.
# NOTE(review): the on-screen hint says "change the period", but changing
# amplifier also changes the wave's height — confirm intended wording.
amplifier = 200
# Sample angles over two full periods (0 .. 4*pi) in 0.01-radian steps.
angles = np.arange(0.0, math.pi * 4, 0.01)
while main_loop:
    # ~10 FPS cap; also keeps CPU usage down.
    pygame.time.delay(100)
    for event in pygame.event.get():
        # Quit on window close or Escape.
        if (event.type == pygame.QUIT
                or event.type == pygame.KEYDOWN
                and event.key == pygame.K_ESCAPE):
            main_loop = False
        if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
            amplifier += 5
        if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
            amplifier -= 5
    screen.fill((0,0,0))
    for angle in angles:
        x = angle * amplifier
        # Negate y because pygame's y axis grows downward; offset to mid-screen.
        y = math.sin(angle) * amplifier
        pygame.draw.rect(screen, RED, (x, -y + screen_rect.height/2, 2, 2), 1)
    screen.blit(label, ((screen_rect.width - label.get_rect().width) // 2,
                        (screen_rect.height - 20)))
    pygame.display.update()
pygame.quit()
| piquesel/coding-math | ep2.py | ep2.py | py | 1,397 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.color",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
... |
75079239548 | from dal import autocomplete
from django import forms
from .models import Tag
class TForm(forms.ModelForm):
    """ModelForm for Tag exposing the tag name with an autocomplete widget."""

    class Meta:
        model = Tag
        # BUG FIX: Meta.fields must be a list/tuple of field names.
        # ('Tag_name') is just a parenthesised string, which Django rejects
        # ("TForm.Meta.fields cannot be a string"). The trailing comma makes
        # it a one-element tuple.
        fields = ('Tag_name',)
        widgets = {
            # dal Select2 widget backed by the 'test' autocomplete URL.
            'Tag_name': autocomplete.ModelSelect2(url='test')
        }
| codebottlehun/WithMe | tag/forms.py | forms.py | py | 270 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Tag",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "dal.autocomplete.Mo... |
70281053308 | from typing import Dict, Any, Union, Optional, List
import torch
import numpy as np
from overrides import overrides
from transformers import ViltProcessor
from PIL import Image
from allennlp.data.fields.field import DataArray
from allennlp.data.fields.metadata_field import MetadataField
class ViltField(MetadataField):
    """
    A metadata field holding a {'text': str, 'image': path} pair for ViLT.

    Batching runs the shared ViltProcessor over all texts and images in the
    batch (with padding), optionally casting float tensors to half precision.
    NOTE(review): despite the annotation, batch_tensors returns a dict of
    batched tensors, not a list — confirm downstream expectations.
    """

    __slots__ = ["metadata", "vilt_processor", "vilt_half_precision"]

    def __init__(self, metadata: Any,
                 vilt_processor: ViltProcessor,
                 vilt_half_precision: bool = True) -> None:
        # metadata is expected to be a dict with 'text' and 'image' keys
        # (image being a file path) — see batch_tensors below.
        super(ViltField, self).__init__(metadata)
        self.metadata = metadata
        self.vilt_processor = vilt_processor
        self.vilt_half_precision = vilt_half_precision

    @overrides
    def batch_tensors(self, tensor_list: List[DataArray]) -> List[DataArray]:  # type: ignore
        # Collect the raw texts and load each image from disk as RGB.
        texts = []
        images = []
        for tensor in tensor_list:
            text = tensor['text']
            texts.append(text)
            image = tensor['image']
            image_data = Image.open(image).convert("RGB")
            images.append(image_data)
        # Single processor call tokenises all texts and preprocesses all
        # images, padding to the longest sequence in the batch.
        processed = self.vilt_processor(text = texts,
                                        images=images,
                                        return_tensors='pt',
                                        padding=True)
        to_ret = {}
        for k, v in processed.items():
            # Cast float32 tensors (CPU or CUDA) to fp16 when requested;
            # integer tensors (input_ids, masks) are left untouched.
            if self.vilt_half_precision and (isinstance(v, torch.FloatTensor) or isinstance(v, torch.cuda.FloatTensor)):
                processed[k] = v.half()
            to_ret[k] = processed[k]
        return to_ret
{
"api_name": "allennlp.data.fields.metadata_field.MetadataField",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "transformers.ViltProcessor",
"line_number": 21,
"usage_type": "name"
},
... |
import pyodbc
# Open an ODBC connection to a local SQL Server instance and dump a table.
# NOTE(review/security): credentials are hardcoded in source — move them to
# environment variables or a config file. Also, UID/PWD are supplied together
# with Trusted_Connection=yes (Windows auth); the two are contradictory and
# the driver will prefer the trusted connection.
cnxn = pyodbc.connect("DRIVER={ODBC Driver 17 for SQL Server};"
                      "Server=DESKTOP-0A2HT13;"
                      "Database=Databricks;"
                      "UID=prajwal;"
                      "PWD=Prajwal082;"
                      "Trusted_Connection=yes;")
cursor = cnxn.cursor()
cursor.execute('SELECT * FROM [dbo].[Customer]')
# Iterating the cursor yields one pyodbc.Row per result row.
for row in cursor:
    print('row = %r' % (row,))
# Dead code below: an earlier PostgreSQL attempt kept for reference.
# Note it mixes a JDBC driver class name into an ODBC connection string and
# reuses `cnxn` instead of `conn` — it never worked as written.
# import pyodbc
# conn_str = pyodbc.connect(
#     'Driver={org.postgresql.Driver};'
#     'Server=localhost;'
#     'Port=5432;'
#     'Database=Test;'
#     'UID=postgres;'
#     'PWD=1234;'
# )
# conn = pyodbc.connect(conn_str, autocommit=True)  # Error occurs here
# cursor = cnxn.cursor()
# cursor.execute('select * from students')
# for row in cursor:
#     print('row = %r' % (row,))
{
"api_name": "pyodbc.connect",
"line_number": 3,
"usage_type": "call"
}
] |
36079540438 | import atexit
import json
import logging
import os
# needs install
import websocket
from log.timeutil import *
# Configure the ROOT logger at import time (side effect: affects the whole
# process) to echo debug output to stderr.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
import log.encoder
# Python 2/3 compatibility shim: `thread` was renamed `_thread` in Python 3.
try:
    import thread
except ImportError:
    import _thread as thread
class BitWs:
'''logging utility using bitmex realtime(websockets) API'''
    def __init__(self, log_file_dir=os.sep + "tmp", flag_file_name = os.sep + "tmp" + os.sep + "BITWS-FLG", id = None, fix_file=None):
        """Set up logger state and, unless a fixed output file is given,
        open the first rotating log file.

        :param log_file_dir: directory where rotating BITLOG files are written
        :param flag_file_name: path of the liveness flag shared across processes
        :param id: explicit logger identity; defaults to this process's pid
        :param fix_file: if set, all output goes to this single file (no rotation)
        """
        self.last_action = None
        self.log_file_root_name = None
        self.log_file_name = None
        self.ws = None
        self.log_file_dir = log_file_dir
        self.last_time = 0
        # When True, lines are run through log.encoder before writing.
        self.compress = True
        # Countdown of foreign-flag sightings before self-termination.
        self.terminate_count = 200
        self.terminated_by_peer = False
        self.fix_file = fix_file
        if id:
            self.pid = id
        else:
            self.pid = str(os.getpid())
        self.reset()
        self.flag_file_name = flag_file_name
        # With a fixed output file there is nothing to rotate.
        if not self.fix_file:
            self.rotate_file()
    def __del__(self):
        # Finalizer: promote the in-progress log file to its final name and
        # release the liveness flag so a successor process can take over.
        # self.dump_message()
        self.rotate_file()
        self.remove_terminate_flag()
def reset(self):
self.last_message = None
self.reset_timestamp()
    def reset_timestamp(self):
        # Record "now" in whole seconds as the timestamp stamped onto the next
        # logged message; timestamp() comes from log.timeutil.
        self.last_time = int(timestamp())
def get_flag_file_name(self):
return self.flag_file_name
def create_terminate_flag(self):
self.remove_terminate_flag()
file_name = self.get_flag_file_name()
with open(file_name + "tmp", "w") as file:
file.write(self.get_process_id())
file.close()
os.rename(file_name + "tmp", file_name)
def check_terminate_flag(self):
file_name = self.get_flag_file_name()
if os.path.isfile(file_name):
with open(file_name, "r") as file:
id = file.readline()
if id != self.get_process_id():
self.terminate_count = self.terminate_count - 1
if self.terminate_count == 0:
return True
return False
def get_process_id(self):
return self.pid
def remove_terminate_flag(self):
file_name = self.get_flag_file_name()
if os.path.isfile(file_name):
os.remove(file_name)
def rotate_file(self):
if self.log_file_name:
if os.path.isfile(self.log_file_name):
os.rename(self.log_file_name, self.log_file_root_name)
timestring = time_stamp_string().replace(":", "-").replace('+', '-')
self.log_file_root_name = self.log_file_dir + os.sep + 'BITLOG' + self.get_process_id() + '-' + timestring + ".log"
self.log_file_name = self.log_file_root_name + ".current"
def dump_message(self):
if self.last_message is None:
return
self.dump_message_line(self.last_message)
self.reset()
def dump_message_line(self, message):
message['TIME'] = self.last_time
if self.fix_file:
file_name = self.fix_file
else:
file_name = self.log_file_name
with open(file_name, "a") as file:
json_string = json.dumps(message, separators=(',', ':'))
if self.compress:
file.write(log.encoder.encode(json_string))
else:
file.write(json_string)
file.write('\n')
def remove_symbol(self, message):
for m in message['data']:
del (m['symbol'])
def on_message(self, ws, message):
message = json.loads(message)
table = message['table'] if 'table' in message else None
if table == "orderBookL2":
self.remove_symbol(message)
self.on_order_book_message(ws, message)
elif table == "funding":
self.remove_symbol(message)
self.on_funding_message(ws, message)
elif table == "trade":
self.remove_symbol(message)
self.on_trade_message(ws, message)
def on_trade_message(self, ws, message):
# logger.debug("trade")
self.dump_message_line(self.strip_trade_message(message))
def strip_trade_message(self, message):
data = message['data']
side = None
price = 0
size = 0
last_time_stamp = data[0]['timestamp']
for d in data:
if last_time_stamp != d['timestamp']:
break
side = d['side']
price = d['price']
size += d['size']
del(data[1:])
data[0]['side'] = side
data[0]['price'] = price
data[0]['size'] = size
del(data[0]['grossValue'], data[0]['homeNotional'], data[0]['trdMatchID'], data[0]['foreignNotional'])
return message
def on_funding_message(self, ws, message):
logger.debug("funding")
self.dump_message_line(message)
pass
def on_order_book_message(self, ws, message):
action = message['action'] if 'action' in message else None
if action == 'partial':
logger.debug("partial")
self.rotate_file()
self.create_terminate_flag()
current_time = int(timestamp())
if current_time == self.last_time and self.last_action == action and action != None:
if self.last_message != None:
self.last_message['data'] += message['data']
else:
self.last_message = message
else:
if self.last_message != None:
self.dump_message()
self.last_message = message
self.reset_timestamp()
self.last_action = action
if self.check_terminate_flag():
self.ws.close()
self.rotate_file()
self.terminated_by_peer = True
logger.debug("terminated")
def on_error(self, ws, error):
logger.debug(error)
def on_close(self, ws):
logger.debug("### closed ###")
def on_open(self, ws):
ws.send('{"op": "subscribe", "args": ["funding:XBTUSD", "orderBookL2:XBTUSD", "trade:XBTUSD"]}')
def start(self):
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://www.bitmex.com/realtime",
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
on_open=self.on_open)
self.ws.run_forever(ping_interval=70, ping_timeout=30)
if __name__ == "__main__":
    # Log to one fixed file (no rotation); finalize it on interpreter exit.
    bitmex = BitWs(fix_file='/tmp/bit.log')
    atexit.register(bitmex.rotate_file)
    bitmex.start()
| yasstake/mmf | log/bitws.py | bitws.py | py | 6,831 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DE... |
73675802426 | # This script fills the newly created point geofield
# coding=utf-8
import os, sys
# Bootstrap Django so the ORM can be used from this standalone script.
proj_path = "/home/webuser/webapps/tigaserver/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tigaserver_project.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
import csv
import string
import random
from django.contrib.auth.models import User, Group
from tigaserver_app.models import EuropeCountry
USERS_FILE = '/home/webuser/Documents/filestigaserver/registre_usuaris_aimcost/test_users_14072020.csv'
def split_name(s):
    """Split a full name into first-name and last-name parts.

    The first whitespace-separated token is the first name; everything after
    it becomes the last name, so multi-part surnames ("de la Cruz") are kept
    instead of silently dropped, and a single-token name no longer raises
    IndexError (it yields an empty last name).

    :param s: full name, e.g. "Ada Lovelace"
    :return: dict with keys "name" (first name) and "last_name"
    """
    first, *rest = s.split(" ")
    return { "name": first, "last_name": " ".join(rest) }
def get_username(s):
    """Build a login of the form "<first initial>.<surname stem>".

    The surname stem is the part of the last name before any hyphen,
    lower-cased; the initial is the first letter of the first name.
    """
    parts = split_name(s)
    initial = parts['name'][0].lower()
    stem = parts['last_name'].lower().split("-")[0]
    return "{0}.{1}".format(initial, stem)
def generate_password( size=6, chars= string.ascii_uppercase + string.ascii_lowercase + string.digits ):
    """Return a random password of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def delete_euro_users():
    # Delete every account belonging to the Europe-wide expert group.
    users = User.objects.filter(groups__name='eu_group_europe')
    for u in users:
        u.delete()
def delete_users():
    """Delete the accounts listed in USERS_FILE, matched by derived username."""
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            name = row[0]
            # Username is re-derived from the full name, same as on creation.
            username = get_username(name)
            try:
                user = User.objects.get(username=username)
                user.delete()
            except User.DoesNotExist:
                print("User with username {0} not found".format(name))
def make_user_regional_manager(user, country):
    # Mark the user as national supervisor of the given EuropeCountry.
    user.userstat.national_supervisor_of = country
    user.save()
def assign_user_to_country(user, country):
    # Record the user's home country on their profile.
    user.userstat.native_of = country
    user.save()
def perform_checks():
    """Verify prerequisites before creating users.

    Checks that every country ISO code in USERS_FILE exists in
    EuropeCountry, and creates the two base groups if missing.
    """
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            country_iso = row[7]
            try:
                print("Looking for country {0} with iso_code {1}".format(row[2], row[7]))
                e = EuropeCountry.objects.get(iso3_code=country_iso)
                print("Exists, doing nothing")
            except EuropeCountry.DoesNotExist:
                print("{0} country with iso_code {1} does not exist".format(row[2],row[7]))
    # Ensure the two base groups exist (get-or-create by hand).
    try:
        eu_group = Group.objects.get(name="eu_group_europe")
    except Group.DoesNotExist:
        print("Eu group does not exist, create")
        eu_group = Group.objects.create(name="eu_group_europe")
        eu_group.save()
    try:
        es_group = Group.objects.get(name="eu_group_spain")
    except Group.DoesNotExist:
        print("Es group does not exist, create")
        es_group = Group.objects.create(name="eu_group_spain")
        es_group.save()
def check_users_by_email(comparison_file, output_file_name):
    """Print users from comparison_file whose email is not in the database.

    NOTE(review): output_file_name is accepted but never used — results are
    written to stdout only.
    """
    # Known accounts intentionally excluded from the check.
    ignore_list = ['katja.kalan@gmail.com','isis.sanpera@upf.edu','mallorca@moscardtigre.com','r.eritja@creaf.uab.es','delacour@unizar.es','dbravo.barriga@gmail.com']
    with open(comparison_file) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            email = row[1]
            if email not in ignore_list:
                try:
                    user = User.objects.get(email=email)
                except User.DoesNotExist:
                    print("User with name {0} - {1} is not in database".format(row[0],row[1]))
def inactivate_euro_users():
    # Disable (but keep) every account in the Europe-wide expert group.
    euro_users = User.objects.filter(groups__name='eu_group_europe')
    for user in euro_users:
        user.is_active = False
        user.save()
def create_users(add_users_to_euro_groups=True, ignore_regional_managers = False):
    """Create one account per row of USERS_FILE.

    Expected columns: 0 full name, 1 email, 2 country label, 3 username,
    4 password, 5 regional group name, 6 regional-manager flag ('1'),
    7 ISO3 country code.

    :param add_users_to_euro_groups: also add each user to the regional
        group named in column 5.
    :param ignore_regional_managers: skip national-supervisor promotion
        even when column 6 is '1'.
    """
    perform_checks()
    experts_group = Group.objects.get(name="expert")
    with open(USERS_FILE) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            name = row[0]
            email = row[1]
            country = row[2]
            sp = split_name(name)
            # Username comes from the sheet, not derived from the name.
            #username = get_username(name)
            username = row[3]
            password = row[4]
            country_iso = row[7]
            user = User.objects.create_user(username=username,first_name=sp['name'],last_name=sp['last_name'],email=email,password=password)
            if add_users_to_euro_groups:
                regional_group = Group.objects.get(name=row[5])
                regional_group.user_set.add(user)
            experts_group.user_set.add(user)
            # 'country' is rebound from the CSV label to the ORM object here.
            country = EuropeCountry.objects.get(iso3_code=country_iso)
            assign_user_to_country(user,country)
            if not ignore_regional_managers:
                if row[6] == '1':
                    print("Making user regional manager")
                    make_user_regional_manager(user, country)
            # Echo credentials so they can be distributed.
            print("{0} {1} {2}".format( username, email, password ))
# Entry point: create the accounts without Euro-group membership and without
# promoting regional managers. Other maintenance actions are kept commented.
create_users(add_users_to_euro_groups=False, ignore_regional_managers = True)
#perform_checks()
#delete_users()
#check_users_by_email('/home/webuser/Documents/filestigaserver/registre_usuaris_aimcost/user_check.csv','')
| Mosquito-Alert/mosquito_alert | util_scripts/create_aimsurv_experts.py | create_aimsurv_experts.py | py | 5,288 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "os.environ.setdefault",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"li... |
38776144324 | import os
import webbrowser
from shutil import copyfile
import random
import cv2
import pickle
from moviepy.editor import *
from flask import Flask, render_template, redirect, url_for, request
from flaskwebgui import FlaskUI
pickle_base = "C:\\Users\\AI\\AIVideo_Player\\data\\"  # directory holding pickled app state
image_directory = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images'  # served thumbnails/gifs
n_recent_files = 3  # how many recently-played folders to remember
current_directory = ''
allowed_images = []  # image files that must survive the cleanup on exit
video_file_types = ['flv', 'mp4', 'avi', 'webm', 'mov', 'mpeg', 'wmv', 'mp3', 'MP4', 'mkv', 'MKV', 'AVI', 'MPEG', 'WEBM']
def pick(picklefile):
    """Load a pickled object from the data directory; {} when absent."""
    full_path = pickle_base + picklefile
    if not os.path.isfile(full_path):
        return {}
    with open(full_path, 'rb') as handle:
        return pickle.load(handle)
def cache(item, picklefile):
    """Persist *item* into the data directory under *picklefile*."""
    with open(pickle_base + picklefile, 'wb') as handle:
        pickle.dump(item, handle)
# ff = FFmpeg(executable='C:\\ffmpeg\\bin\\ffmpeg.exe', inputs={folder+folders[folder]['last_file']: None}, outputs={"C:\\Users\\AI\\AIVideo_Player\\data\\recntly_played\\thumbnail"+str(count)+".png": ['-vf', 'fps=1']})
# ff.run()
app = Flask(__name__)
# Disable static-file caching so freshly copied thumbnails/gifs show up.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# do your logic as usual in Flask
@app.route("/")
def index():
    """Home page: regenerate thumbnails/gifs and pick a featured gif."""
    favourites = pick('favourites.pickle')
    folders = pick('cache.pickle')
    backup_gif = ''
    # Regenerate the gif of each favourite whose bookmark changed.
    for file in favourites:
        gif_filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + os.path.basename(file) + '.gif'
        if favourites[file]['changed'] or not os.path.isfile(gif_filename):
            try:
                # 7.5-second clip centred around the bookmarked moment.
                seconds = favourites[file]['time'] - 3.5
                clip = (VideoFileClip(file).subclip(seconds, seconds+7.5))
                clip.write_gif(gif_filename)
            except OSError:
                pass  # best effort: unreadable/moved video keeps the old gif
            favourites[file]['changed'] = False
            cache(favourites, 'favourites.pickle')
    # Ensure each recently played video has a still thumbnail and a gif.
    for folder in folders:
        filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + folders[folder]['filename'] + '.png'
        backup_gif = folders[folder]['filename'] +'.gif'
        gif_filename = 'C:\\Users\\AI\\Code\\VideoPlayer\\engine\\static\\images\\' + backup_gif
        if not os.path.isfile(filename):
            # Grab frame 100 as the still thumbnail.
            cap = cv2.VideoCapture(folders[folder]['full_path'])
            cap.set(1, 100)
            res, frame = cap.read()
            cv2.imwrite(filename, frame)
            try:
                clip = (VideoFileClip(folders[folder]['full_path']).subclip((1, 7.7), (1, 14.12)))
                clip.write_gif(gif_filename)
            except OSError:
                pass
    print(favourites)
    # Feature a random favourite gif, or fall back to the last recent one.
    if favourites != {}:
        favourite_gif = os.path.basename(random.choice(list(favourites)))+'.gif'
    else:
        favourite_gif = backup_gif
    path = "index.html"
    print(favourite_gif)
    return render_template(path, folders=folders, favourite_gif=favourite_gif)
@app.route('/viewer', defaults={'_file_path': 'sample'})
@app.route('/viewer/<_file_path>')
def viewer(_file_path):
    """Play a video: record it as recently played and serve it from static/.

    URL paths encode '\\' as '>' so Windows paths survive the routing.
    """
    folders = pick('cache.pickle')
    time_dict = pick('time_dict.pickle')
    file_path = _file_path.replace('>', '\\')
    dirname, filename = os.path.dirname(file_path), os.path.basename(file_path)
    folders[dirname] = {
        'full_path': str(file_path),
        'filename': str(filename)
    }
    # Resume position, defaulting to the start for unseen files.
    try:
        last_time = time_dict[file_path]
    except KeyError:
        last_time = 0.0
        time_dict[file_path] = 0.0
    folders[dirname]['last_time'] = last_time
    # folder_stack = pick('folder_stack.pickle')
    folder_stack = list(folders)
    folder_stack.append(dirname)
    # Trim the recently-played set down to n_recent_files entries.
    while len(folder_stack)>n_recent_files+1:
        try:
            del folders[folder_stack[0]]
            folder_stack.remove(folder_stack[0])
        except KeyError:
            folder_stack.remove(folder_stack[0])
    cache(folders, 'cache.pickle')
    cache(time_dict, 'time_dict.pickle')
    cache(folder_stack, 'folder_stack.pickle')
    # Copy the video into static/images so the browser can stream it.
    view_locaiton = os.getcwd()+url_for('static', filename='images/'+filename)
    allowed_images.append(os.path.basename(view_locaiton))
    try:
        copyfile(file_path, view_locaiton)
    except FileNotFoundError:
        pass  # keep serving whatever copy already exists
    path = "viewer.html"
    filename = os.path.basename(view_locaiton)
    # Only keep the last few copied files exempt from cleanup.
    while len(allowed_images)>4:
        allowed_images.remove(allowed_images[0])
    print(filename)
    return render_template(path, file_name=url_for('static', filename='images/'+filename), full_file_path=_file_path, last_time=last_time, _filename=filename.replace('%20', ' '))
@app.route("/folders", defaults={'_path': '?'})
@app.route("/folders/<_path>")
def folders(_path):
    """Directory browser.

    Special path values: '?' resumes at the most recent folder, a path
    ending in '<<' goes up one level, '<<<' jumps to the drive root, and a
    path with a video extension hands off to the viewer. '\\' is encoded
    as '>' in URLs.
    """
    folder_stack = pick('folder_stack.pickle')
    path = _path.replace('>', '\\')
    if any(path.endswith(_) for _ in video_file_types):
        return redirect("http://127.0.0.1:5000/viewer/"+path.replace('\\', '>'))
    elif path == '?':
        try:
            path = folder_stack[-1]
        # BUG FIX: pick() returns {} when the pickle is missing (KeyError on
        # [-1]), but an *empty list* stack raises IndexError, which the old
        # `except KeyError` never caught — catch both and fall back to C:\.
        except (IndexError, KeyError):
            path = 'C:\\'
    elif path.endswith('<<'):
        path = os.path.dirname(path)
    elif path == '<<<':
        path = 'C:\\'
    try:
        # List the directory once so both views are guaranteed consistent.
        folders_list = os.listdir(path)
        folders_full_path = [path + "\\" + entry for entry in folders_list]
    except NotADirectoryError:
        return "AIVIDEO_PLAYER does not support this file type"
    return render_template('folders.html', folders_full_path=folders_full_path, folders_list=folders_list, directory=path)
@app.route("/changeVideo", defaults={'param': ' '})
@app.route("/changeVideo/", methods=['POST', 'GET'])
def changeVideo():
    """Handle player commands: save progress, favourite, next/previous/exit.

    Query args: last_video (path with '>' for '\\' and '<' for space),
    last_time, favourite ('true'), favouriteTime, command.
    """
    last_video = request.args.get('last_video')
    last_video = last_video.replace('>', '\\')
    last_video = last_video.replace('<', ' ')
    last_time = request.args.get('last_time')
    favourite = request.args.get('favourite')
    favourite_time = request.args.get('favouriteTime')
    command = request.args.get('command')
    folders = pick('cache.pickle')
    time_dict = pick('time_dict.pickle')
    favourites = pick('favourites.pickle')
    directory = os.path.dirname(last_video)
    filename = os.path.basename(last_video)
    if favourite == 'true':
        print('adding to favourite')
        # 'changed' triggers gif regeneration on the next index() render.
        favourites[last_video] = {'time':float(favourite_time),'changed':True}
        cache(favourites, 'favourites.pickle')
    # Persist resume position for this video and its folder entry.
    folders[directory] = {
        'full_path': str(last_video),
        'filename': str(filename),
        'last_time': float(last_time)
    }
    time_dict[last_video] = last_time
    cache(time_dict, 'time_dict.pickle')
    cache(folders, 'cache.pickle')
    # Only video files in the same folder take part in next/previous.
    _dir_list = os.listdir(directory)
    dir_list = [_ for _ in _dir_list if any(_.endswith(__) for __ in video_file_types)]
    if command == 'next':
        next_file = directory + "\\" + dir_list[dir_list.index(filename) + 1]
        return redirect("http://127.0.0.1:5000/viewer/" + next_file.replace('\\', '>'))
    elif command == 'previous':
        previous_file = directory + "\\" + dir_list[dir_list.index(filename) - 1]
        return redirect("http://127.0.0.1:5000/viewer/" + previous_file.replace('\\', '>'))
    elif command == 'backspace':
        return redirect('http://127.0.0.1:5000/')
    elif command == 'exit':
        # Clean up copied media, keeping favourites' gifs, icons, recent
        # thumbnails/gifs and the currently allowed images.
        for file in os.listdir(image_directory):
            if file not in [os.path.basename(_)+'.gif' for _ in favourites] and not file.startswith('icons8') and file not in [folders[__]['filename'] for __ in folders] and file not in [folders[__]['filename']+'.gif' for __ in folders] and file not in [folders[__]['filename']+'.png' for __ in folders] and file not in allowed_images:
                os.remove(image_directory+'\\'+file)
        exit()
    return ''
# call the 'run' method
app.run()
print('done') | olusegvn/VideoPlayer | engine/AIVideoPlayerBackend.py | AIVideoPlayerBackend.py | py | 7,987 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.isfile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_numbe... |
31272615348 | # -*- coding: utf-8 -*-
"""Image transformation test meant to be run with pytest."""
import sys
import pytest
from confmap import ImageTransform
from confmap import HyperbolicTiling
sys.path.append("tests")
def test_tilesAndTransform():
    """End-to-end smoke test: conformal transform then hyperbolic tiling.

    Exercises ImageTransform (mirror + transform) on the sample image and
    feeds the result into HyperbolicTiling; passes if no exception is raised.
    """
    im=ImageTransform('./examples/sample1.png',0,data=None
                      ,c=1.*(1.+0.j),r=1.*(1.+0.j)
                      ,d=0.08+0.55j,output_width=750
                      ,output_height=1000,blur=False,smoothshift=-0,shift=0.)
    im.mirror(Y=2,X=1)
    res=im.transform(print_and_save=False)
    # Reuse the transformed data as the source of the tiling.
    HT=HyperbolicTiling('./examples/sample1.png',prefix='./examples/',suffix='0',
                        output_width=1550,output_height=640,data=res)
    im=ImageTransform(HT,d=0.04)
    im.arctan()
    im.similitude(c=1.9)
    HT.transform(c=0.95,d=0.+0.0j,backcolor=True,vanishes=False,
                 nbit=25,delta=0e-3,print_and_save=True,
                 sommets=(6,4,4,4,6,4,4,4))
    return True
if __name__ == "__main__":
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main()
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "confmap.ImageTransform",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "confmap.Hyperbol... |
11415062176 | """
[
[
[
"M: How long have you been teaching in this middle school?",
"W: For ten years. To be frank, I'm tired of teaching the same textbook for so long though I do enjoy being a teacher. I'm considering trying something new."
],
[
{
"question": "What's the woman probably going to do?",
"choice": [
"To teach a different textbook.",
"To change her job.",
"To learn a different textbook."
],
"answer": "To change her job."
},
{
"question": "If the man and his wife go on the recommended package tour, how much should they pay?",
"choice": [
"$1,088.",
"$1,958.",
"$2,176."
],
"answer": "$1,958."
}
],
"14-349"
],
...
"""
import json
import argparse
from pathlib import Path
from typing import Dict, List, Mapping, Generator, Optional, Union
from copy import deepcopy
import itertools
import re
import logging
from .reader import DatasetReader
from .types import (Sample, SingleQuestionSample,
SingleQuestionSingleOptionSample, NLIWithOptionsSample,
PureNLISample)
from dataclasses import dataclass, asdict
logger = logging.getLogger(__name__)
class DreamReader(DatasetReader):
    """Reader for the DREAM dialogue-comprehension dataset (JSON format)."""

    def __init__(self,
                 input_type: str = 'DreamJSON',
                 output_type: str = 'SingleQuestionSample'):
        # Only the native DREAM JSON layout is supported as input.
        if input_type != 'DreamJSON':
            raise ValueError(f"{input_type} unsupported")
        self.input_type = input_type
        self.output_type = output_type
        # Collapses runs of underscores in fill-in-the-blank questions to one.
        self.fitb_pattern = re.compile(r'_+')

    def _read_data(self, path: Path) -> Dict:
        # Raw file is a JSON list of [dialogue, questions, id] triples.
        with open(path) as f:
            samples = json.load(f)
        return samples

    def read(self, path: Path,
             return_dict: bool = False) -> List[Union[Sample, Dict]]:
        """Read *path* and return one sample per (passage, question) pair.

        :param path: DREAM JSON file
        :param return_dict: return plain dicts instead of dataclass instances
        :raises ValueError: if ``output_type`` is not 'SingleQuestionSample'
        """
        def reader_func(p: Path) -> List[Sample]:
            samples = self._read_data(p)
            # Give names to fields
            json_samples = []
            for s in samples:
                json_samples.append({
                    'passage': s[0],
                    'questions': s[1],
                    'id': s[2]
                })
            return json_samples
        if self.output_type == 'SingleQuestionSample':
            def sample_converter(x: Dict) -> Dict:
                # Do some preprocessing here (mutates x in place)
                # combine the dialogue sentences
                x['passage'] = ' '.join(x['passage'])
                # fix fitb format
                for q_n, q in enumerate(x['questions']):
                    x['questions'][q_n]['question'] = self.fitb_pattern.sub(
                        '_', x['questions'][q_n]['question'])
                # number the answer: replace the answer text by its index
                for q_n, question in enumerate(x['questions']):
                    # this will throw if answer does not match one of the
                    # choices exactly
                    idx = question['choice'].index(question['answer'])
                    question['answer'] = idx
                return x

            def aggregate_converter(
                    x: List[Dict]) -> List[SingleQuestionSample]:
                # Flatten: one SingleQuestionSample per question, ids suffixed
                # with the question index.
                all_res = []
                for s in x:
                    para = s['passage']
                    for q_n, q in enumerate(s['questions']):
                        all_res.append(
                            SingleQuestionSample(
                                id=s['id'] + f"_{q_n}",
                                question=q['question'],
                                article=para,
                                options=q['choice'],
                                answer=q['answer']))
                return all_res
        else:
            # NOTE(review): message typo "outpu_type" kept — it is a runtime string.
            raise ValueError(f"outpu_type {self.output_type} not supported")
        input_samples = [sample_converter(s) for s in reader_func(path)]
        output_samples = aggregate_converter(input_samples)
        if return_dict:
            return [s.__dict__ for s in output_samples]
        else:
            return output_samples
| nli-for-qa/conversion | qa2nli/qa_readers/dream.py | dream.py | py | 4,251 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "reader.DatasetReader",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
5671705163 | import random
import uuid
import pytest
from aws.src.database.domain.dynamo_domain_objects import Tenure, HouseholdMember, TenuredAsset, Asset, AssetTenure, \
Patch, Person, PersonTenure
def test_generates_tenure(tenure_dict: dict):
    """Tenure.from_data should map the raw dict onto typed domain objects."""
    tenure = Tenure.from_data(tenure_dict)
    assert isinstance(tenure, Tenure)
    # BUG FIX: the old assertions compared against str(uuid.uuid4()), which
    # generates a brand-new UUID on every call and can never equal the id
    # stored in the fixture — the test always failed. Compare against the
    # fixture's own values instead (matching test_generates_asset/person).
    assert tenure.id == tenure_dict.get('id')
    assert isinstance(tenure.tenuredAsset, TenuredAsset)
    assert tenure.tenuredAsset.id == tenure_dict.get('tenuredAsset').get('id')
    assert isinstance(tenure.householdMembers[0], HouseholdMember)
    assert tenure.householdMembers[0].fullName == 'FAKE_First FAKE_Last'
def test_generates_asset(asset_dict: dict):
    """Asset.from_data should round-trip ids and nested structures."""
    asset = Asset.from_data(asset_dict)
    assert isinstance(asset, Asset)
    assert asset.id == asset_dict.get('id')
    expected_line1 = asset_dict.get('assetAddress').get('addressLine1')
    assert asset.assetAddress.get('addressLine1') == expected_line1
    asset_tenure = asset.tenure
    assert isinstance(asset_tenure, AssetTenure)
    assert asset_tenure.id == asset_dict.get('tenure').get('id')
    first_patch = asset.patches[0]
    assert isinstance(first_patch, Patch)
    assert first_patch.id == asset_dict.get('patches')[0].get('id')
def test_generates_person(person_dict: dict):
    """Person.from_data should round-trip the person id and its tenures."""
    person = Person.from_data(person_dict)
    assert isinstance(person, Person)
    assert person.id == person_dict.get('id')
    first_tenure = person.tenures[0]
    assert isinstance(first_tenure, PersonTenure)
    assert first_tenure.id == person_dict.get('tenures')[0].get('id')
@pytest.fixture
def tenure_dict():
    """Raw tenure payload as stored in Dynamo; ids/references randomized."""
    return {
        "id": str(uuid.uuid4()),
        "charges": {
            "billingFrequency": "Weekly",
            "combinedRentCharges": 0,
            "combinedServiceCharges": 0,
            "currentBalance": 3019.14,
            "originalRentCharge": 0,
            "originalServiceCharge": 0,
            "otherCharges": 0,
            "rent": 0,
            "serviceCharge": 0,
            "tenancyInsuranceCharge": 0
        },
        "endOfTenureDate": "2017-11-06",
        "evictionDate": "1900-01-01",
        "householdMembers": [
            {
                "id": str(uuid.uuid4()),
                "dateOfBirth": "1066-07-29",
                "fullName": "FAKE_First FAKE_Last",
                "isResponsible": True,
                "personTenureType": "Tenant",
                "type": "person"
            }
        ],
        "informHousingBenefitsForChanges": False,
        "isMutualExchange": False,
        "isSublet": False,
        "legacyReferences": [
            {
                "name": "uh_tag_ref",
                "value": f"{random.randint(10 ** 7, 10 ** 8 - 1)}/01"
            },
            {
                "name": "u_saff_tenancy",
                "value": ""
            }
        ],
        "notices": [
            {
                "effectiveDate": "1900-01-01",
                "endDate": None,
                "expiryDate": "1900-01-01",
                "servedDate": "1900-01-01",
                "type": ""
            }
        ],
        "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
        "potentialEndDate": "1900-01-01",
        "startOfTenureDate": "2017-05-30",
        "subletEndDate": "1900-01-01",
        "successionDate": "1900-01-01",
        "tenuredAsset": {
            "id": str(uuid.uuid4()),
            "fullAddress": "THE HACKNEY SERVICE CENTRE 1 Hackney Service Centre E8 1DY",
            "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
            "type": "Dwelling",
            "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
        },
        "tenureType": {
            "code": "THO",
            "description": "Temp Hostel"
        },
        "terminated": {
            "isTerminated": True,
            "reasonForTermination": ""
        }
    }
@pytest.fixture
def asset_dict():
    """Raw asset payload as stored in Dynamo; ids/references randomized."""
    return {
        "id": str(uuid.uuid4()),
        "assetAddress": {
            "addressLine1": "FLAT 10 220 TEST ROAD",
            "addressLine2": "HACKNEY",
            "addressLine3": "LONDON",
            "postCode": "E8 1AA",
            "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
        },
        "assetCharacteristics": {
            "numberOfBedrooms": 1,
            "numberOfLifts": 0,
            "numberOfLivingRooms": 0,
            "yearConstructed": "0"
        },
        "assetId": str(random.randint(10 ** 12, 10 ** 13 - 1)),
        "assetLocation": {
            "parentAssets": [
                {
                    "id": str(uuid.uuid4()),
                    "name": "Hackney Homes",
                    "type": "NA"
                }
            ],
            "totalBlockFloors": 0
        },
        "assetManagement": {
            "isCouncilProperty": False,
            "isNoRepairsMaintenance": False,
            "isTMOManaged": False,
            "managingOrganisation": "London Borough of Hackney",
            "managingOrganisationId": str(uuid.uuid4()),
            "owner": "KUS",
            "propertyOccupiedStatus": "VR"
        },
        "assetType": "Dwelling",
        "isActive": 0,
        "parentAssetIds": str(uuid.uuid4()),
        "patches": [
            {
                "id": str(uuid.uuid4()),
                "domain": "MMH",
                "name": "SN4",
                "parentId": str(uuid.uuid4()),
                "patchType": "patch",
                "responsibleEntities": [
                    {
                        "id": str(uuid.uuid4()),
                        "name": "Fake_First Fake_Last",
                        "responsibleType": "HousingOfficer"
                    }
                ],
                "versionNumber": None
            }
        ],
        "rootAsset": "ROOT",
        "tenure": {
            "id": str(uuid.uuid4()),
            "endOfTenureDate": "2050-12-12T00:00:00Z",
            "paymentReference": str(random.randint(10 ** 12, 10 ** 13 - 1)),
            "startOfTenureDate": "2030-12-12T00:00:00Z",
            "type": "Secure"
        },
        "versionNumber": 3
    }
@pytest.fixture
def person_dict():
    """Raw person payload as stored in Dynamo; ids/references randomized."""
    return {
        "id": str(uuid.uuid4()),
        "dateOfBirth": "1962-04-18T00:00:00.0000000Z",
        "firstName": "FAKE_First",
        "lastModified": "2022-09-06T06:31:03.5321566Z",
        "links": [
        ],
        "personTypes": [
            "Tenant",
            "HouseholdMember"
        ],
        "preferredFirstName": "FAKE_First",
        "preferredSurname": "FAKE_Last",
        "preferredTitle": "Reverend",
        "surname": "FAKE_Last",
        "tenures": [
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "2 Fake Road, N16 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": None,
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "2013-12-23",
                "type": "Secure",
                "uprn": "100021063882"
            },
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "75 Fake Road, E5 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": "2012-10-26",
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "2012-04-19",
                "type": "Temp Annex",
                "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
            },
            {
                "id": str(uuid.uuid4()),
                "assetFullAddress": "15 Fake Road N16 1AA",
                "assetId": str(uuid.uuid4()),
                "endDate": None,
                "paymentReference": str(random.randint(10 ** 10, 10 ** 11 - 1)),
                "propertyReference": str(random.randint(10 ** 7, 10 ** 8 - 1)),
                "startDate": "1997-07-24T00:00:00.0000000Z",
                "type": "Leasehold (RTB)",
                "uprn": str(random.randint(10 ** 12, 10 ** 13 - 1))
            }
        ],
        "title": "Reverend",
        "versionNumber": 1
    }
| LBHackney-IT/mtfh-scripts | aws/tests/domain/test_dynamo_domain_objects.py | test_dynamo_domain_objects.py | py | 8,127 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "aws.src.database.domain.dynamo_domain_objects.Tenure.from_data",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aws.src.database.domain.dynamo_domain_objects.Tenure",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "aws.src.database.domain.dynam... |
26095879865 | import os
import sys
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import Optimizer
writer = SummaryWriter('./runs')  # TensorBoard logger shared by train/eval
grad_clip = 1.0 # clip gradients at an absolute value of
save_prefix=''  # checkpoint path; set by train() and read by save()/train()
def clip_gradient(optimizer, grad_clip):
    """Clamp every parameter gradient to ``[-grad_clip, grad_clip]`` in place.

    Guards against exploding gradients during back-propagation.

    :param optimizer: optimizer (or any object with ``param_groups``) holding
        the parameters whose gradients were just computed
    :param grad_clip: absolute clip value
    """
    for group in optimizer.param_groups:
        # clip_grad_value_ skips parameters whose .grad is None and avoids
        # the deprecated .data access the old manual loop relied on.
        torch.nn.utils.clip_grad_value_(group['params'], grad_clip)
def train(train_iter, dev_iter, model, args):
    """Train *model* on *train_iter*, evaluating on *dev_iter* periodically.

    Loads model+optimizer from ``args.snapshot`` when that checkpoint exists,
    saves the best-accuracy checkpoint to ``save_prefix`` (module global),
    and early-stops by raising KeyboardInterrupt.
    """
    # global args
    global save_prefix
    save_dir = args.save_dir
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    filename = args.snapshot
    save_prefix = os.path.join(save_dir, filename)
    if args.snapshot:
        snapshot = os.path.join(args.save_dir, args.snapshot)
        if os.path.exists(snapshot):
            print('\nLoading model from {}...\n'.format(snapshot))
            model = torch.load(snapshot)['model']
            optimizer=torch.load(snapshot)['optimizer']
    else:
        # Fresh run: Adam wrapped in the project's LR-scheduling Optimizer.
        # NOTE(review): if args.snapshot is set but the file is missing,
        # 'optimizer' is never bound and the loop below raises — confirm intended.
        optimizer = Optimizer.Optimizer(
            torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09))
    if args.cuda:
        model.cuda()
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    steps = 0
    best_acc = 0
    last_step = 0
    model.train()
    for epoch in range(1, args.epochs + 1):
        for batch in train_iter:
            feature, target = batch.text, batch.label
            # Transpose to batch-first and shift labels to be 0-based.
            feature.t_(), target.sub_(1)
            # w.add_graph(model, (feature,))
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            optimizer.zero_grad()
            logits = model(feature)
            loss = F.cross_entropy(logits, target)
            loss.backward()
            # Clip gradients on the underlying torch optimizer.
            clip_gradient(optimizer.optimizer, grad_clip)
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                corrects = (torch.max(logits, 1)[1].view(target.size()).data == target.data).sum()
                train_acc = corrects / batch.batch_size
                sys.stdout.write(
                    '\rBatch[{}] - loss: {:.6f} acc: {:.4f}({}/{})'.format(steps,
                                                                           loss.item(),
                                                                           train_acc,
                                                                           corrects,
                                                                           batch.batch_size))
                writer.add_scalar('Batch/train_loss', loss.item() ,optimizer.step_num)
                writer.add_scalar('Batch/learning_rate', optimizer.lr, optimizer.step_num)
            if steps % args.test_interval == 0:
                dev_acc = eval(dev_iter, model, args,optimizer)
                if dev_acc > best_acc:
                    best_acc = dev_acc
                    last_step = steps
                    if args.save_best:
                        print('Saving best model, acc: {:.4f}\n'.format(best_acc))
                        save(model, best_acc,optimizer)
                        writer.add_scalar('best/acc', best_acc, optimizer.step_num)
                elif steps - last_step >= args.early_stopping:
                    print('\nearly stop by {} steps, acc: {:.4f}'.format(args.early_stopping, best_acc))
                    raise KeyboardInterrupt
            else:
                # Average current fc weights with the saved checkpoint's.
                # NOTE(review): this runs on every batch that is NOT a test
                # interval and re-loads the checkpoint from disk each time —
                # confirm this attachment and cost are intended.
                w=model.fc.weight+ torch.load(save_prefix)['model'].fc.weight
                # print('1')
                b=model.fc.bias+ torch.load(save_prefix)['model'].fc.bias
                model.fc.weight=torch.nn.Parameter(w/2)
                model.fc.bias = torch.nn.Parameter(b / 2)
def eval(data_iter, model, args, optimizer):
    """Evaluate *model* on *data_iter* and return its accuracy.

    Also logs the averaged loss and the current learning rate to TensorBoard.

    NOTE: this function shadows the built-in ``eval``; the name is kept
    because train() calls it by this name.

    :param data_iter: iterator of batches exposing ``.text`` / ``.label``
        and a ``.dataset`` with a length
    :param model: model under evaluation
    :param args: namespace with at least ``cuda``
    :param optimizer: wrapper exposing ``step_num`` and ``lr`` for logging
    :return: accuracy over the whole dataset
    """
    model.eval()
    corrects, avg_loss = 0, 0
    # No gradients are needed during evaluation; skip building the graph.
    with torch.no_grad():
        for batch in data_iter:
            feature, target = batch.text, batch.label
            feature.t_(), target.sub_(1)  # batch-first + 0-based labels
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            logits = model(feature)
            loss = F.cross_entropy(logits, target)
            avg_loss += loss.item()
            corrects += (torch.max(logits, 1)
                         [1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    avg_loss /= size
    accuracy = corrects / size
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}({}/{}) \n'.format(avg_loss,
                                                                     accuracy,
                                                                     corrects,
                                                                     size))
    writer.add_scalar('Evaluation/train_loss', avg_loss, optimizer.step_num)
    writer.add_scalar('Evaluation/learning_rate', optimizer.lr, optimizer.step_num)
    # BUG FIX: the model was left in eval mode, so dropout/batch-norm stayed
    # disabled for the rest of training after each mid-training evaluation.
    model.train()
    return accuracy
def save(model, best_acc,optimizer):
    # Checkpoint the whole model and optimizer (full pickled objects, not
    # state_dicts) to the module-global save_prefix path set by train().
    state = {
        'best_acc': best_acc,
        'model': model,
        'optimizer':optimizer}
    torch.save(state, save_prefix)
| dubochao/CNN-sentiment-analysis | train.py | train.py | py | 5,496 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.ma... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.