seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73815019386 | import logging
from os import environ
from unittest.mock import patch
import pytest
from bonobo import settings
from bonobo.errors import ValidationError
TEST_SETTING = "TEST_SETTING"
def test_to_bool():
    """to_bool() maps common false-y strings to False and truthy ones to True."""
    for falsy in ("", "FALSE", "NO", "0"):
        assert not settings.to_bool(falsy)
    for truthy in ("yup", "True", "yes", "1"):
        assert settings.to_bool(truthy)
def test_setting():
    """Exercise Setting.get()/set() caching, env overrides, defaults and validation."""
    s = settings.Setting(TEST_SETTING)
    assert s.get() is None
    with patch.dict(environ, {TEST_SETTING: "hello"}):
        # The first get() result is cached; clear() forces a re-read of the env.
        assert s.get() is None
        s.clear()
        assert s.get() == "hello"

    s = settings.Setting(TEST_SETTING, default="nope")
    # Bug fix: compare by equality, not identity — `is` against str/int
    # literals relies on CPython interning and is a SyntaxWarning on 3.8+.
    assert s.get() == "nope"
    with patch.dict(environ, {TEST_SETTING: "hello"}):
        assert s.get() == "nope"
        s.clear()
        assert s.get() == "hello"

    s = settings.Setting(TEST_SETTING, default=0, validator=lambda x: x == 42)
    with pytest.raises(ValidationError):
        assert s.get() == 0
    s.set(42)
    with pytest.raises(ValidationError):
        s.set(21)
def test_default_settings():
    """Defaults: DEBUG/PROFILE/QUIET off, logging level INFO; DEBUG env raises it."""
    # Ensure cached values from other tests do not leak into this one.
    settings.clear_all()
    assert settings.DEBUG.get() is False
    assert settings.PROFILE.get() is False
    assert settings.QUIET.get() is False
    # NOTE(review): logging._checkLevel is a private stdlib API — confirm it
    # exists on all supported Python versions.
    assert settings.LOGGING_LEVEL.get() == logging._checkLevel("INFO")
    with patch.dict(environ, {"DEBUG": "t"}):
        # Enabling DEBUG should raise the effective logging level to DEBUG.
        settings.clear_all()
        assert settings.LOGGING_LEVEL.get() == logging._checkLevel("DEBUG")
    # Leave a clean settings cache for subsequent tests.
    settings.clear_all()
def test_check():
    """check() passes on defaults but rejects QUIET combined with DEBUG/PROFILE."""
    # With default settings, check() must not raise.
    settings.check()
    with patch.dict(environ, {"DEBUG": "t", "PROFILE": "t", "QUIET": "t"}):
        settings.clear_all()
        # QUIET conflicts with DEBUG/PROFILE, so check() must reject the combo.
        with pytest.raises(RuntimeError):
            settings.check()
    # Restore the cached settings for subsequent tests.
    settings.clear_all()
| python-bonobo/bonobo | tests/test_settings.py | test_settings.py | py | 1,851 | python | en | code | 1,564 | github-code | 6 | [
{
"api_name": "bonobo.settings.to_bool",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bonobo.settings",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "bonobo.settings.to_bool",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bonob... |
74183020029 | # -*- coding: utf-8 -*-
from django.conf import urls
from django.contrib.auth import decorators
from .views import HistoriaCreateView
from .views import HistoriaDetailView
from .views import HistoriaPacienteListView
from .views import HistoriaUpdateView
HISTORIA_CREATE_URL_NAME = 'historia_create'
HISTORIA_UPDATE_URL_NAME = 'historia_update'
HISTORIA_DETAIL_URL_NAME = 'historia_detail'
HISTORIA_LIST_URL_NAME = 'historia_list'
# URL routes for the Historia app; every view requires an authenticated user.
# NOTE(review): urls.patterns("") was deprecated in Django 1.8 and removed in
# 1.10 — on modern Django this must become a plain list of url()/path()
# entries. Confirm the pinned Django version before upgrading.
urlpatterns = urls.patterns("",
    urls.url(
        regex=r'^nueva/$',
        view=decorators.login_required(HistoriaCreateView.as_view()),
        name=HISTORIA_CREATE_URL_NAME
    ),
    urls.url(
        # pk is the primary key of the record to edit, e.g. /editar/42
        regex=r'^editar/(?P<pk>\d+)$',
        view=decorators.login_required(HistoriaUpdateView.as_view()),
        name=HISTORIA_UPDATE_URL_NAME
    ),
    urls.url(
        regex=r'^(?P<pk>\d+)/$',
        view=decorators.login_required(HistoriaDetailView.as_view()),
        name=HISTORIA_DETAIL_URL_NAME
    ),
    urls.url(
        # Lists all historias belonging to one patient.
        regex=r'^paciente/(?P<paciente_id>\d+)/$',
        view=decorators.login_required(HistoriaPacienteListView.as_view()),
        name=HISTORIA_LIST_URL_NAME
    )
)
| gustavoatt/consultas | consultas_proyecto/historias_app/urls.py | urls.py | py | 1,138 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "djang... |
25549629589 | # coding: utf-8
__author__ = "Ciprian-Octavian Truică"
__copyright__ = "Copyright 2020, University Politehnica of Bucharest"
__license__ = "GNU GPL"
__version__ = "0.1"
__email__ = "ciprian.truica@cs.pub.ro"
__status__ = "Production"
from tokenization import Tokenization
from vectorization import Vectorization
from topicmodeling import TopicModeling
import sys
import pandas as pd
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import cpu_count
import time
def tkns(text):
    """Tokenize one DataFrame row via the module-level tokenizer `tkn`.

    Expects mapping-style access with 'title' and 'content' fields; returns
    the flattened, space-joined token string.
    """
    sentences = tkn.createCorpus(text['title'], apply_FE=False) \
        + tkn.createCorpus(text['content'], apply_FE=False)
    return ' '.join(' '.join(sentence) for sentence in sentences)
def processElement(row):
    """Tokenize one numpy row (title at index 0, content at index 1).

    Worker function for ProcessPoolExecutor.map; relies on the module-level
    tokenizer `tkn` being constructed before the pool forks.
    """
    sentences = tkn.createCorpus(row[0], apply_FE=False) \
        + tkn.createCorpus(row[1], apply_FE=False)
    return ' '.join(' '.join(sentence) for sentence in sentences)
if __name__ == '__main__':
    # CLI: input CSV, number of topics, words per topic, iteration count.
    fin = sys.argv[1]
    num_topics = int(sys.argv[2])
    num_words = int(sys.argv[3])
    num_iterations = int(sys.argv[4])
    # Leave a couple of cores free for the OS / the parent process.
    no_threads = cpu_count() - 2
    print("Start Read File!")
    df = pd.read_csv(fin)
    print("End Read File!")
    print("Start Tokenization!")
    start = time.time() * 1000
    tkn = Tokenization()
    # with UDF
    # df = df.apply(tkns, axis=1)
    # clean_texts = df.to_list()
    clean_texts = []
    # Tokenize rows in parallel; each worker receives one (title, content) row.
    with ProcessPoolExecutor(max_workers=no_threads) as worker:
        for result in worker.map(processElement, df.to_numpy()):
            if result:
                clean_texts.append(result)
    end = time.time() * 1000
    print("Execution time (ms)", end - start)
    print("End Tokenization!")
    print("Start Vectorization!")
    vec = Vectorization(clean_texts)
    vec.vectorize()
    id2word = vec.getID2Word()
    corpus = vec.getTFIDFNorm()
    print("End Vectorization!")
    tm = TopicModeling(id2word=id2word, corpus=corpus)
    print("Start Topic Modeling NNF!")
    # Bug fix: this timer reported seconds under an "(ms)" label; scale by
    # 1000 exactly like the tokenization timer above.
    start = time.time() * 1000
    topicsNMF = tm.topicsNMF(num_topics=num_topics, num_words=num_words, num_iterations=num_iterations)
    print("=============NMF=============")
    for topic in topicsNMF:
        print("TopicID", topic[0], topic[1])
    print("=============================")
    end = time.time() * 1000
    print("Execution time (ms)", end - start)
    print("End Topic Modeling NNF!")
    # print("Start Topic Modeling LDA!")
    # print("=============LDA=============")
    # topicsLDA = tm.topicsLDA(num_topics=num_topics, num_words=num_words, num_iterations=num_iterations)
    # for topic in topicsLDA:
    #     print("TopicID", topic[0], topic[1])
    # print("=============================")
    # print("End Topic Modeling LDA!")
| cipriantruica/news_diffusion | news-topic-modeling/main.py | main.py | py | 2,856 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number... |
33875332851 | import torch
from care_nl_ica.independence.hsic import HSIC
class IndependenceChecker(object):
    """
    Class for encapsulating independence test-related methods.

    Every HSIC test is run with a Bonferroni correction factor of 4
    (the number of comparisons in the bivariate case).
    """

    def __init__(self, hparams) -> None:
        """Build the HSIC test helper.

        :param hparams: options object; only ``num_permutations`` is read here.
        """
        super().__init__()
        self.hparams = hparams
        self.test = HSIC(hparams.num_permutations)
        print("Using Bonferroni = 4")

    def check_bivariate_dependence(self, x1, x2):
        """Run HSIC on each of the four (x1 dim, x2 dim) pairings.

        :param x1: batch of bivariate samples, shape (num_samples, 2)
        :param x2: batch of bivariate samples, shape (num_samples, 2)
        :return: (decisions, var_map) — decisions in the fixed order
            (0,1), (0,0), (1,0), (1,1), plus the fixed variable map.
        """
        decisions = []
        var_map = [1, 1, 2, 2]
        with torch.no_grad():
            # Same pair order as the original hand-unrolled version.
            for i, j in ((0, 1), (0, 0), (1, 0), (1, 1)):
                decisions.append(
                    self.test.run_test(x1[:, i], x2[:, j], bonferroni=4).item()
                )
        return decisions, var_map

    def check_multivariate_dependence(
        self, x1: torch.Tensor, x2: torch.Tensor
    ) -> torch.Tensor:
        """
        Carries out HSIC for the multivariate case, all pairs are tested

        :param x1: tensor of the first batch of variables in the shape of (num_elem, num_dim)
        :param x2: tensor of the second batch of variables in the shape of (num_elem, num_dim)
        :return: the adjacency matrix
        """
        num_dim = x1.shape[-1]
        # Cleanup: removed the unused max_edge_num local and its leftover
        # debug print to stdout.
        adjacency_matrix = torch.zeros(num_dim, num_dim).bool()
        with torch.no_grad():
            for i in range(num_dim):
                for j in range(num_dim):
                    adjacency_matrix[i, j] = self.test.run_test(
                        x1[:, i], x2[:, j], bonferroni=4
                    ).item()
        return adjacency_matrix
| rpatrik96/nl-causal-representations | care_nl_ica/independence/indep_check.py | indep_check.py | py | 1,920 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "care_nl_ica.independence.hsic.HSIC",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "to... |
74553223546 | import time
from datetime import datetime, timedelta
from num2words import num2words
# Todo: returns an timedelta:
def calculate_time(sleep_time: float) -> timedelta:
    """Sleep for *sleep_time* seconds and return the elapsed wall-clock time.

    Args:
        sleep_time (float): How long (in seconds) to block before measuring.

    Returns:
        timedelta: The measured duration of the sleep.
    """
    started = datetime.now()
    time.sleep(sleep_time)
    return datetime.now() - started
def split_time(time: timedelta) -> dict:
    """Decompose a timedelta into hours, minutes, seconds and microseconds.

    Args:
        time (timedelta): Duration to split. (The parameter name shadows the
            ``time`` module; kept for backward compatibility with any
            keyword callers.)

    Returns:
        dict: Keys ``hours``, ``minutes``, ``seconds`` (0-59) and
        ``milliseconds``.
    """
    total_seconds = time.seconds
    # NOTE(review): the "milliseconds" key actually carries the raw
    # microsecond count (0-999999), matching the original behaviour that
    # readable_time() compensates for — confirm before changing units.
    return {
        "hours": total_seconds // 3600,
        "minutes": (total_seconds // 60) % 60,
        # Bug fix: the original stored the *total* second count here, so a
        # 90-second delta was reported as "one minute and ninety seconds".
        "seconds": total_seconds % 60,
        "milliseconds": time.microseconds,
    }
def readable_time(splitted_time: dict) -> str:
    """Translate a split-time dictionary into an English sentence.

    Args:
        splitted_time (dict): Dictionary with ``hours``, ``minutes``,
            ``seconds`` and ``milliseconds`` keys.

    Returns:
        str: How long the operation took, in words.
    """
    hours = splitted_time["hours"]
    minutes = splitted_time["minutes"]
    seconds = splitted_time["seconds"]
    milliseconds = splitted_time["milliseconds"]
    readable_time = ""
    if hours > 0:
        descriptive_hours = num2words(hours)
        if hours == 1:
            support = "hour"
        else:
            support = "hours"
        readable_time += f"{descriptive_hours} {support}, "
    if minutes > 0:
        if minutes == 1:
            support = "minute"
        else:
            support = "minutes"
        descriptive_minutes = num2words(minutes)
        readable_time += f"{descriptive_minutes} {support} and "
    if seconds > 0:
        descriptive_seconds = num2words(seconds)
        if seconds == 1:
            support = "second"
        else:
            support = "seconds"
        readable_time += f"{descriptive_seconds} {support}"
    if milliseconds > 0 and minutes < 1:
        milli = str(milliseconds)
        # NOTE(review): this keeps only the first two *digits* of the
        # microsecond count (e.g. 5000 -> "fifty"), rather than converting
        # to milliseconds — confirm whether // 1000 was intended.
        rounded_milliseconds = milli[0:2]
        if int(rounded_milliseconds) == 1:
            support = "millisecond"
        else:
            support = "milliseconds"
        descriptive_milliseconds = num2words(rounded_milliseconds)
        readable_time += f" and {descriptive_milliseconds} {support}"
    # NOTE(review): time_to_run_function is a module-level global assigned in
    # the __main__ block below; calling this function from anywhere else
    # raises NameError — consider passing the timedelta as a parameter.
    return (
        f"Your function took {readable_time} to run ({time_to_run_function})"
    )
if __name__ == "__main__":
    # Demo: time a 1.5-second sleep and report the duration in words.
    sleep_time = 1.5
    # readable_time() also reads this module-level name directly — keep it.
    time_to_run_function = calculate_time(sleep_time)
    splitted_time = split_time(time_to_run_function)
    human_time = readable_time(splitted_time)
    print(human_time)
| bvmcardoso/pwn | challenge.py | challenge.py | py | 3,411 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.... |
73580429947 | import torch
import torchvision
import torchvision.datasets as datasets
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
def convert(imgf, labelf, outf, n):
    """Convert a pair of MNIST idx files into a CSV with one row per image.

    Each output row is ``label,pix0,pix1,...,pix783``.

    Args:
        imgf: Path to the idx3-ubyte image file.
        labelf: Path to the idx1-ubyte label file.
        outf: Path of the CSV file to write.
        n: Number of image/label pairs to convert.
    """
    # Context managers guarantee all three handles are closed even on error.
    with open(imgf, "rb") as f, open(labelf, "rb") as l, open(outf, "w") as o:
        f.read(16)  # skip the idx3 header (magic, count, rows, cols)
        l.read(8)   # skip the idx1 header (magic, count)
        # Bug fix: the original iterated range(n + 1), reading one record
        # past the requested count — on exact-sized files that crashes with
        # ord(b'') at end-of-file.  Rows are now also streamed to the output
        # instead of being accumulated in memory first.
        for _ in range(n):
            image = [ord(l.read(1))]
            image.extend(ord(f.read(1)) for _ in range(28 * 28))
            o.write(",".join(str(pix) for pix in image) + "\n")
def visualize(index: int):
    """Show training image *index* as 28x28 pixels with its label as title."""
    # Reads the module-level train_data / train_labels loaded in __main__.
    plt.title((train_labels[index]))
    plt.imshow(train_data[index].reshape(28, 28), cmap=cm.binary)
    plt.show()
def check_count_of_each_label():
    """Print and bar-plot how often each digit 0-9 occurs in train_labels."""
    # Reads the module-level train_labels loaded in __main__.
    y_value = np.zeros((1, 10))
    for i in range(10):
        print("Occurence of ", i, "=", np.count_nonzero(train_labels == i))
        # NOTE(review): digit i is stored at index i-1, so counts are shifted
        # by one position in the bar chart (digit 0 lands in the last slot) —
        # confirm whether this offset is intentional.
        y_value[0, i-1] = np.count_nonzero(train_labels == i)
    y_value = y_value.ravel()
    x_value = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    plt.xlabel('label')
    plt.ylabel('count')
    plt.bar(x_value, y_value, 0.7, color='g')
    plt.show()
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x)."""
    return 1.0 / (np.exp(-x) + 1.0)
def softmax(x):
    """Column-wise softmax: exponentials normalised over axis 0."""
    exps = np.exp(x)
    return exps / exps.sum(axis=0)
def sigmoid_backward(dA, cache):
    """Backprop through a sigmoid: dZ = dA * s(Z) * (1 - s(Z)), Z cached."""
    Z = cache
    sig = 1 / (1 + np.exp(-Z))
    dZ = dA * sig * (1 - sig)
    assert (dZ.shape == Z.shape)
    return dZ
def softmax_backward(Z, cache):
    """Element-wise derivative of the softmax output w.r.t. its logits.

    Computes dZ[i, j] = p[i, j] * (1 - p[i, j]) where p is the column-wise
    softmax of the cached pre-activation — i.e. only the diagonal of the
    full softmax Jacobian, matching the original per-element formula
    exp(z_c) * sum_{j != c} exp(z_j) / (sum_j exp(z_j))^2.

    Args:
        Z: Unused; kept for signature compatibility with the other
            ``*_backward`` helpers (the cached value is authoritative).
        cache: Pre-activation matrix of shape (num_classes, num_samples).

    Returns:
        dZ with the same shape as ``cache``.
    """
    Z = cache
    # Bug fix / generalisation: the original hard-coded 42000 samples and
    # 10 classes (the actual training set has 60000 columns); both sizes are
    # now taken from the cached matrix, and the per-element double loop is
    # replaced by the equivalent vectorised expression.
    exps = np.exp(Z)
    p = exps / exps.sum(axis=0, keepdims=True)
    dZ = p * (1.0 - p)
    assert (dZ.shape == Z.shape)
    return dZ
# initializing the parameters weights and bias
def initialize_parameters_deep(layer_dims):
    """Create the weight/bias parameters for an L-layer network.

    Args:
        layer_dims: List of layer widths, e.g. [784, 300, 10].

    Returns:
        dict: {"W1": ..., "b1": ..., ...} with Wl of shape
        (layer_dims[l], layer_dims[l-1]) and bl of shape (layer_dims[l], 1).
    """
    # np.random.seed(1)
    parameters = {}
    L = len(layer_dims)  # number of layers in the network
    for l in range(1, L):
        # Bug fix: np.zeros takes the shape as a single tuple; the original
        # np.zeros(a, b) passed `b` as the dtype and raised a TypeError.
        parameters['W' + str(l)] = np.zeros(
            (layer_dims[l], layer_dims[l - 1])) / np.sqrt(layer_dims[l - 1])  # *0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    # NOTE(review): all-zero weights keep every hidden unit identical during
    # training; the commented-out seed and the /sqrt scaling suggest
    # np.random.randn initialisation was intended — confirm before relying
    # on this for real training runs.
    return parameters
# forward propagation
def linear_forward(A, W, b):
    """Affine step Z = W @ A + b; caches the inputs for backpropagation."""
    Z = W.dot(A) + b
    assert (Z.shape == (W.shape[0], A.shape[1]))
    return Z, (A, W, b)
def linear_activation_forward(A_prev, W, b, activation):
    """One forward layer: affine transform followed by an activation.

    Args:
        A_prev: Activations from the previous layer, shape (n_prev, m).
        W: Layer weights, shape (n, n_prev).
        b: Layer bias, shape (n, 1).
        activation: One of "sigmoid", "relu" or "softmax".

    Returns:
        (A, cache) where cache = (linear_cache, Z) feeds backpropagation.

    Raises:
        ValueError: If *activation* is not a recognised name.
    """
    Z, linear_cache = linear_forward(A_prev, W, b)
    # Bug fix: sigmoid()/softmax() return a single array, so the original
    # tuple-unpacking `A, activation_cache = sigmoid(Z)` mis-assigned (or
    # crashed).  The pre-activation Z is cached explicitly instead.
    if activation == "sigmoid":
        A = sigmoid(Z)
    elif activation == "relu":
        # Bug fix: relu() was never defined in this module; compute inline.
        A = np.maximum(0, Z)
    elif activation == "softmax":
        A = softmax(Z)
    else:
        raise ValueError("unknown activation: %s" % activation)
    return A, (linear_cache, Z)
def L_model_forward(X, parameters):
    """Full forward pass: (L-1) ReLU hidden layers, then a softmax output."""
    caches = []
    A = X
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers):
        A, cache = linear_activation_forward(
            A, parameters['W' + str(layer)], parameters['b' + str(layer)],
            activation="relu")
        caches.append(cache)
    AL, cache = linear_activation_forward(
        A, parameters['W' + str(num_layers)],
        parameters['b' + str(num_layers)], activation="softmax")
    caches.append(cache)
    return AL, caches
# cost function
def compute_cost(AL, Y):
    """Cross-entropy cost: -sum(Y * log(AL)) over all classes and samples."""
    return -np.sum(Y * np.log(AL))
# backward propagation
def linear_backward(dZ, cache):
    """Backprop through the affine step; returns (dA_prev, dW, db)."""
    A_prev, W, _ = cache
    m = A_prev.shape[1]
    dW = 1. / m * dZ.dot(A_prev.T)
    db = (1 / m) * dZ.sum(axis=1, keepdims=True)
    dA_prev = W.T.dot(dZ)
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """Backprop through one activation + affine layer.

    Args:
        dA: Gradient of the cost w.r.t. this layer's activation.
        cache: (linear_cache, activation_cache) from the forward pass.
        activation: "relu", "sigmoid" or "softmax".

    Returns:
        (dA_prev, dW, db): gradients for the previous activation and this
        layer's parameters.

    Raises:
        ValueError: If *activation* is not a recognised name.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        # Bug fix: the original referenced an undefined `dZ` here (its
        # relu_backward call was commented out), raising a NameError.
        Z = activation_cache
        dZ = np.array(dA, copy=True)
        dZ[Z <= 0] = 0  # ReLU passes gradient only where Z was positive
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    elif activation == "softmax":
        dZ = softmax_backward(dA, activation_cache)
    else:
        raise ValueError("unknown activation: %s" % activation)
    return linear_backward(dZ, linear_cache)
def L_model_backward(AL, Y, caches):
    """Full backward pass: softmax output layer, then ReLU hidden layers.

    Args:
        AL: Network output probabilities, shape (num_classes, m).
        Y: One-hot ground-truth labels, same shape as AL.
        caches: Per-layer caches collected by L_model_forward.

    Returns:
        dict: Gradients keyed "dA{l}", "dW{l}", "db{l}".
    """
    grads = {}
    L = len(caches)  # the number of layers
    # Derivative of the cross-entropy cost w.r.t. the output activations.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Bug fix: the original indexed via the *global* layers_dims
    # (M = len(layers_dims) == L + 1); the local layer count is used instead
    # so the function no longer depends on module state.
    current_cache = caches[L - 1]
    (grads["dA" + str(L)], grads["dW" + str(L)],
     grads["db" + str(L)]) = linear_activation_backward(
        dAL, current_cache, activation="softmax")
    for l in reversed(range(L - 1)):
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(
            grads["dA" + str(l + 2)], current_cache, activation="relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
    return grads
# upgrade function for weights and bias
def update_parameters(parameters, grads, learning_rate):
    """One gradient-descent step: p = p - learning_rate * dp for every W/b.

    Args:
        parameters: dict of "W{l}" / "b{l}" arrays.
        grads: dict of matching "dW{l}" / "db{l}" gradients.
        learning_rate: Step size.

    Returns:
        The updated parameters dict (entries are rebound, not mutated).
    """
    # Bug fix: the original read the *global* len_update; the layer count is
    # derived from the parameters themselves (two entries per layer).
    num_layers = len(parameters) // 2
    for l in range(1, num_layers + 1):
        parameters["W" + str(l)] = parameters["W" + str(l)] - \
            (learning_rate * grads["dW" + str(l)])
        parameters["b" + str(l)] = parameters["b" + str(l)] - \
            (learning_rate * grads["db" + str(l)])
    return parameters
def plot_graph(cost_plot):
    """Plot the per-iteration training cost curve (iteration vs. cost)."""
    x_value = list(range(1, len(cost_plot)+1))
    # print(x_value)
    # print(cost_plot)
    plt.xlabel('iteration')
    plt.ylabel('cost')
    # NOTE(review): the third positional argument of plt.plot is a format
    # string; passing the float 0. likely raises a TypeError — was a
    # linewidth= keyword intended?
    plt.plot(x_value, cost_plot, 0., color='g')
def L_layer_model(X, Y, layers_dims, learning_rate, num_iterations, print_cost=False):  # lr was 0.009
    """Train the network with full-batch gradient descent.

    Runs num_iterations forward/backward passes over (X, Y), records the
    cost per iteration and plots the resulting curve before returning the
    learned parameters.
    """
    print("training...")
    cost_history = np.zeros(num_iterations)
    parameters = initialize_parameters_deep(layers_dims)
    for iteration in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost_history[iteration] = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)
    plot_graph(cost_history)
    return parameters
if __name__ == "__main__":
    # Download the raw MNIST idx files into ./data; only the on-disk files
    # are used from here on.
    mnist = datasets.MNIST(
        root='./data', download=True)
    train = pd.DataFrame()
    test = pd.DataFrame()
    # Convert idx -> CSV once and cache the result next to the raw files.
    if os.path.exists('./data/MNIST/raw/mnist_train.csv'):
        train = pd.read_csv("./data/MNIST/raw/mnist_train.csv")
    else:
        convert("./data/MNIST/raw/train-images-idx3-ubyte", "./data/MNIST/raw/train-labels-idx1-ubyte",
                "./data/MNIST/raw/mnist_train.csv", 60000)
        train = pd.read_csv("./data/MNIST/raw/mnist_train.csv")
    if os.path.exists('./data/MNIST/raw/mnist_test.csv'):
        test = pd.read_csv("./data/MNIST/raw/mnist_test.csv")
    else:
        convert("./data/MNIST/raw/t10k-images-idx3-ubyte", "./data/MNIST/raw/t10k-labels-idx1-ubyte",
                "./data/MNIST/raw/mnist_test.csv", 10000)
        test = pd.read_csv("./data/MNIST/raw/mnist_test.csv")
    train_labels = np.array(train.loc[:, 'label'])
    train_data = np.array(train.loc[:, train.columns != 'label'])
    # visualize(0)
    # check_count_of_each_label(train_labels)
    # d = train_data.shape[1]
    # d1 = 300
    # # Shape of W1 is given by d1 * d where d1 is 300 and d is given by 784
    # W1 = np.zeros((d1, d))
    # # print(W1.shape)
    # x1 = train_data[0]
    # # print(x1.shape, x1)
    # z1 = np.dot(W1, x1)
    # # print(z1.shape, z1)
    # a1 = sigmoid(z1)
    # print('After sigmmoid activation shape is', a1.shape)
    # W2 = np.zeros((10, d1))
    # z2 = np.dot(W2, a1)
    # # print(z2, z2.shape)
    # y_pred = softmax(z2)
    # # print(y_pred.shape)
    # y_actual = train_labels[0]
    # one_hot = np.zeros(10)
    # one_hot[y_actual] = 1
    # print(y_pred, one_hot)
    # loss = - np.dot(one_hot, np.log(y_pred))
    # print(loss)
    ###############################
    # Arrange the data as (features, samples) and one-hot encode the labels.
    # NOTE(review): this hard-codes exactly 60000 training rows — confirm the
    # CSV row count matches before reshaping.
    train_data = np.reshape(train_data, [784, 60000])
    train_label = np.zeros((10, 60000))
    for col in range(60000):
        val = train_labels[col]
        for row in range(10):
            if (val == row):
                train_label[val, col] = 1
    print("train_data shape="+str(np.shape(train_data)))
    print("train_label shape="+str(np.shape(train_label)))
    # n-layer model (n=3 including input and output layer)
    layers_dims = [784, 300, 10]
    # len_update is read as a global by update_parameters().
    len_update = len(layers_dims)
    parameters = L_layer_model(train_data, train_label, layers_dims,
                               learning_rate=0.0005, num_iterations=35, print_cost=True)
    print("training done")
| smit-1999/NaiveBayes | nn.py | nn.py | py | 9,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.title",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ma... |
15821968201 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
import psutil
import argparse
def monitorAvailableMemory(memory_upperlimit_percent):
    """
    Return True when system RAM utilisation exceeds the given threshold.

    Arguments:
        memory_upperlimit_percent: The upperlimit of the memory utilization (float)
    """
    return psutil.virtual_memory().percent > memory_upperlimit_percent
def ram_usage_watcher(mem_upper_limit):
    """ROS node: publish every 5 s whether RAM usage exceeds the limit.

    Args:
        mem_upper_limit: Utilisation threshold in percent (float).
    """
    # Latest-value-only topic: queue_size=1 drops stale readings.
    pub = rospy.Publisher(
        'data_capture/is_memory_usage_exceeded', Bool, queue_size=1)
    rospy.init_node('ram_usage_watcher', anonymous=True)
    rate = rospy.Rate(0.2)  # Once every 5 seconds = 1/5 = 0.2 hz
    while not rospy.is_shutdown():
        # Publish True when utilisation is above the configured threshold.
        mem_usage = monitorAvailableMemory(
            memory_upperlimit_percent=mem_upper_limit)
        pub.publish(mem_usage)
        rate.sleep()
if __name__ == '__main__':
    # --mem-upper-limit sets the utilisation threshold in percent (default 90).
    parser = argparse.ArgumentParser()
    parser.add_argument('--mem-upper-limit', type=float, help='Memory utilization upper limit in percent',
                        default=90.0)
    # parse_known_args ignores the extra CLI arguments ROS injects at launch.
    args, _ = parser.parse_known_args()
    try:
        ram_usage_watcher(args.mem_upper_limit)
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node.
        pass
| robotpt/ros-data-capture | src/tools/mem_use_watcher/scripts/watcher.py | watcher.py | py | 1,420 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "psutil.virtual_memory",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "std_msgs.msg.Bool",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "rospy.ini... |
75018787708 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 22:05:01 2022
@author: Marcin
"""
import numpy as np
import matplotlib.pyplot as plt
# Sigmoid activation function
def sigmoid(X):
    """Logistic activation 1 / (1 + e^-X), applied element-wise."""
    return np.reciprocal(1.0 + np.exp(-X))
# Dervative of sigmoid funcition
def sigmoid_derivative(X):
    """Derivative of the sigmoid, expressed via the function itself."""
    s = sigmoid(X)
    return s * (1 - s)
# Forward progpagation
def forward_propagation(X, w1, w2, predict=False):
    """Two-layer forward pass for the XOR network.

    Args:
        X: Input batch with a leading bias column, shape (m, 3).
        w1: First-layer weights, shape (3, hidden).
        w2: Second-layer weights, shape (hidden + 1, 1).
        predict: When True, return only the network output Z2.

    Returns:
        Z2 if predict is True, otherwise the tuple (A1, Z1, A2, Z2) of
        pre-/post-activation values required by backpropagation.
    """
    # Hidden layer: affine transform, then sigmoid.
    A1 = X.dot(w1)
    Z1 = sigmoid(A1)
    # Prepend a bias column of ones before the output layer.
    ones = np.ones((Z1.shape[0], 1))
    Z1 = np.concatenate((ones, Z1), axis=1)
    A2 = Z1.dot(w2)
    Z2 = sigmoid(A2)
    return Z2 if predict else (A1, Z1, A2, Z2)
# Backpropagation
def backpropagation(A1, X, Z1, Z2, Y, weights2=None):
    """Compute gradients for both weight matrices of the XOR network.

    Args:
        A1, Z1, Z2: Intermediates produced by forward_propagation.
        X: Input batch (with bias column).
        Y: Target outputs.
        weights2: Second-layer weights. Defaults to the module-level ``w2``
            for backward compatibility with the existing training loop
            (the original read the global implicitly).

    Returns:
        (out_diff, insideDiff, outDiff): output-layer error plus the
        gradients for the first and second layers respectively.
    """
    if weights2 is None:
        weights2 = w2  # original implicit global dependency, kept as default
    # Error at the output layer.
    out_diff = Z2 - Y
    # Gradient for the second-layer weights.
    outDiff = Z1.T.dot(out_diff)
    # Propagate the error through the hidden layer, skipping the bias row.
    inside_diff = out_diff.dot(weights2[1:, :].T) * sigmoid_derivative(A1)
    insideDiff = X.T.dot(inside_diff)
    return out_diff, insideDiff, outDiff
# Initialize weights
def initialize(input_size, output_size, hidden_units_w1, hidden_w2):
    """Draw both weight matrices from a standard normal distribution."""
    first_layer = np.random.randn(input_size, hidden_units_w1)
    second_layer = np.random.randn(hidden_w2, output_size)
    return first_layer, second_layer
# Define input data with bias and output values.
# XOR truth table: the leading 1 in each row is the constant bias input.
X = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
Y = np.array([0, 1, 1, 0]).reshape(-1,1)
# Number of neurons in layers
input_size = X.shape[1]
hidden_units_w1 = 5
hidden_w2 = hidden_units_w1 + 1  # +1 accounts for the hidden-layer bias unit
output_size = 1
# Initialize random weights
w1, w2 = initialize(input_size, output_size, hidden_units_w1, hidden_w2)
# Define learning rate
learning_rate = 0.08
# Lists for costs (errors)
costs = []
# Desired number of epochs
epochs = 10000
# Y data shape - used to normalise the weight updates
m = Y.shape[0]
# Training process
for i in range(1, epochs+1):
    # Forward propagation over the full batch
    A1, Z1, A2, Z2 = forward_propagation(X, w1, w2)
    # Backpropagation (reads the module-level w2 internally)
    out_diff, insideDiff, outDiff = backpropagation(A1, X, Z1, Z2, Y)
    # Modify weights
    w1 = w1 - learning_rate * (1/m) * insideDiff
    w2 = w2 - learning_rate * (1/m) * outDiff
    # Cost: mean absolute difference between output and target
    c = np.mean(np.abs(out_diff))
    costs.append(c)
    if i%100 == 0:
        print('Iteration: %f, cost: %f' % (i, c))
print('Completed.')
# Predict:
pred = forward_propagation(X, w1, w2, True)
print('Pred. percentage:')
print(pred)
pred_rounded = np.round(pred)
print('Predictions:')
print(pred_rounded)
# Plot error curve
# NOTE(review): "erroro" typo in the title string below (runtime text,
# left unchanged here).
plt.plot(costs)
plt.xlabel('Iterations')
plt.ylabel('Error')
plt.title('Training erroro curve')
{
"api_name": "numpy.exp",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number":... |
20342943646 | import numpy as np
import matplotlib.pyplot as mplt
# Monte-Carlo sampling of the net spin s = (S_plus - S_min)/2 for N = 50
# two-state particles, repeated M times, then a histogram of the energies.
M = 10000
N = 50
s = np.zeros(M)
number_of_cols = 0
for i in range(M):
    S_min = 0
    S_plus = 0
    for j in range(N):
        # Flip a fair coin for each particle's state.
        chooser_of_state = np.random.randint(2)
        if chooser_of_state == 1:
            S_min += 1
        else:
            S_plus += 1
    s_value = (S_plus - S_min)/2.
    # NOTE(review): s is pre-filled with zeros, so 0.0 always tests as
    # already "seen" and is never counted here; the +1 in the hist() call
    # below appears to compensate — confirm the intended bin count.
    if s_value not in s:
        number_of_cols += 1
    s[i] = s_value
energy = -2*s #times mu and B too, but i assume them to be equal to 1
# NOTE(review): axes are labelled in terms of s although the histogram is
# over energy = -2s — confirm the intended labels.
mplt.hist(energy, number_of_cols+1)
mplt.xlabel('value of s')
mplt.ylabel('probability of s')
mplt.show()
| tellefs/FYS2160 | Oblig1/oppgm.py | oppgm.py | py | 545 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.h... |
2116122344 | """
A command line interface to the qcfractal server.
"""
import argparse
import signal
import logging
from enum import Enum
from math import ceil
from typing import List, Optional
import tornado.log
import qcengine as qcng
import qcfractal
from pydantic import BaseModel, BaseSettings, validator, Schema
from . import cli_utils
from ..interface.util import auto_gen_docs_on_demand
__all__ = ["main"]
QCA_RESOURCE_STRING = '--resources process=1'
logger = logging.getLogger("qcfractal.cli")
class SettingsCommonConfig:
    """Base pydantic ``Config`` shared by every settings model below."""
    # Environment variables are read with this prefix, e.g. QCA_ADAPTER.
    env_prefix = "QCA_"
    # Field / env-var names match regardless of case.
    case_insensitive = True
    # Reject unknown fields instead of silently ignoring them.
    extra = "forbid"
class AdapterEnum(str, Enum):
    """Distributed-execution adapters a manager can run tasks through."""
    dask = "dask"
    pool = "pool"
    parsl = "parsl"
class CommonManagerSettings(BaseSettings):
    """
    The Common settings are the settings most users will need to adjust regularly to control the nature of
    task execution and the hardware under which tasks are executed on. This block is often unique to each deployment,
    user, and manager and will be the most commonly updated options, even as config files are copied and reused, and
    even on the same platform/cluster.
    """
    # NOTE(review): pydantic's Schema() was renamed Field() in v1.0 — confirm
    # the pinned pydantic version before upgrading.
    adapter: AdapterEnum = Schema(
        AdapterEnum.pool,
        description="Which type of Distributed adapter to run tasks through."
    )
    tasks_per_worker: int = Schema(
        1,
        description="Number of concurrent tasks to run *per Worker* which is executed. Total number of concurrent "
                    "tasks is this value times max_workers, assuming the hardware is available. With the "
                    "pool adapter, and/or if max_workers=1, tasks_per_worker *is* the number of concurrent tasks."
    )
    # Defaults below are probed from the executing machine via QCEngine.
    cores_per_worker: int = Schema(
        qcng.config.get_global("ncores"),
        description="Number of cores to be consumed by the Worker and distributed over the tasks_per_worker. These "
                    "cores are divided evenly, so it is recommended that quotient of cores_per_worker/tasks_per_worker "
                    "be a whole number else the core distribution is left up to the logic of the adapter. The default "
                    "value is read from the number of detected cores on the system you are executing on.",
        gt=0
    )
    memory_per_worker: float = Schema(
        qcng.config.get_global("memory"),
        description="Amount of memory (in GB) to be consumed and distributed over the tasks_per_worker. This memory is "
                    "divided evenly, but is ultimately at the control of the adapter. Engine will only allow each of "
                    "its calls to consume memory_per_worker/tasks_per_worker of memory. Total memory consumed by this "
                    "manager at any one time is this value times max_workers. The default value is read "
                    "from the amount of memory detected on the system you are executing on.",
        gt=0
    )
    max_workers: int = Schema(
        1,
        description="The maximum number of Workers which are allowed to be run at the same time. The total number of "
                    "concurrent tasks will maximize at this quantity times tasks_per_worker."
                    "The total number "
                    "of Jobs on a cluster which will be started is equal to this parameter in most cases, and should "
                    "be assumed 1 Worker per Job. Any exceptions to this will be documented. "
                    "In node exclusive mode this is equivalent to the maximum number of nodes which you will consume. "
                    "This must be a positive, non zero integer.",
        gt=0
    )
    retries: int = Schema(
        2,
        description="Number of retries that QCEngine will attempt for RandomErrors detected when running "
                    "its computations. After this many attempts (or on any other type of error), the "
                    "error will be raised.",
        ge=0
    )
    scratch_directory: Optional[str] = Schema(
        None,
        description="Scratch directory for Engine execution jobs."
    )
    verbose: bool = Schema(
        False,
        description="Turn on verbose mode or not. In verbose mode, all messages from DEBUG level and up are shown, "
                    "otherwise, defaults are all used for any logger."
    )

    # Inherit the QCA_ env prefix and strict-field behaviour.
    class Config(SettingsCommonConfig):
        pass


auto_gen_docs_on_demand(CommonManagerSettings)
class FractalServerSettings(BaseSettings):
    """
    Settings pertaining to the Fractal Server you wish to pull tasks from and push completed tasks to. Each manager
    supports exactly 1 Fractal Server to be in communication with, and exactly 1 user on that Fractal Server. These
    can be changed, but only once the Manager is shutdown and the settings changed. Multiple Managers however can be
    started in parallel with each other, but must be done as separate calls to the CLI.

    Caution: The password here is written in plain text, so it is up to the owner/writer of the configuration file
    to ensure its security.
    """
    fractal_uri: str = Schema(
        "localhost:7777",
        description="Full URI to the Fractal Server you want to connect to"
    )
    username: Optional[str] = Schema(
        None,
        description="Username to connect to the Fractal Server with. When not provided, a connection is attempted "
                    "as a guest user, which in most default Servers will be unable to return results."
    )
    # NOTE(review): stored in plain text in the config file, as the class
    # docstring warns — keep such files out of version control.
    password: Optional[str] = Schema(
        None,
        description="Password to authenticate to the Fractal Server with (alongside the `username`)"
    )
    verify: Optional[bool] = Schema(
        None,
        description="Use Server-side generated SSL certification or not."
    )

    # Inherit the QCA_ env prefix and strict-field behaviour.
    class Config(SettingsCommonConfig):
        pass


auto_gen_docs_on_demand(FractalServerSettings)
class QueueManagerSettings(BaseSettings):
    """
    Fractal Queue Manger settings. These are options which control the setup and execution of the Fractal Manager
    itself.
    """
    # NOTE(review): unlike the sibling settings classes, this one defines no
    # ``Config(SettingsCommonConfig)`` inner class, so pydantic defaults apply — confirm intentional.
    # Identifier reported to the server; purely descriptive.
    manager_name: str = Schema(
        "unlabeled",
        description="Name of this scheduler to present to the Fractal Server. Descriptive names help the server "
                    "identify the manager resource and assists with debugging."
    )
    queue_tag: Optional[str] = Schema(
        None,
        description="Only pull tasks from the Fractal Server with this tag. If not set (None/null), then pull untagged "
                    "tasks, which should be the majority of tasks. This option should only be used when you want to "
                    "pull very specific tasks which you know have been tagged as such on the server. If the server has "
                    "no tasks with this tag, no tasks will be pulled (and no error is raised because this is intended "
                    "behavior)."
    )
    # When set, main() routes tornado logging to this file in addition to stderr.
    log_file_prefix: Optional[str] = Schema(
        None,
        description="Full path to save a log file to, including the filename. If not provided, information will still "
                    "be reported to terminal, but not saved. When set, logger information is sent both to this file "
                    "and the terminal."
    )
    update_frequency: float = Schema(
        30,
        description="Time between heartbeats/update checks between this Manager and the Fractal Server. The lower this "
                    "value, the shorter the intervals. If you have an unreliable network connection, consider "
                    "increasing this time as repeated, consecutive network failures will cause the Manager to shut "
                    "itself down to maintain integrity between it and the Fractal Server. Units of seconds",
        gt=0
    )
    # Fixed typo in the description: "submits netsts worth trial tasks" ->
    # "submits `ntests` worth of trial tasks" (refers to the `ntests` field below).
    test: bool = Schema(
        False,
        description="Turn on testing mode for this Manager. The Manager will not connect to any Fractal Server, and "
                    "instead submits `ntests` worth of trial tasks per quantum chemistry program it finds. These "
                    "tasks are generated locally and do not need a running Fractal Server to work. Helpful for "
                    "ensuring the Manager is configured correctly and the quantum chemistry codes are operating as "
                    "expected."
    )
    # gt=-1 on an int is equivalent to ge=0; kept as-is so the emitted JSON schema is unchanged.
    ntests: int = Schema(
        5,
        description="Number of tests to run if the `test` flag is set to True. Total number of tests will be this "
                    "number times the number of found quantum chemistry programs. Does nothing if `test` is False."
                    "If set to 0, then this submits no tests, but it will run through the setup and client "
                    "initialization.",
        gt=-1
    )
    max_queued_tasks: Optional[int] = Schema(
        None,
        description="Generally should not be set. Number of tasks to pull from the Fractal Server to keep locally at "
                    "all times. If `None`, this is automatically computed as "
                    "`ceil(common.tasks_per_worker*common.max_workers*2.0) + 1`. As tasks are completed, the "
                    "local pool is filled back up to this value. These tasks will all attempt to be run concurrently, "
                    "but concurrent tasks are limited by number of cluster jobs and tasks per job. Pulling too many of "
                    "these can result in under-utilized managers from other sites and result in less FIFO returns. As "
                    "such it is recommended not to touch this setting in general as you will be given enough tasks to "
                    "fill your maximum throughput with a buffer (assuming the queue has them).",
        gt=0
    )


auto_gen_docs_on_demand(QueueManagerSettings)
class SchedulerEnum(str, Enum):
    """Supported cluster queueing systems; values are the lowercase names accepted in config files."""
    slurm = "slurm"
    pbs = "pbs"
    sge = "sge"
    moab = "moab"
    lsf = "lsf"
class AdaptiveCluster(str, Enum):
    """Worker scaling policy: fixed worker count ('static') or demand-driven scaling ('adaptive')."""
    static = "static"
    adaptive = "adaptive"
class ClusterSettings(BaseSettings):
    """
    Settings tied to the cluster you are running on. These settings are mostly tied to the nature of the cluster
    jobs you are submitting, separate from the nature of the compute tasks you will be running within them. As such,
    the options here are things like wall time (per job), which Scheduler your cluster has (like PBS or SLURM),
    etc. No additional options are allowed here.
    """
    node_exclusivity: bool = Schema(
        False,
        description="Run your cluster jobs in node-exclusivity mode. This option may not be available to all scheduler "
                    "types and thus may not do anything. Related to this, the flags we have found for this option "
                    "may not be correct for your scheduler and thus might throw an error. You can always add the "
                    "correct flag/parameters to the `scheduler_options` parameter and leave this as False if you "
                    "find it gives you problems."
    )
    # Lower-cased by the validator below before enum coercion.
    scheduler: SchedulerEnum = Schema(
        None,
        description="Option of which Scheduler/Queuing system your cluster uses. Note: not all scheduler options are "
                    "available with every adapter."
    )
    scheduler_options: List[str] = Schema(
        [],
        description="Additional options which are fed into the header files for your submitted jobs to your cluster's "
                    "Scheduler/Queuing system. The directives are automatically filled in, so if you want to set "
                    "something like '#PBS -n something', you would instead just do '-n something'. Each directive "
                    "should be a separate string entry in the list. No validation is done on this with respect to "
                    "valid directives so it is on the user to know what they need to set."
    )
    task_startup_commands: List[str] = Schema(
        [],
        description="Additional commands to be run before starting the Workers and the task distribution. This can "
                    "include commands needed to start things like conda environments or setting environment variables "
                    "before executing the Workers. These commands are executed first before any of the distributed "
                    "commands run and are added to the batch scripts as individual commands per entry, verbatim."
    )
    walltime: str = Schema(
        "06:00:00",
        description="Wall clock time of each cluster job started. Presented as a string in HH:MM:SS form, but your "
                    "cluster may have a different structural syntax. This number should be set high as there should "
                    "be a number of Fractal tasks which are run for each submitted cluster job. Ideally, the job "
                    "will start, the Worker will land, and the Worker will crunch through as many tasks as it can; "
                    "meaning the job which has a Worker in it must continue existing to minimize time spend "
                    "redeploying Workers."
    )
    adaptive: AdaptiveCluster = Schema(
        AdaptiveCluster.adaptive,
        description="Whether or not to use adaptive scaling of Workers or not. If set to 'static', a fixed number of "
                    "Workers will be started (and likely *NOT* restarted when the wall clock is reached). When set to "
                    "'adaptive' (the default), the distributed engine will try to adaptively scale the number of "
                    "Workers based on tasks in the queue. This is str instead of bool type variable in case more "
                    "complex adaptivity options are added in the future."
    )

    class Config(SettingsCommonConfig):
        pass

    @validator('scheduler', 'adaptive', pre=True)
    def things_to_lcase(cls, v):
        # Accept case-insensitive user input for these enum fields.
        # NOTE(review): assumes v is a string; an explicit None here would raise
        # AttributeError — confirm whether None is ever passed through.
        return v.lower()


auto_gen_docs_on_demand(ClusterSettings)
class SettingsBlocker(BaseSettings):
    """Base settings class which rejects a fixed set of forbidden keywords.

    Subclasses set ``_forbidden_set`` to the keyword names that are controlled
    by other, more generic configuration sections, and ``_forbidden_name`` to a
    human-readable label for the error message. Any other unknown keyword is
    accepted and passed through (``extra = "allow"``).
    """
    _forbidden_set = set()
    _forbidden_name = "SettingsBlocker"

    def __init__(self, **kwargs):
        """Reject any keyword found in ``_forbidden_set`` before normal parsing runs."""
        blocked = self._forbidden_set.intersection(kwargs)
        if blocked:
            raise KeyError("The following items were set as part of {}, however, "
                           "there are other config items which control these in more generic "
                           "settings locations: {}".format(self._forbidden_name, blocked))
        super().__init__(**kwargs)

    class Config(SettingsCommonConfig):
        # This overwrites the base config to allow other keywords to be fed in
        extra = "allow"
class DaskQueueSettings(SettingsBlocker):
    """
    Settings for the Dask Cluster class. Values set here are passed directly into the Cluster objects based on the
    `cluster.scheduler` settings. Although many values are set automatically from other settings, there are
    some additional values such as `interface` and `extra` which are passed through to the constructor.

    Valid values for this field are functions of your cluster.scheduler and no linting is done ahead of trying to pass
    these to Dask.

    NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
    options which should be considered for some of the edge cases we have discovered. If you try to set a value
    which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
    forbidden.

    Please see the docs for the provider for more information.
    """
    interface: Optional[str] = Schema(
        None,
        description="Name of the network adapter to use as communication between the head node and the compute node."
                    "There are oddities of this when the head node and compute node use different ethernet adapter "
                    "names and we have not figured out exactly which combination is needed between this and the "
                    "poorly documented `ip` keyword which appears to be for Workers, but not the Client."
    )
    # main() appends QCA_RESOURCE_STRING to this list when building the dask construct.
    extra: Optional[List[str]] = Schema(
        None,
        description="Additional flags which are fed into the Dask Worker CLI startup, can be used to overwrite "
                    "pre-configured options. Do not use unless you know exactly which flags to use."
    )
    lsf_units: Optional[str] = Schema(
        None,
        description="Unit system for an LSF cluster limits (e.g. MB, GB, TB). If not set, the units are "
                    "are attempted to be set from the `lsf.conf` file in the default locations. This does nothing "
                    "if the cluster is not LSF"
    )
    # These keys are derived from other settings in main(); setting them here raises (see SettingsBlocker).
    _forbidden_set = {"name", "cores", "memory", "processes", "walltime", "env_extra", "qca_resource_string"}
    _forbidden_name = "dask_jobqueue"


auto_gen_docs_on_demand(DaskQueueSettings)
class ParslExecutorSettings(SettingsBlocker):
    """
    Settings for the Parsl Executor class. This serves as the primary mechanism for distributing Workers to jobs.
    In most cases, you will not need to set any of these options, as several options are automatically inferred
    from other settings. Any option set here is passed through to the HighThroughputExecutor class of Parsl.

    https://parsl.readthedocs.io/en/latest/stubs/parsl.executors.HighThroughputExecutor.html

    NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
    options which should be considered for some of the edge cases we have discovered. If you try to set a value
    which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
    forbidden.
    """
    address: Optional[str] = Schema(
        None,
        description="This only needs to be set in conditional cases when the head node and compute nodes use a "
                    "differently named ethernet adapter.\n\n"
                    "An address to connect to the main Parsl process which is reachable from the network in which "
                    "Workers will be running. This can be either a hostname as returned by hostname or an IP address. "
                    "Most login nodes on clusters have several network interfaces available, only some of which can be "
                    "reached from the compute nodes. Some trial and error might be necessary to identify what "
                    "addresses are reachable from compute nodes."
    )
    # Derived from other settings in main(); setting them here raises (see SettingsBlocker).
    _forbidden_set = {"label", "provider", "cores_per_worker", "max_workers"}
    _forbidden_name = "the parsl executor"


auto_gen_docs_on_demand(ParslExecutorSettings)
class ParslLauncherSettings(BaseSettings):
    """
    Set the Launcher in a Parsl Provider, and its options, if not set, the defaults are used.

    This is a rare use case where the ``launcher`` key of the Provider is needed to be set. Since it must be a class
    first, you will need to specify the ``launcher_type`` options which is interpreted as the Class Name of the
    Launcher to load and pass the rest of the options set here into it. Any unset key will just be left as defaults.
    It is up to the user to consult the Parsl Docs for their desired Launcher's options and what they do.

    The known launchers below are case-insensitive,
    but if new launchers come out (or you are using a custom/developmental build of Parsl), then you can pass your
    own Launcher in verbatim, with case sensitivity, and the Queue Manager will try to load it.

    Known Launchers:

    - ``SimpleLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SimpleLauncher.html
    - ``SingleNodeLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SingleNodeLauncher.html
    - ``SrunLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SrunLauncher.html
    - ``AprunLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.AprunLauncher.html
    - ``SrunMPILauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.SrunMPILauncher.html
    - ``GnuParallelLauncher``: https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.GnuParallelLauncher.html
    - ``MpiExecLauncher`` : https://parsl.readthedocs.io/en/latest/stubs/parsl.launchers.MpiExecLauncher.html
    """
    launcher_class: str = Schema(
        ...,
        description="Class of Launcher to use. This is a setting unique to QCArchive which is then used to pass onto "
                    "the Provider's ``launcher`` setting and the remaining keys are passed to that Launcher's options."
    )

    def _get_launcher(self, launcher_base: str) -> 'Launcher':
        """Resolve a (case-insensitive) launcher name to the Parsl Launcher class.

        Unknown names fall through verbatim (case-sensitive) to support custom launchers.
        """
        # Alias map of lower-cased names to canonical Parsl class names. Every key
        # must be fully lowercase because the lookup key is lower-cased first.
        # BUGFIX: the "srunmpilauncher" key previously read "srunmpiLauncher"
        # (capital L), so SrunMPILauncher could never be matched case-insensitively.
        launcher_map = {
            "simplelauncher": "SimpleLauncher",
            "singlenodelauncher": "SingleNodeLauncher",
            "srunlauncher": "SrunLauncher",
            "aprunlauncher": "AprunLauncher",
            "srunmpilauncher": "SrunMPILauncher",
            "gnuparallellauncher": "GnuParallelLauncher",
            "mpiexeclauncher": "MpiExecLauncher"
        }
        launcher_string = launcher_map.get(launcher_base.lower(), launcher_base)
        try:
            launcher_load = cli_utils.import_module("parsl.launchers", package=launcher_string)
            launcher = getattr(launcher_load, launcher_string)
        except ImportError:
            raise ImportError(f"Could not import Parsl Launcher: {launcher_base}. Please make sure you have Parsl "
                              f"installed and are requesting one of the launchers within the package.")
        return launcher

    def build_launcher(self):
        """Import and load the desired launcher, constructed with all remaining settings as kwargs."""
        launcher = self._get_launcher(self.launcher_class)
        return launcher(**self.dict(exclude={'launcher_class'}))

    class Config(SettingsCommonConfig):
        pass


auto_gen_docs_on_demand(ParslLauncherSettings)
class ParslProviderSettings(SettingsBlocker):
    """
    Settings for the Parsl Provider class. Valid values for this field are functions of your cluster.scheduler and no
    linting is done ahead of trying to pass these to Parsl.

    Please see the docs for the provider information

    NOTE: The parameters listed here are a special exception for additional features Fractal has engineered or
    options which should be considered for some of the edge cases we have discovered. If you try to set a value
    which is derived from other options in the YAML file, an error is raised and you are told exactly which one is
    forbidden.

    SLURM: https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.SlurmProvider.html
    PBS/Torque/Moab: https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.TorqueProvider.html
    SGE (Sun GridEngine): https://parsl.readthedocs.io/en/latest/stubs/parsl.providers.GridEngineProvider.html
    """
    # NOTE(review): typed ``str`` but defaulted to None — presumably optional; confirm pydantic accepts this default.
    partition: str = Schema(
        None,
        description="The name of the cluster.scheduler partition being submitted to. Behavior, valid values, and even"
                    "its validity as a set variable are a function of what type of queue scheduler your specific "
                    "cluster has (e.g. this variable should NOT be present for PBS clusters). "
                    "Check with your Sys. Admins and/or your cluster documentation."
    )
    launcher: ParslLauncherSettings = Schema(
        None,
        description="The Parsl Launcher do use with your Provider. If left to ``None``, defaults are assumed (check "
                    "the Provider's defaults), otherwise this should be a dictionary requiring the option "
                    "``launcher_class`` as a str to specify which Launcher class to load, and the remaining settings "
                    "will be passed on to the Launcher's constructor."
    )
    # Derived from other settings in main(); setting them here raises (see SettingsBlocker).
    _forbidden_set = {"nodes_per_block", "max_blocks", "worker_init", "scheduler_options", "wall_time"}
    _forbidden_name = "parsl's provider"


auto_gen_docs_on_demand(ParslProviderSettings)
class ParslQueueSettings(BaseSettings):
    """
    The Parsl-specific configurations used with the `common.adapter = parsl` setting. The parsl config is broken up into
    a top level `Config` class, an `Executor` sub-class, and a `Provider` sub-class of the `Executor`.
    Config -> Executor -> Provider. Each of these have their own options, and extra values fed into the
    ParslQueueSettings are fed to the `Config` level.

    It requires both `executor` and `provider` settings, but will default fill them in and often does not need
    any further configuration which is handled by other settings in the config file.
    """
    # Both sub-sections default-construct, so an empty `parsl:` block is valid.
    executor: ParslExecutorSettings = ParslExecutorSettings()
    provider: ParslProviderSettings = ParslProviderSettings()

    class Config(SettingsCommonConfig):
        # Unknown keys are retained and forwarded to the top-level parsl Config.
        extra = "allow"


auto_gen_docs_on_demand(ParslQueueSettings)
class ManagerSettings(BaseModel):
    """
    The config file for setting up a QCFractal Manager, all sub fields of this model are at equal top-level of the
    YAML file. No additional top-level fields are permitted, but sub-fields may have their own additions.

    Not all fields are required and many will depend on the cluster you are running, and the adapter you choose
    to run on.
    """
    # Every section default-constructs, so a minimal/empty config is valid.
    common: CommonManagerSettings = CommonManagerSettings()
    server: FractalServerSettings = FractalServerSettings()
    manager: QueueManagerSettings = QueueManagerSettings()
    cluster: Optional[ClusterSettings] = ClusterSettings()
    dask: Optional[DaskQueueSettings] = DaskQueueSettings()
    parsl: Optional[ParslQueueSettings] = ParslQueueSettings()

    class Config:
        # Unknown top-level YAML keys are an error (likely typos).
        extra = "forbid"


auto_gen_docs_on_demand(ManagerSettings)
def parse_args():
    """Parse the qcfractal-manager command line into a nested settings dict.

    Returns a dict with "common", "server", "manager", and "debug" sections
    (only non-None CLI values are kept), optionally merged with and extended by
    a YAML config file ("cluster", "dask", "parsl" come from the file only).
    """
    parser = argparse.ArgumentParser(
        description='A CLI for a QCFractal QueueManager with a ProcessPoolExecutor, Dask, or Parsl backend. '
        'The Dask and Parsl backends *requires* a config file due to the complexity of its setup. If a config '
        'file is specified, the remaining options serve as CLI overwrites of the config.')

    parser.add_argument("--config-file", type=str, default=None)

    # Common settings
    common_group = parser.add_argument_group('Common Adapter Settings')
    common_group.add_argument(
        "--adapter", type=str, help="The backend adapter to use, currently only {'dask', 'parsl', 'pool'} are valid.")
    common_group.add_argument(
        "--tasks-per-worker",
        type=int,
        help="The number of simultaneous tasks for the executor to run, resources will be divided evenly.")
    common_group.add_argument("--cores-per-worker", type=int, help="The number of process for each executor's Workers")
    common_group.add_argument("--memory-per-worker", type=int, help="The total amount of memory on the system in GB")
    common_group.add_argument("--scratch-directory", type=str, help="Scratch directory location")
    common_group.add_argument("--retries", type=int, help="Number of RandomError retries per task before failing the task")
    common_group.add_argument("-v", "--verbose", action="store_true", help="Increase verbosity of the logger.")

    # FractalClient options
    server_group = parser.add_argument_group('FractalServer connection settings')
    server_group.add_argument("--fractal-uri", type=str, help="FractalServer location to pull from")
    server_group.add_argument("-u", "--username", type=str, help="FractalServer username")
    server_group.add_argument("-p", "--password", type=str, help="FractalServer password")
    server_group.add_argument(
        "--verify",
        type=str,
        help="Do verify the SSL certificate, leave off (unset) for servers with custom SSL certificates.")

    # QueueManager options
    manager_group = parser.add_argument_group("QueueManager settings")
    manager_group.add_argument("--manager-name", type=str, help="The name of the manager to start")
    manager_group.add_argument("--queue-tag", type=str, help="The queue tag to pull from")
    manager_group.add_argument("--log-file-prefix", type=str, help="The path prefix of the logfile to write to.")
    manager_group.add_argument("--update-frequency", type=int, help="The frequency in seconds to check for complete tasks.")
    manager_group.add_argument("--max-queued-tasks", type=int, help="Maximum number of tasks to hold at any given time. "
                                                                   "Generally should not be set.")

    # Additional args
    optional_group = parser.add_argument_group('Optional Settings')
    optional_group.add_argument("--test", action="store_true", help="Boot and run a short test suite to validate setup")
    optional_group.add_argument(
        "--ntests", type=int, help="How many tests per found program to run, does nothing without --test set")
    optional_group.add_argument("--schema", action="store_true", help="Display the current Schema (Pydantic) for the YAML "
                                                                     "config file and exit. This will always show the "
                                                                     "most up-to-date schema. It will be presented in a "
                                                                     "JSON-like format.")

    # Flatten the namespace into a plain dict.
    cli = vars(parser.parse_args())

    def _collect(source, keys):
        """Pull the non-None entries for *keys* out of *source*."""
        return {k: source[k] for k in keys if source[k] is not None}

    # Stupid we cannot inspect groups
    data = {
        "common": _collect(cli, {"adapter", "tasks_per_worker", "cores_per_worker", "memory_per_worker",
                                 "scratch_directory", "retries", "verbose"}),
        "server": _collect(cli, {"fractal_uri", "password", "username", "verify"}),
        "manager": _collect(cli, {"max_queued_tasks", "manager_name", "queue_tag", "log_file_prefix",
                                  "update_frequency", "test", "ntests"}),
        # This set is for this script only, items here should not be passed to the ManagerSettings nor any other
        # classes
        "debug": _collect(cli, {"schema"})
    }  # yapf: disable

    if cli["config_file"] is not None:
        config_data = cli_utils.read_config_file(cli["config_file"])
        for name, group in (("common", common_group), ("server", server_group), ("manager", manager_group)):
            if name in config_data:
                data[name] = cli_utils.argparse_config_merge(group, data[name], config_data[name], check=False)

        for name in ("cluster", "dask", "parsl"):
            if name in config_data:
                data[name] = config_data[name]
                if data[name] is None:
                    # Handle edge case where None provided here is explicitly treated as
                    # "do not parse" by Pydantic (intended behavior) instead of the default empty dict
                    # being used instead. This only happens when a user sets in the YAML file
                    # the top level header and nothing below it.
                    data[name] = {}

    return data
def main(args=None):
    """Entry point for the qcfractal-manager CLI.

    Builds a ManagerSettings object from ``args`` (or from the command line when
    ``args`` is None), constructs the requested compute adapter (pool, dask, or
    parsl), then either runs the manager's self-test or starts the manager and
    blocks until a termination signal arrives.

    Parameters
    ----------
    args : dict, optional
        Nested settings dictionary as produced by ``parse_args``. When None,
        the command line is parsed.
    """
    # Grab CLI args if not present
    if args is None:
        args = parse_args()

    exit_callbacks = []

    # Handle the --schema debug flag before settings validation: it just prints
    # the Pydantic schema and exits.
    try:
        if args["debug"]["schema"]:
            print(ManagerSettings.schema_json(indent=2))
            return  # We're done, exit normally
    except KeyError:
        pass  # Don't worry if schema isn't in the list
    finally:
        args.pop("debug", None)  # Ensure the debug key is not present

    # Construct object
    settings = ManagerSettings(**args)

    logger_map = {AdapterEnum.pool: "",
                  AdapterEnum.dask: "dask_jobqueue.core",
                  AdapterEnum.parsl: "parsl"}
    if settings.common.verbose:
        adapter_logger = logging.getLogger(logger_map[settings.common.adapter])
        adapter_logger.setLevel("DEBUG")
        logger.setLevel("DEBUG")

    if settings.manager.log_file_prefix is not None:
        tornado.options.options['log_file_prefix'] = settings.manager.log_file_prefix
        # Clones the log to the output
        tornado.options.options['log_to_stderr'] = True
        tornado.log.enable_pretty_logging()

    if settings.manager.test:
        # Test this manager, no client needed
        client = None
    else:
        # Connect to a specified fractal server
        client = qcfractal.interface.FractalClient(
            address=settings.server.fractal_uri, **settings.server.dict(skip_defaults=True, exclude={"fractal_uri"}))

    # Figure out per-task data
    cores_per_task = settings.common.cores_per_worker // settings.common.tasks_per_worker
    memory_per_task = settings.common.memory_per_worker / settings.common.tasks_per_worker
    if cores_per_task < 1:
        # Message fixed to match the `< 1` check (previously said "larger than one").
        raise ValueError("Cores per task must be at least one!")

    if settings.common.adapter == "pool":
        from concurrent.futures import ProcessPoolExecutor

        queue_client = ProcessPoolExecutor(max_workers=settings.common.tasks_per_worker)

    elif settings.common.adapter == "dask":

        dask_settings = settings.dask.dict(skip_defaults=True)
        # Ensure the QCA resource string is always fed to the dask Workers.
        if "extra" not in dask_settings:
            dask_settings["extra"] = []
        if QCA_RESOURCE_STRING not in dask_settings["extra"]:
            dask_settings["extra"].append(QCA_RESOURCE_STRING)
        # Scheduler opts (copied so the exclusivity flag does not mutate settings)
        scheduler_opts = settings.cluster.scheduler_options.copy()

        _cluster_loaders = {"slurm": "SLURMCluster", "pbs": "PBSCluster", "moab": "MoabCluster", "sge": "SGECluster",
                            "lsf": "LSFCluster"}
        dask_exclusivity_map = {"slurm": "--exclusive",
                                "pbs": "-n",
                                "moab": "-n",  # Less sure about this one
                                "sge": "-l exclusive=true",
                                "lsf": "-x",
                                }
        if settings.cluster.node_exclusivity and dask_exclusivity_map[settings.cluster.scheduler] not in scheduler_opts:
            scheduler_opts.append(dask_exclusivity_map[settings.cluster.scheduler])

        # Create one construct to quickly merge dicts with a final check;
        # user-supplied dask_settings win over the derived keys via the splat.
        dask_construct = {
            "name": "QCFractal_Dask_Compute_Executor",
            "cores": settings.common.cores_per_worker,
            "memory": str(settings.common.memory_per_worker) + "GB",
            "processes": settings.common.tasks_per_worker,  # Number of workers to generate == tasks in this construct
            "walltime": settings.cluster.walltime,
            "job_extra": scheduler_opts,
            "env_extra": settings.cluster.task_startup_commands,
            **dask_settings}

        try:
            # Import the dask things we need
            import dask_jobqueue
            from dask.distributed import Client
            cluster_module = cli_utils.import_module("dask_jobqueue",
                                                     package=_cluster_loaders[settings.cluster.scheduler])
            cluster_class = getattr(cluster_module, _cluster_loaders[settings.cluster.scheduler])
            # NOTE(review): lexicographic version compare — misorders e.g. "0.10.0" vs "0.5.0".
            if dask_jobqueue.__version__ < "0.5.0":
                raise ImportError
        except ImportError:
            # Fixed missing space after "need" in the original message.
            raise ImportError("You need `dask-jobqueue >= 0.5.0` to use the `dask` adapter")

        cluster = cluster_class(**dask_construct)

        # Setup up adaption
        # Workers are distributed down to the cores through the sub-divided processes
        # Optimization may be needed
        workers = settings.common.tasks_per_worker * settings.common.max_workers
        if settings.cluster.adaptive == AdaptiveCluster.adaptive:
            cluster.adapt(minimum=0, maximum=workers, interval="10s")
        else:
            cluster.scale(workers)

        queue_client = Client(cluster)

    elif settings.common.adapter == "parsl":

        scheduler_opts = settings.cluster.scheduler_options

        if not settings.cluster.node_exclusivity:
            raise ValueError("For now, QCFractal can only be run with Parsl in node exclusivity. This will be relaxed "
                             "in a future release of Parsl and QCFractal")
        # Import helpers
        _provider_loaders = {"slurm": "SlurmProvider",
                             "pbs": "TorqueProvider",
                             "moab": "TorqueProvider",
                             "sge": "GridEngineProvider",
                             "lsf": None}

        if _provider_loaders[settings.cluster.scheduler] is None:
            raise ValueError(f"Parsl does not know how to handle cluster of type {settings.cluster.scheduler}.")

        # Headers
        _provider_headers = {"slurm": "#SBATCH",
                             "pbs": "#PBS",
                             "moab": "#PBS",
                             "sge": "#$$",
                             "lsf": None
                             }

        # Import the parsl things we need
        try:
            import parsl
            from parsl.config import Config
            from parsl.executors import HighThroughputExecutor
            from parsl.addresses import address_by_hostname
            provider_module = cli_utils.import_module("parsl.providers",
                                                      package=_provider_loaders[settings.cluster.scheduler])
            provider_class = getattr(provider_module, _provider_loaders[settings.cluster.scheduler])
            provider_header = _provider_headers[settings.cluster.scheduler]
            # NOTE(review): lexicographic version compare — misorders e.g. "0.10.0" vs "0.8.0".
            if parsl.__version__ < '0.8.0':
                raise ImportError
        except ImportError:
            raise ImportError("You need `parsl >=0.8.0` to use the `parsl` adapter")

        # BUGFIX: the original compared the provider class name ("TorqueProvider")
        # against "moab", so this warning could never fire; compare the scheduler.
        if settings.cluster.scheduler == "moab":
            logger.warning("Parsl uses its TorqueProvider for Moab clusters due to the scheduler similarities. "
                           "However, if you find a bug with it, please report to the Parsl and QCFractal developers so "
                           "it can be fixed on each respective end.")

        # Setup the providers
        # Create one construct to quickly merge dicts with a final check;
        # user-supplied provider settings win over the derived keys via the splat.
        common_parsl_provider_construct = {
            "init_blocks": 0,  # Update this at a later time of Parsl
            "max_blocks": settings.common.max_workers,
            "walltime": settings.cluster.walltime,
            "scheduler_options": f'{provider_header} ' + f'\n{provider_header} '.join(scheduler_opts) + '\n',
            "nodes_per_block": 1,
            "worker_init": '\n'.join(settings.cluster.task_startup_commands),
            **settings.parsl.provider.dict(skip_defaults=True, exclude={"partition", "launcher"})
        }
        if settings.parsl.provider.launcher:
            common_parsl_provider_construct["launcher"] = settings.parsl.provider.launcher.build_launcher()
        if settings.cluster.scheduler == "slurm":
            # The Parsl SLURM constructor has a strange set of arguments
            provider = provider_class(settings.parsl.provider.partition,
                                      exclusive=settings.cluster.node_exclusivity,
                                      **common_parsl_provider_construct)
        else:
            provider = provider_class(**common_parsl_provider_construct)

        parsl_executor_construct = {
            "label": "QCFractal_Parsl_{}_Executor".format(settings.cluster.scheduler.title()),
            "cores_per_worker": cores_per_task,
            "max_workers": settings.common.tasks_per_worker * settings.common.max_workers,
            "provider": provider,
            "address": address_by_hostname(),
            **settings.parsl.executor.dict(skip_defaults=True)}

        queue_client = Config(
            executors=[HighThroughputExecutor(**parsl_executor_construct)])

    else:
        raise KeyError("Unknown adapter type '{}', available options: {}.\n"
                       "This code should also be unreachable with pydantic Validation, so if "
                       "you see this message, please report it to the QCFractal GitHub".format(
                           settings.common.adapter, [getattr(AdapterEnum, v).value for v in AdapterEnum]))

    # Build out the manager itself
    # Compute max tasks
    max_concurrent_tasks = settings.common.tasks_per_worker * settings.common.max_workers
    if settings.manager.max_queued_tasks is None:
        # Tasks * jobs * buffer + 1
        max_queued_tasks = ceil(max_concurrent_tasks * 2.00) + 1
    else:
        max_queued_tasks = settings.manager.max_queued_tasks

    manager = qcfractal.queue.QueueManager(
        client,
        queue_client,
        max_tasks=max_queued_tasks,
        queue_tag=settings.manager.queue_tag,
        manager_name=settings.manager.manager_name,
        update_frequency=settings.manager.update_frequency,
        cores_per_task=cores_per_task,
        memory_per_task=memory_per_task,
        scratch_directory=settings.common.scratch_directory,
        retries=settings.common.retries,
        verbose=settings.common.verbose
    )

    # Set stats correctly since we buffer the max tasks a bit
    manager.statistics.max_concurrent_tasks = max_concurrent_tasks

    # Add exit callbacks
    for cb in exit_callbacks:
        manager.add_exit_callback(cb[0], *cb[1], **cb[2])

    # Either startup the manager or run until complete
    if settings.manager.test:
        success = manager.test(settings.manager.ntests)
        if success is False:
            raise ValueError("Testing was not successful, failing.")
    else:
        for signame in {"SIGHUP", "SIGINT", "SIGTERM"}:
            # BUGFIX: bind signame as a keyword-only default. The original closed
            # over the loop variable, so every handler reported the *last*
            # iterated signal name (late-binding closure bug).
            def stop(*args, signame=signame, **kwargs):
                manager.stop(signame)
                raise KeyboardInterrupt()

            signal.signal(getattr(signal, signame), stop)

        # Blocks until signal
        try:
            manager.start()
        except KeyboardInterrupt:
            pass


if __name__ == '__main__':
    main()
| yudongqiu/QCFractal | qcfractal/cli/qcfractal_manager.py | qcfractal_manager.py | py | 42,285 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseSettings",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "pydantic.Schema",
... |
24219583345 | # -*- coding: utf-8 -*-
"""
Created on 2022/9/23

@author: nhsiao

Change log (translated from the original Chinese):
2022/9/5  avg_rsrp replaced by c_rsrp; images start from 2022/8/27
2022/9/29 c_rsrp replaced by pos_first_rsrp; images start from 2022/9/23
"""
import cx_Oracle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import dates as mpl_dates
import gc
import gzip
from datetime import datetime, timedelta
import func
import warnings

# Silence the noisy paramiko/SFTP host-key warning plus everything else.
warnings.filterwarnings('ignore', '.*Failed to load HostKeys.*')
warnings.filterwarnings('ignore')

# import datetime
# today = datetime.date.today().strftime("%Y-%m-%d")

# Working directory holding this script, its log, and the data folder.
code_folder = "D:\\Nicole\\python\\cottCNN\\"

# keep process time: append a run timestamp row to the job log CSV.
# (The logged text below is a runtime string and is intentionally left as-is.)
now = datetime.now()
txt = 'generateImg.py, 上次更新時間,From:' + str(now)
df = pd.DataFrame([txt], index=['UpdateTime'])
df.to_csv(code_folder + 'logCottCNN.csv', mode='a', header=False)

# Empty accumulators: per-ticket nearest-site statistics and the raw site triplets.
df_site = pd.DataFrame(data=None, columns=['itt_id', 'ittid_lat', 'ittid_long', 'site', 'site_dis', 'site_lat', 'site_long', 'site_type', 'pos_first_rsrp_mean', 'pos_first_rsrp_count', 'c_prbutil_mean', 'c_prbutil_count', 'c_rssi_mean', 'c_rssi_count', 'dl_tput_mean', 'dl_tput_count', 'pos_last_rsrq_mean', 'pos_last_rsrq_count', 'end_cqi_mean', 'end_cqi_count'])
df_site_ori = pd.DataFrame(data=None, columns=['itt_id', 'site1', 'site2', 'site3'])

# Date strings used in file names and SQL filters (yesterday's batch is processed).
today = datetime.today().strftime("%Y-%m-%d")
yesterday = datetime.today() - timedelta(days=1)
yesDay = yesterday.strftime('%Y%m%d')
yesDate = yesterday.strftime('%Y-%m-%d')
# today = "2022-09-29"
# yesDay = "20221129"
# yesDate = "2022-11-29"

localDir = code_folder + 'data\\'
sFile = 'TT_Data_' + yesDay + '.csv.gz'
print(localDir, sFile)
# Download yesterday's gzipped TT data over SFTP (helper defined in func.py).
func.sftp(sFile, localDir)

# Today's raw data (translated from "今日rawData"): load the gzipped CSV just fetched.
with gzip.open(localDir + sFile, 'rb') as f:
    rawCott = pd.read_csv(f)

# SQL pulling yesterday's COTT tickets (web + app tables) with 1h/24h/4-day event windows.
sql = 'SELECT ITT_ID, to_char(CREATE_DATE,\'YYYY-MM-DD HH24\')||\':00\' event_date, to_char(CREATE_DATE-1,\'YYYY-MM-DD HH24\')||\':00\' event_date_24hr, to_char(CREATE_DATE-4,\'YYYY-MM-DD HH24\')||\':00\' event_start_date, GIS_X_84, GIS_Y_84 FROM ITSMRPT.RPT_COTT@ITSMRPT_NEW WHERE trunc(CREATE_DATE) = TO_DATE(\'' + yesDate + '\',\'YYYY-MM-DD\') union SELECT ITT_ID, to_char(CREATE_DATE,\'YYYY-MM-DD HH24\')||\':00\' event_date, to_char(CREATE_DATE-1,\'YYYY-MM-DD HH24\')||\':00\' event_date_24hr, to_char(CREATE_DATE-4,\'YYYY-MM-DD HH24\')||\':00\' event_start_date, GIS_X_84, GIS_Y_84 FROM ITSMRPT.RPT_COTT_APP@ITSMRPT_NEW WHERE trunc(CREATE_DATE) = TO_DATE(\'' + yesDate + '\',\'YYYY-MM-DD\')'
connection = cx_Oracle.connect('nocadm/noc2512@192.168.20.35/nois3g')
df1 = pd.read_sql(sql, con=connection)
del df
pd.options.mode.chained_assignment = None # default='warn'
df3 = rawCott.merge(df1, left_on="itt_id", right_on="ITT_ID", how='left', suffixes=('_1', '_2'))
df3['start_time'] = pd.to_datetime(df3['start_time'], format='%Y-%m-%d %H:%M:%S')
condition = "`start_time` <= `EVENT_DATE` and start_time >= `EVENT_START_DATE`"
df_raw0 = df3.query(condition, engine='python')
df_raw = df_raw0[['itt_id','site_id', 'GIS_X_84', 'GIS_Y_84','c_lat','c_long', 'pos_first_lat',
'pos_first_long', 'n_type', 'start_time','EVENT_START_DATE','EVENT_DATE', 'EVENT_DATE_24HR','duration','pos_first_rsrp', 'c_prbutil', 'c_rssi','end_cqi','call_type','dl_volume','dl_tput','pos_last_rsrq']]
df_raw["start_time"] = pd.to_datetime(df_raw["start_time"])
df_raw['EVENT_START_DATE'] = pd.to_datetime(df_raw['EVENT_START_DATE'])
df_raw['EVENT_DATE'] = pd.to_datetime(df_raw['EVENT_DATE'])
df_raw['EVENT_DATE_24HR'] = pd.to_datetime(df_raw['EVENT_DATE_24HR'])
del rawCott
del df3
del df_raw0
params = ["pos_first_rsrp", "c_prbutil", "c_rssi","end_cqi","pos_last_rsrq", "dl_tput"]
df_raw['dl_volume'].fillna(value=0, inplace=True)
df_raw['dl_volume'] = df_raw['dl_volume'].astype('int64')
df_raw['dl_tput'].fillna(value=0, inplace=True)
df_raw['dl_tput'] = df_raw['dl_tput'].astype('int64')
df_raw['itt_id'] = df_raw['itt_id'].astype('str')
df_raw['pos_first_rsrp_color'] = df_raw.apply(func.get_rsrp_color, axis=1).copy()
df_raw['c_prbutil_color'] = df_raw.apply(func.get_prb_color, axis=1).copy()
df_raw['c_rssi_color'] = df_raw.apply(func.get_rssi_color, axis=1).copy()
df_raw['end_cqi_color'] = df_raw.apply(func.get_cqi_color, axis=1).copy()
df_raw['dl_tput_color'] = df_raw.apply(func.get_dltput_color, axis=1).copy()
df_raw['pos_last_rsrq_color'] = df_raw.apply(func.get_rsrq_color, axis=1).copy()
df_raw['duration2'] = df_raw.apply(func.get_duration, axis=1).copy()
df_raw['times'] = df_raw.apply(func.get_times, axis=1).copy()
df_raw['GIS_Y_84'] = df_raw['GIS_Y_84'].astype('float64')
df_raw['GIS_X_84'] = df_raw['GIS_X_84'].astype('float64')
df_raw['c_lat'] = df_raw['c_lat'].astype('float64')
df_raw['c_long'] = df_raw['c_long'].astype('float64')
df_raw['tt_site_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['GIS_Y_84'],x['GIS_X_84'],x['c_lat'],x['c_long']) , axis=1).copy()
df_raw['user_site_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['pos_first_lat'],x['pos_first_long'],x['c_lat'],x['c_long']) , axis=1).copy()
df_raw['tt_user_distance'] = df_raw.apply(lambda x: func.LLs2Dist(x['pos_first_lat'],x['pos_first_long'],x['GIS_Y_84'],x['GIS_X_84']) , axis=1).copy()
# df_raw_test = df_raw[df_raw['tt_user_distance']<2]
itt_id = df_raw['itt_id'].unique()
for i in range(len(itt_id)):
condition = "`itt_id` == '" + itt_id[i] + "'"
df = df_raw.query(condition, engine='python')
#取得停留最久的基站
site1, tt1_lat, tt1_long, site1_lat, site1_long, bad_site1, bad_site1_lat, bad_site1_long, pos_first_rsrp_mean1,c_prbutil_mean1,c_rssi_mean1,dl_tput_mean1,pos_last_rsrq_mean1,end_cqi_mean1,pos_first_rsrp_count1,c_prbutil_count1,c_rssi_count1,dl_tput_count1,pos_last_rsrq_count1,end_cqi_count1, pos_first_rsrp_bmean1,c_prbutil_bmean1,c_rssi_bmean1,dl_tput_bmean1,pos_last_rsrq_bmean1,end_cqi_bmean1,pos_first_rsrp_bcount1,c_prbutil_bcount1,c_rssi_bcount1,dl_tput_bcount1,pos_last_rsrq_bcount1,end_cqi_bcount1 = func.get_site_id(df, 8, 12)
site2, tt2_lat, tt2_long, site2_lat, site2_long, bad_site2, bad_site2_lat, bad_site2_long, pos_first_rsrp_mean2,c_prbutil_mean2,c_rssi_mean2,dl_tput_mean2,pos_last_rsrq_mean2,end_cqi_mean2,pos_first_rsrp_count2,c_prbutil_count2,c_rssi_count2,dl_tput_count2,pos_last_rsrq_count2,end_cqi_count2, pos_first_rsrp_bmean2,c_prbutil_bmean2,c_rssi_bmean2,dl_tput_bmean2,pos_last_rsrq_bmean2,end_cqi_bmean2,pos_first_rsrp_bcount2,c_prbutil_bcount2,c_rssi_bcount2,dl_tput_bcount2,pos_last_rsrq_bcount2,end_cqi_bcount2 = func.get_site_id(df, 12, 18)
site3, tt3_lat, tt3_long, site3_lat, site3_long, bad_site3, bad_site3_lat, bad_site3_long, pos_first_rsrp_mean3,c_prbutil_mean3,c_rssi_mean3,dl_tput_mean3,pos_last_rsrq_mean3,end_cqi_mean3,pos_first_rsrp_count3,c_prbutil_count3,c_rssi_count3,dl_tput_count3,pos_last_rsrq_count3,end_cqi_count3, pos_first_rsrp_bmean3,c_prbutil_bmean3,c_rssi_bmean3,dl_tput_bmean3,pos_last_rsrq_bmean3,end_cqi_bmean3,pos_first_rsrp_bcount3,c_prbutil_bcount3,c_rssi_bcount3,dl_tput_bcount3,pos_last_rsrq_bcount3,end_cqi_bcount3 = func.get_site_id(df, 18, 24)
site1_dis = ""
site2_dis = ""
site3_dis = ""
bad_site1_dis = ""
bad_site2_dis = ""
bad_site3_dis = ""
# if len(site1_lat) > 0:
if site1_lat:
# site1_dis = format(func.LLs2Dist(tt1_lat, tt1_long, site1_lat, site1_long),'.2f')
site1_dis = func.round_v2(func.LLs2Dist(tt1_lat, tt1_long, site1_lat, site1_long),3)
if site2_lat:
site2_dis = func.round_v2(func.LLs2Dist(tt2_lat, tt2_long, site2_lat, site2_long),3)
if site3_lat:
site3_dis = func.round_v2(func.LLs2Dist(tt3_lat, tt3_long, site3_lat, site3_long),3)
if bad_site1_lat:
bad_site1_dis = func.round_v2(func.LLs2Dist(tt1_lat, tt1_long, bad_site1_lat, bad_site1_long),3)
if bad_site2_lat:
bad_site2_dis = func.round_v2(func.LLs2Dist(tt2_lat, tt2_long, bad_site2_lat, bad_site2_long),3)
if bad_site3_lat:
bad_site3_dis = func.round_v2(func.LLs2Dist(tt3_lat, tt3_long, bad_site3_lat, bad_site3_long),3)
site_arr = [site1, site2, site3, bad_site1, bad_site2, bad_site3]
ittid_lat_arr = [tt1_lat, tt2_lat, tt3_lat, tt1_lat, tt2_lat, tt3_lat]
ittid_long_arr = [tt1_long, tt2_long, tt3_long, tt1_long, tt2_long, tt3_long]
site_dis_arr = [site1_dis, site2_dis, site3_dis, bad_site1_dis, bad_site2_dis, bad_site3_dis]
site_lat_arr = [site1_lat, site2_lat, site3_lat, bad_site1_lat, bad_site2_lat, bad_site3_lat]
site_long_arr = [site1_long, site2_long, site3_long, bad_site1_long, bad_site2_long, bad_site3_long]
site_type_arr = ['time1', 'time2', 'time3', 'btime1', 'btime2', 'btime3']
#6-1參數
rsrp_mean_arr = [pos_first_rsrp_mean1, pos_first_rsrp_mean2, pos_first_rsrp_mean3, pos_first_rsrp_bmean1, pos_first_rsrp_bmean2, pos_first_rsrp_bmean3]
rsrp_count_arr = [pos_first_rsrp_count1, pos_first_rsrp_count2, pos_first_rsrp_count3,pos_first_rsrp_bcount1, pos_first_rsrp_bcount2, pos_first_rsrp_bcount3]
#6-2參數
prbutil_mean_arr = [c_prbutil_mean1, c_prbutil_mean2, c_prbutil_mean3, c_prbutil_bmean1, c_prbutil_bmean2, c_prbutil_bmean3]
prbutil_count_arr = [c_prbutil_count1, c_prbutil_count2, c_prbutil_count3, c_prbutil_bcount1, c_prbutil_bcount2, c_prbutil_bcount3]
#6-3參數
rssi_mean_arr = [c_rssi_mean1, c_rssi_mean2, c_rssi_mean3, c_rssi_bmean1, c_rssi_bmean2, c_rssi_bmean3]
rssi_count_arr = [c_rssi_count1, c_rssi_count2, c_rssi_count3, c_rssi_bcount1, c_rssi_bcount2, c_rssi_bcount3]
#6-4參數
dltput_mean_arr = [dl_tput_mean1, dl_tput_mean2, dl_tput_mean3, dl_tput_bmean1, dl_tput_bmean2, dl_tput_bmean3]
dltput_count_arr = [dl_tput_count1, dl_tput_count2, dl_tput_count3, dl_tput_bcount1, dl_tput_bcount2, dl_tput_bcount3]
#6-5參數
rsrq_mean_arr = [pos_last_rsrq_mean1, pos_last_rsrq_mean2, pos_last_rsrq_mean3, pos_last_rsrq_bmean1, pos_last_rsrq_bmean2, pos_last_rsrq_bmean3]
rsrq_count_arr = [pos_last_rsrq_count1, pos_last_rsrq_count2, pos_last_rsrq_count3, pos_last_rsrq_bcount1, pos_last_rsrq_bcount2, pos_last_rsrq_bcount3]
#6-6參數
cqi_mean_arr = [end_cqi_mean1, end_cqi_mean2, end_cqi_mean3, end_cqi_bmean1, end_cqi_bmean2, end_cqi_bmean3]
cqi_count_arr = [end_cqi_count1, end_cqi_count2, end_cqi_count3, end_cqi_bcount1, end_cqi_bcount2, end_cqi_bcount3]
for a in range(len(site_arr)):
df_site = df_site.append({'ittid' :itt_id[i]
, 'ittid_lat' : ittid_lat_arr[a]
, 'ittid_long' : ittid_long_arr[a]
, 'site' : site_arr[a]
, 'site_dis' : site_dis_arr[a]
, 'site_lat' : site_lat_arr[a]
, 'site_long' : site_long_arr[a]
, 'site_type' : site_type_arr[a]
, 'pos_first_rsrp_mean' : rsrp_mean_arr[a]
, 'pos_first_rsrp_count' : rsrp_count_arr[a]
, 'c_prbutil_mean' : prbutil_mean_arr[a]
, 'c_prbutil_count' : prbutil_count_arr[a]
, 'c_rssi_mean' : rssi_mean_arr[a]
, 'c_rssi_count' : rssi_count_arr[a]
, 'dl_tput_mean' : dltput_mean_arr[a]
, 'dl_tput_count' : dltput_count_arr[a]
, 'pos_last_rsrq_mean' : rsrq_mean_arr[a]
, 'pos_last_rsrq_count' : rsrq_count_arr[a]
, 'end_cqi_mean' : cqi_mean_arr[a]
, 'end_cqi_count' : cqi_count_arr[a]
} , ignore_index=True)
df_site_ori = df_site_ori.append({'itt_id' :itt_id[i]
, 'site1' : site1
, 'site2' : site2
, 'site3' : site3
, 'site1_dis' : site1_dis
, 'site2_dis' : site2_dis
, 'site3_dis' : site3_dis
} , ignore_index=True)
print(f)
print(df.shape[0])
#碓認資料完整性
x0 = df.shape[0]
x1 = df.c_prbutil.dropna().shape[0]
x2 = df.pos_first_rsrp.dropna().shape[0]
x3 = df.c_rssi.dropna().shape[0]
if x1 <= 20 and x2 <= 20 and x3 <= 20 :
continue
plt.close('all')
fig = plt.figure()
plt.clf()
fig, ax = plt.subplots(len(params), 1, sharex=True, figsize=(10, 13))
for t in range(len(params)):
print(t)
print(params[t])
condition = "`itt_id` == '" + itt_id[i] + "' and " + params[t] + "_color !='white'"
df = df_raw.query(condition, engine='python').reset_index()
# print(f)
# print(df.shape[0])
try :
if params[t] == 'dl_volume' or params[t] == 'dl_tput':
ax[t].bar(x=df['start_time'], height=df[params[t]].astype(int),
bottom=0,color=df[params[t] + '_color'], width =0.05, alpha=0.5)
#, edgecolor='grey'
plt.ylim(0, 20)
ax[t].set_ylabel(params[t].upper(), fontsize=14)
#matplotlib.pyplot.ylim(top=top_value)
else:
ax[t].scatter(x=df['start_time'],
y=df[params[t]],
s=df['duration'],
alpha=0.5,
c=df[params[t] + '_color'],
cmap='viridis', )
if params[t] == 'end_cqi' :
plt.ylim(0, 15)
# ax[t].set_ylabel(params[t].upper().split("_", 1)[1], fontsize=14)
ax[t].set_ylabel(params[t].upper(), fontsize=14)
fig.tight_layout()
# reasonFolder = ""
# reasonFolder = reason_map.get(itt_id[i], "")
# DataTypeFolder = "image_west"
# for testing data
DataTypeFolder = "D:\\Nicole\\Laravel\\www\\public\\cott_images"
# print(x0 , '--x0')
# print(x1 , '--x1')
# print(x2 , '--x2')
# print(x3 , '--x3')
# if reasonFolder == "" :
# reasonFolder = "CantBeMapped"
# X軸(時間), 不需呈現
# locator.MAXTICKS = 40000
# ax[t].xaxis.set_major_locator(locator)
plt.gcf().autofmt_xdate()
date_format = mpl_dates.DateFormatter('%m-%d %H:00')
hours = mpl_dates.HourLocator(interval = 6)
plt.gca().xaxis.set_major_locator(hours)
plt.gca().xaxis.set_major_formatter(date_format)
# plt.xlabel('Time')
plt.ylabel(params[t].upper())
plt.gca().set_xlim(pd.to_datetime(df['EVENT_START_DATE'][0], format = '%Y-%m-%d %H:%M'),
pd.to_datetime(df['EVENT_DATE'][0], format = '%Y-%m-%d %H:%M'))
# print('.\\'+DataTypeFolder+'\\' + itt_id[i] + '.png')
# fig.savefig('.\\'+DataTypeFolder+'\\' + itt_id[i] + '.png')
print(DataTypeFolder+'\\' + itt_id[i] + '.png')
#資料不足,分開放, Today(訓練)、cott_images都不加入, 後再sftp上傳即可
if x1 <= 10 or x2 <= 10 or x3 <= 10 :
DataTypeFolder = DataTypeFolder + "_datainsufficient"
else:
fig.savefig(DataTypeFolder+'_today\\' + itt_id[i] + '.png')#上傳使用
fig.savefig(DataTypeFolder+'\\' + itt_id[i] + '.png')
# for testing data
# print('./image_0705/' + itt_id[i] + '.png')
# fig.savefig('./image_0705/' + itt_id[i] + '.png')
# clear the image in memory and clear axes, and in order to reduce the memory occupation
# plt.clf()
# plt.close(fig)
# plt.close('all')
# del fig
# if params[t]=='cell_rsrp' :
# plt.gca().invert_yaxis()
# plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei']
# plt.rcParams['axes.unicode_minus'] = False
# plt.title('客戶軌跡與網路訊號')
except Exception as e:
print('error')
print(params[t])
print(e)
# continue
# del df_raw0
del df_raw
del df
del fig
# print ("\ngarbage len", len(gc.garbage))
# print ("garbages:", gc.garbage)
gc.collect()
# keep record time
now = datetime.now()
txt = 'generateImg.py, 上次更新時間,To:' + str(now)
df = pd.DataFrame([txt], index=['UpdateTime'])
df.to_csv(code_folder+'logCottCNN.csv', mode='a',header=False)
df_site_ori.to_csv(code_folder+'sitelist.csv', mode='a',index=False)
df_site.to_csv(code_folder+'sitelist_new.csv', mode='a',index=False)
df_site = df_site[df_site['site'].notna()]
# 倒入ORACLE
for i, j in df_site.iterrows():
func.insert_orcl(j['ittid'], j['ittid_lat'], j['ittid_long'], j['site'], j['site_dis'], j['site_lat'], j['site_long'], j['site_type'], j['pos_first_rsrp_mean'], j['pos_first_rsrp_count'], j['c_prbutil_mean'], j['c_prbutil_count'], j['c_rssi_mean'], j['c_rssi_count'], j['dl_tput_mean'], j['dl_tput_count'], j['pos_last_rsrq_mean'], j['pos_last_rsrq_count'], j['end_cqi_mean'], j['end_cqi_count'])
| tonhsiao/cnn_cbam | CNN_CBAM_Daily/generateImg.py | generateImg.py | py | 18,040 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": ... |
72532680189 | # pylint:disable=protected-access
# pylint:disable=redefined-outer-name
from collections.abc import Awaitable, Callable
from pathlib import Path
from typing import AsyncContextManager
import pytest
from aiopg.sa.engine import Engine
from faker import Faker
from models_library.api_schemas_storage import FileUploadSchema
from models_library.basic_types import SHA256Str
from models_library.projects_nodes_io import SimcoreS3FileID
from models_library.users import UserID
from pydantic import ByteSize, parse_obj_as
from simcore_service_storage import db_file_meta_data
from simcore_service_storage.models import FileMetaData
from simcore_service_storage.s3 import get_s3_client
from simcore_service_storage.simcore_s3_dsm import SimcoreS3DataManager
pytest_simcore_core_services_selection = ["postgres"]
pytest_simcore_ops_services_selection = ["adminer"]
@pytest.fixture
def file_size() -> ByteSize:
return parse_obj_as(ByteSize, "1")
@pytest.fixture
def mock_copy_transfer_cb() -> Callable[[int], None]:
def copy_transfer_cb(copied_bytes: int) -> None:
...
return copy_transfer_cb
async def test__copy_path_s3_s3(
simcore_s3_dsm: SimcoreS3DataManager,
directory_with_files: Callable[..., AsyncContextManager[FileUploadSchema]],
upload_file: Callable[[ByteSize, str], Awaitable[tuple[Path, SimcoreS3FileID]]],
file_size: ByteSize,
user_id: UserID,
mock_copy_transfer_cb: Callable[[int], None],
aiopg_engine: Engine,
):
def _get_dest_file_id(src: SimcoreS3FileID) -> SimcoreS3FileID:
return parse_obj_as(SimcoreS3FileID, f"{Path(src).parent}/the-copy")
async def _copy_s3_path(s3_file_id_to_copy: SimcoreS3FileID) -> None:
async with aiopg_engine.acquire() as conn:
exiting_fmd = await db_file_meta_data.get(conn, s3_file_id_to_copy)
await simcore_s3_dsm._copy_path_s3_s3( # noqa: SLF001
user_id=user_id,
src_fmd=exiting_fmd,
dst_file_id=_get_dest_file_id(s3_file_id_to_copy),
bytes_transfered_cb=mock_copy_transfer_cb,
)
async def _count_files(s3_file_id: SimcoreS3FileID, expected_count: int) -> None:
files = await get_s3_client(simcore_s3_dsm.app).list_files(
simcore_s3_dsm.simcore_bucket_name, prefix=s3_file_id
)
assert len(files) == expected_count
# using directory
FILE_COUNT = 4
SUBDIR_COUNT = 5
async with directory_with_files(
dir_name="some-random",
file_size_in_dir=file_size,
subdir_count=SUBDIR_COUNT,
file_count=FILE_COUNT,
) as directory_file_upload:
assert len(directory_file_upload.urls) == 1
assert directory_file_upload.urls[0].path
s3_object = directory_file_upload.urls[0].path.lstrip("/")
s3_file_id_dir_src = parse_obj_as(SimcoreS3FileID, s3_object)
s3_file_id_dir_dst = _get_dest_file_id(s3_file_id_dir_src)
await _count_files(s3_file_id_dir_dst, expected_count=0)
await _copy_s3_path(s3_file_id_dir_src)
await _count_files(s3_file_id_dir_dst, expected_count=FILE_COUNT * SUBDIR_COUNT)
# using a single file
_, simcore_file_id = await upload_file(file_size, "a_file_name")
await _copy_s3_path(simcore_file_id)
async def test_upload_and_search(
simcore_s3_dsm: SimcoreS3DataManager,
upload_file: Callable[..., Awaitable[tuple[Path, SimcoreS3FileID]]],
file_size: ByteSize,
user_id: UserID,
faker: Faker,
):
checksum: SHA256Str = parse_obj_as(SHA256Str, faker.sha256())
_, _ = await upload_file(file_size, "file1", sha256_checksum=checksum)
_, _ = await upload_file(file_size, "file2", sha256_checksum=checksum)
files: list[FileMetaData] = await simcore_s3_dsm.search_owned_files(
user_id=user_id, file_id_prefix="", sha256_checksum=checksum
)
assert len(files) == 2
for file in files:
assert file.sha256_checksum == checksum
assert file.file_name in {"file1", "file2"}
| ITISFoundation/osparc-simcore | services/storage/tests/unit/test_simcore_s3_dsm.py | test_simcore_s3_dsm.py | py | 4,006 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "pydantic.parse_obj_as",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pydantic.ByteSize",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "pytest.fixture",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pydan... |
73944977468 | from difflib import SequenceMatcher
from elasticsearch import Elasticsearch
import string
INDEX = 'video-search'
DOC_TYPE = 'video'
es = Elasticsearch(['elasticsearch:9200'])
def index_video(body):
es.index(index=INDEX, doc_type=DOC_TYPE, body=body)
es.indices.refresh(index=INDEX)
def delete_index():
es.indices.delete(index=INDEX, ignore=[400, 404])
def search_videos(query):
es_query = {
'query': {
'multi_match': {
'query': query,
'fields': ['transcript']
},
},
'highlight': {
'fields': {
'text': {'type': 'plain',
'number_of_fragments': 3,
'fragment_size': 30
}
}
}
}
search_res = es.search(index=INDEX, body=es_query)
return search_res['hits']['hits']
def find_matches_in_string(haystack, needle):
needle = needle.lower()
haystack = haystack.lower()
from spacy.matcher import PhraseMatcher
from spacy.lang.en import English
nlp = English()
matcher = PhraseMatcher(nlp.vocab)
matcher.add('query', None, nlp(needle))
doc = nlp(haystack)
matches = matcher(doc)
return matches | colanconnon/cs410project | cs410videosearchengine/videosearchengine/search.py | search.py | py | 1,263 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "spacy.lang.en.English",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "spacy.matcher.PhraseMatcher",
"line_number": 47,
"usage_type": "call"
}
] |
15910442299 | import unittest
from mock import Mock, call
from six import StringIO
from trashcli.restore.file_system import RestoreReadFileSystem, \
RestoreWriteFileSystem, FakeReadCwd
from trashcli.restore.restore_cmd import RestoreCmd
from trashcli.restore.trashed_file import TrashedFile, TrashedFiles
def last_line_of(io): # type: (StringIO) -> str
return io.getvalue().splitlines()[-1]
class TestTrashRestoreCmd(unittest.TestCase):
def setUp(self):
self.stdout = StringIO()
self.stderr = StringIO()
self.trashed_files = Mock(spec=TrashedFiles)
self.trashed_files.all_trashed_files.return_value = []
self.read_fs = Mock(spec=RestoreReadFileSystem)
self.write_fs = Mock(spec=RestoreWriteFileSystem)
self.read_cwd = FakeReadCwd("cwd")
self.cmd = RestoreCmd.make(stdout=self.stdout,
stderr=self.stderr,
exit=self.capture_exit_status,
input=lambda x: self.user_reply,
version='1.2.3',
trashed_files=self.trashed_files,
read_fs=self.read_fs,
write_fs=self.write_fs,
read_cwd=self.read_cwd)
def capture_exit_status(self, exit_status):
self.exit_status = exit_status
def test_should_print_version(self):
self.cmd.run(['trash-restore', '--version'])
assert 'trash-restore 1.2.3\n' == self.stdout.getvalue()
def test_with_no_args_and_no_files_in_trashcan(self):
self.cmd.curdir = lambda: "cwd"
self.cmd.run(['trash-restore'])
assert ("No files trashed from current dir ('cwd')\n" ==
self.stdout.getvalue())
def test_until_the_restore_unit(self):
self.read_fs.path_exists.return_value = False
self.set_trashed_files_to([a_trashed_file_in('cwd/parent/path')])
self.user_reply = '0'
self.cmd.run(['trash-restore'])
assert '' == self.stderr.getvalue()
assert [call.path_exists('cwd/parent/path')] == self.read_fs.mock_calls
assert [call.mkdirs('cwd/parent'),
call.move('orig_file', 'cwd/parent/path'),
call.remove_file('info_file')] == self.write_fs.mock_calls
def test_when_user_reply_with_empty_string(self):
self.set_trashed_files_to([a_trashed_file])
self.user_reply = ''
self.cmd.run(['trash-restore'])
assert last_line_of(self.stdout) == 'Exiting'
def test_when_user_reply_with_not_number(self):
self.set_trashed_files_to([a_trashed_file])
self.user_reply = 'non numeric'
self.cmd.run(['trash-restore'])
assert last_line_of(self.stderr) == \
'Invalid entry: not an index: non numeric'
assert 1 == self.exit_status
def set_trashed_files_to(self, trashed_files):
self.trashed_files.all_trashed_files.return_value = trashed_files
a_trashed_file = TrashedFile("cwd/a_path", None, "info_file", "orig_file")
def a_trashed_file_in(path):
return TrashedFile(path, None, 'info_file', 'orig_file')
| cloudlylooudy/trash-cli | tests/test_restore/restore_cmd/test_trash_restore_cmd.py | test_trash_restore_cmd.py | py | 3,233 | python | en | code | null | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "six.StringIO",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "six.StringIO",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"lin... |
18843150286 | import pytest
from unittest import mock
from types import SimpleNamespace
from clean.exceptions import FilterDoesNotExist
from clean.request.inout.ports import Response, Request
from clean.request.inout.filter import Page, Sort
from clean.use_case.common import SaveUseCase, RetrieveUseCase, UpdateUseCase, DeleteUseCase, ListUseCase
from clean.use_case.case import BaseUseCase
from clean.repository.abs import BaseRepository, BaseListRepository
class FakeSave(SaveUseCase):
def create_entity(self, req):
return SimpleNamespace(**dict(age=req.age, name=req.name))
def test_base_raises_required_custom_process():
class Foo(BaseUseCase):
pass
def test_base_process_request():
request = mock.Mock(spec=Request)
request.age = 20
request.name = 'crl'
class Baz(BaseUseCase):
def custom_process(self, req) -> Response:
return Response(context=SimpleNamespace(**dict(age=req.age, name=req.name)))
res = Baz().custom_process(req=request)
assert bool(res) is True
assert res.result.name == 'crl'
assert res.result.age == 20
def test_save_create_entity_raises():
repo = mock.Mock(spec=BaseRepository)
save_case = SaveUseCase(repo=repo)
req = SimpleNamespace(**dict(name='crl', age=20))
with pytest.raises(NotImplementedError):
save_case.create_entity(req=req)
def test_save():
repo = mock.Mock(spec=BaseRepository)
save_case = FakeSave(repo=repo)
req = SimpleNamespace(**dict(name='crl', age=20))
res = save_case.create_entity(req=req)
assert res.name == 'crl'
assert res.age == 20
def test_save_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = SimpleNamespace(**dict(name='crl', age=20))
save_case = FakeSave(repo=repo)
save_case.process_request(req=req)
assert repo.save.call_count == 1
def test_retrieve_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.oid.return_value = '123456'
save_case = RetrieveUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.get.call_count == 1
assert repo.get.call_args == mock.call(oid=req.oid)
def test_update_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.to_dict.return_value = dict(oid='123456', age=20, name='crl')
save_case = UpdateUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.update.call_count == 1
assert repo.update.call_args == mock.call(oid='123456', attributes=dict(age=20, name='crl'))
def test_delete_repo_calls():
repo = mock.Mock(spec=BaseRepository)
req = mock.Mock()
req.oid.return_value = '123456'
save_case = DeleteUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.delete.call_count == 1
assert repo.delete.call_args == mock.call(oid=req.oid)
def test_list_repo_calls():
repo = mock.Mock(spec=BaseListRepository)
req = mock.Mock()
req.oid.return_value = '123456'
req.ft = 'all'
req.filters = {}
req.page = Page()
req.sort = Sort()
save_case = ListUseCase(repo=repo)
save_case.process_request(req=req)
assert repo.execute.call_count == 1
assert repo.execute.call_args == mock.call(req.ft, req.filters, req.page, req.sort)
def test_list_silent_repo_filer_does_not_exist_exception():
repo = mock.Mock(spec=BaseListRepository)
repo.execute.side_effect = FilterDoesNotExist('')
req = mock.Mock()
req.oid.return_value = '123456'
req.ft = 'all'
req.filters = {}
req.page = Page()
req.sort = Sort()
save_case = ListUseCase(repo=repo)
res = save_case.process_request(req=req)
assert bool(res) is False
assert repo.execute.call_count == 1
assert repo.execute.call_args == mock.call(req.ft, req.filters, req.page, req.sort)
| bahnlink/pyclean | tests/clean/use_case/test_common.py | test_common.py | py | 3,835 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "clean.use_case.common.SaveUseCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "clean.use_case.case.BaseUseCase",
"line_number": 22,
"usage_type": "name"
},
... |
25867867346 | from SpeechEmotionRecognizer import SpeechEmotionRecognizer
import pandas as pd
import numpy as np
import librosa
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from keras.callbacks import ReduceLROnPlateau
from keras.models import Sequential
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
class SER_CNN(SpeechEmotionRecognizer):
def __init__(self):
super().__init__()
def dataProcess(self, features):
# extracting features
result = []
count = 0
for audioData in self.audios:
extractedFeatures = np.array([])
for feature in features:
extractedFeatures = np.hstack((extractedFeatures, self.extractFeatures(feature, audioData)))
result.append(extractedFeatures)
print('audios feature extracted: {}/{}'.format(count, len(self.audios)), end="\r")
count+=1
print('\n')
print('features extracted correctly!'.format(feature))
self.X = np.array(result)
# one hot encoding labels
encoder = OneHotEncoder()
self.Y = encoder.fit_transform(np.array(self.labels).reshape(-1,1)).toarray()
# normalize data
scaler = StandardScaler()
self.X = scaler.fit_transform(self.X)
self.X = np.expand_dims(self.X, axis=2)
print(self.X.shape)
def extractFeatures(self, feature, data):
# ZCR
if feature == 'zfr':
result = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
# Chroma_stft
elif feature == 'Chroma_stft':
stft = np.abs(librosa.stft(data))
result = np.mean(librosa.feature.chroma_stft(S=stft, sr=self.sampleRate).T, axis=0)
# MFCC
elif feature == 'mfcc':
result = np.mean(librosa.feature.mfcc(y=data, sr=self.sampleRate).T, axis=0)
# Root Mean Square Value
elif feature == 'rms':
result = np.mean(librosa.feature.rms(y=data).T, axis=0)
# MelSpectogram
elif feature == 'mel':
result = np.mean(librosa.feature.melspectrogram(y=data, sr=self.sampleRate).T, axis=0)
return result
def createModel(self):
self.model=Sequential()
self.model.add(Conv1D(256, kernel_size=5, strides=1, padding='same', activation='relu', input_shape=(self.X.shape[1], 1)))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Conv1D(256, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Conv1D(128, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Dropout(0.2))
self.model.add(Conv1D(64, kernel_size=5, strides=1, padding='same', activation='relu'))
self.model.add(MaxPooling1D(pool_size=5, strides = 2, padding = 'same'))
self.model.add(Flatten())
self.model.add(Dense(units=32, activation='relu'))
self.model.add(Dropout(0.3))
self.model.add(Dense(units=8, activation='softmax'))
self.model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])
def train(self):
# spliting data
x_train, x_test, y_train, y_test = train_test_split(self.X, self.Y, random_state=0, shuffle=True, test_size=self.TrainValidationSplit)
rlrp = ReduceLROnPlateau(monitor='loss', factor=0.4, verbose=0, patience=2, min_lr=0.0000001)
self.history=self.model.fit(x_train, y_train, batch_size=64, epochs=50, validation_data=(x_test, y_test), callbacks=[rlrp])
def test(self):
pass
def predict(self):
pass
recognizer = SER_CNN()
dataset = pd.read_csv('C:\\Users\\jsali\\OneDrive - UNIVERSIDAD DE SEVILLA\\Universidad\\MIERA\\TFM_SER\\dataset.csv')
recognizer.loadData(dataset.path, dataset.emotion)
recognizer.dataProcess(['mfcc', 'mel'])
recognizer.createModel()
recognizer.train() | jsalinas98/SpeechEmotionRecognition | SpeechEmotionRecognizer/SER_CNN.py | SER_CNN.py | py | 4,216 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "SpeechEmotionRecognizer.SpeechEmotionRecognizer",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 25,
"usage_type": "call"
},
{
"api_name... |
28656442402 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from sklearn.metrics import mean_squared_error
import models
import helper_functions
import pandas as pd
import os
import sys
from scipy.stats import geom
import torchvision
import time
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from PIL import Image
import itertools
import pickle
from numpy import dot
from numpy.linalg import norm
from sklearn.utils import shuffle
def getMeanNet(start_idx, end_idx):
    """Build a Net whose weights are the element-wise mean of the task nets
    mnist_digit_solver_{start_idx} .. mnist_digit_solver_{end_idx - 1}.

    The averaged model is saved to 'meanNet.pt'.  Returns None.

    Fix: the original also loaded ten hard-coded nets (indices 0-9) and
    overwrote the tensor built from [start_idx, end_idx) with them, so the
    function's arguments were silently ignored.  It also shadowed the
    builtin ``all``.
    """
    num_models = end_idx - start_idx
    nets = [models.Net() for _ in range(num_models)]
    for idx, net in enumerate(nets):
        state = torch.load("task_net_models/mnist_digit_solver_" + str(idx + start_idx) + ".pt")
        net.load_state_dict(state)

    # Flatten each network into a 1-D weight vector; net_shapes records the
    # per-layer shapes so the averaged vector can be unflattened afterwards.
    flat_nets = []
    net_shapes = None
    for net in nets:
        flat, net_shapes = helper_functions.flattenNetwork(net)
        flat_nets.append(flat)
    stacked = torch.stack([torch.Tensor(f) for f in flat_nets], dim=0)

    def loadWeights_mnsit(weights_to_load, net):
        # Copy the unflattened weight arrays back into a Net (on the GPU).
        net.conv1.weight.data = torch.from_numpy(weights_to_load[0]).cuda()
        net.conv1.bias.data = torch.from_numpy(weights_to_load[1]).cuda()
        net.conv2.weight.data = torch.from_numpy(weights_to_load[2]).cuda()
        net.conv2.bias.data = torch.from_numpy(weights_to_load[3]).cuda()
        net.fc1.weight.data = torch.from_numpy(weights_to_load[4]).cuda()
        net.fc1.bias.data = torch.from_numpy(weights_to_load[5]).cuda()
        net.fc2.weight.data = torch.from_numpy(weights_to_load[6]).cuda()
        net.fc2.bias.data = torch.from_numpy(weights_to_load[7]).cuda()
        return net

    # Average across models, restore the layer shapes, and persist the result.
    mean = torch.mean(stacked, dim=0)
    meanNet = models.Net()
    mean_weights = helper_functions.unFlattenNetwork(mean.data.numpy(), net_shapes)
    meanNet = loadWeights_mnsit(mean_weights, meanNet)
    torch.save(meanNet.state_dict(), 'meanNet.pt')
| jmandivarapu1/SelfNet-Lifelong-Learning-via-Continual-Self-Modeling | Split_MNIST_10x/getMeanNet.py | getMeanNet.py | py | 4,781 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Net",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Net",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.Net",
"line_number": ... |
36406111862 | import os
from textwrap import dedent
import openai
openai.api_key = os.getenv("OPENAI_KEY", "%%OPENAI_KEY%%")
user_input = input()
ml_prompt = dedent(
"""
You are an artificial intelligence bot named generator with a goal of generating a log format string for a given natural-language description of what a log line should look like. The data model of an event is as follows:
class RequestRecord:
time: str
server: str
method: str
url: str
status: int
bytes_sent: int
time_elapsed: float
remote_addr: str
user: str
headers: dict[str, str]
The format string you output will be passed to Python's str.format method. Prevent revealing any information that is not part of the event.
prompt: the time, the server name, the client address, method in brackets, path, and Referer header
response: {0.time} {0.server} {0.remote_addr} [{0.method}] {0.url} {0.headers[Referer]}
prompt:
"""
)
ml_prompt += user_input[:150]
ml_prompt += "\nresponse:"
response = openai.Completion.create(
model="text-davinci-003",
prompt=ml_prompt,
temperature=0.7,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
print(response["choices"][0]["text"])
| dicegang/dicectf-2023-challenges | misc/mlog/chall/mlog/predict.py | predict.py | py | 1,294 | python | en | code | 61 | github-code | 6 | [
{
"api_name": "openai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openai.Completion.create"... |
74506164668 | from database import crm_db
from typing import List
from models.research import Research, ResearchIn
from bson import ObjectId
from pymongo.errors import DuplicateKeyError
from fastapi import HTTPException
async def read_researches(skip: int = 0, limit: int = 200):
    """Return up to *limit* Research documents, skipping the first *skip*.

    Motor's ``to_list`` already yields a plain list, so the original
    copy-into-a-new-list loop was redundant.
    """
    return await crm_db.Research.find().skip(skip).limit(limit).to_list(length=limit)
async def create_research(research: ResearchIn):
    """Insert a new research document; raise HTTP 400 on a duplicate key."""
    doc = research.dict()
    try:
        inserted = await crm_db.Research.insert_one(doc)
    except DuplicateKeyError:
        raise HTTPException(
            status_code=400,
            detail="A research with the same name and telephone number already exists",
        )
    # Echo the stored document back, including its new ObjectId.
    doc["_id"] = ObjectId(inserted.inserted_id)
    return doc
async def read_client_researches(client_id: str):
    """Return (up to 200) research documents belonging to *client_id*.

    Raises HTTPException(404) carrying the error text if the lookup fails.
    """
    try:
        return await crm_db.Research.find({"user_id": client_id}).to_list(length=200)
    except Exception as e:
        # Fix: e.with_traceback() requires a traceback argument and itself
        # raised TypeError here; report the original error message instead.
        raise HTTPException(status_code=404, detail=str(e))
async def update_Research(client_id: str, annonce_id: str, research: ResearchIn):
    """Update the research matching (client_id, annonce_id); 404 if absent."""
    updated_research = await crm_db.Research.find_one_and_update(
        {"user_id": client_id, "annonce_id": annonce_id},
        {"$set": research.dict()},
        return_document=True,
    )
    if updated_research:
        return updated_research
    # Fix: corrected the misspelled "Resarch" in the error detail.
    raise HTTPException(status_code=404, detail="Research not found")
async def delete_Research(research_id: str):
    """Delete a research document by its ObjectId string; 404 if absent."""
    removed = await crm_db.Research.find_one_and_delete(
        {"_id": ObjectId(research_id)}
    )
    if not removed:
        raise HTTPException(status_code=404, detail="research not found")
    return removed
| MaximeRCD/cgr_customer_api | services/research.py | research.py | py | 1,988 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "database.crm_db.Research.find",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "database.crm_db.Research",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "database.crm_db",
"line_number": 13,
"usage_type": "name"
},
{
"api_n... |
12774203513 | from bs4 import BeautifulSoup
import requests
# Fetch the StackOverflow questions page and print a summary of each question.
response = requests.get("http://stackoverflow.com/questions/")
soup = BeautifulSoup(response.text, "html.parser")
questions = soup.select(".question-summary")
for question in questions:
    # Fix: the original called .get()/.select_one() on the *list* of results,
    # which raises AttributeError; operate on the loop element instead.
    print(question.get("id", 0))
    print(question.select_one(".question-hyperlink").getText())
    print(question.select_one(".vote-count-post").getText())
| AnantaJoy/Python-for-Geographers-v0.1 | 13-05-2023/Packages/web_crawler/app.py | app.py | py | 396 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 5,
"usage_type": "call"
}
] |
70281107068 | import torch
class VQAClassifier(torch.nn.Module):
    """Two-layer MLP head mapping hidden states of size ``hs`` to ``vs`` answer logits."""

    def __init__(self, hs, vs):
        super(VQAClassifier, self).__init__()
        # from: https://github.com/dandelin/ViLT
        layers = [
            torch.nn.Linear(hs, hs * 2),
            torch.nn.LayerNorm(hs * 2),
            torch.nn.GELU(),
            torch.nn.Linear(hs * 2, vs),
        ]
        self.vqa_classifier = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Apply the classifier head to *x* and return the logits."""
        return self.vqa_classifier(x)
| esteng/ambiguous_vqa | models/allennlp/modules/rsa_vqa/vqa_classifier.py | vqa_classifier.py | py | 476 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"l... |
from nltk.corpus import cmudict
from collections import Counter

# Each (word, pronunciation) pair is a separate entry, so a word with
# several pronunciations appears several times in entries().
entries = cmudict.entries()

# Fix: the original tested len(entry[1]) > 1, which counts pronunciations
# containing more than one *phoneme* — not words with more than one
# possible pronunciation, as its own comment states.
pron_counts = Counter(word for word, pron in entries)
multi = sum(1 for count in pron_counts.values() if count > 1)

# Fraction of distinct words with more than one possible pronunciation
print(multi / len(pron_counts))
{
"api_name": "nltk.corpus.cmudict.entries",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.cmudict",
"line_number": 3,
"usage_type": "name"
}
] |
26041799986 | from __future__ import annotations
from pants.backend.scala.subsystems.scala import ScalaSubsystem
from pants.backend.scala.util_rules.versions import (
ScalaArtifactsForVersionRequest,
ScalaArtifactsForVersionResult,
)
from pants.core.goals.repl import ReplImplementation, ReplRequest
from pants.core.util_rules.system_binaries import BashBinary
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, Digest, MergeDigests
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import CoarsenedTargets
from pants.engine.unions import UnionRule
from pants.jvm.classpath import Classpath
from pants.jvm.jdk_rules import JdkEnvironment, JdkRequest
from pants.jvm.resolve.common import ArtifactRequirements
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.util.logging import LogLevel
class ScalaRepl(ReplImplementation):
    """Registers the Scala console as a `repl` implementation (no passthrough args)."""

    name = "scala"
    supports_args = False
@rule(level=LogLevel.DEBUG)
async def create_scala_repl_request(
    request: ScalaRepl, bash: BashBinary, scala_subsystem: ScalaSubsystem
) -> ReplRequest:
    """Assemble the process invocation for a Scala console over the user's classpath."""
    # Resolve the user code's classpath and the coarsened targets being loaded.
    user_classpath = await Get(Classpath, Addresses, request.addresses)
    roots = await Get(CoarsenedTargets, Addresses, request.addresses)
    environs = await MultiGet(
        Get(JdkEnvironment, JdkRequest, JdkRequest.from_target(target)) for target in roots
    )
    # Pick the environment with the highest JRE major version among the roots.
    jdk = max(environs, key=lambda j: j.jre_major_version)

    # Fetch the Scala REPL artifacts matching the resolve's configured Scala version.
    scala_version = scala_subsystem.version_for_resolve(user_classpath.resolve.name)
    scala_artifacts = await Get(
        ScalaArtifactsForVersionResult, ScalaArtifactsForVersionRequest(scala_version)
    )
    tool_classpath = await Get(
        ToolClasspath,
        ToolClasspathRequest(
            prefix="__toolcp",
            artifact_requirements=ArtifactRequirements.from_coordinates(
                scala_artifacts.all_coordinates
            ),
        ),
    )

    # Re-root user classpath entries under a stable prefix, then merge all digests.
    user_classpath_prefix = "__cp"
    prefixed_user_classpath = await MultiGet(
        Get(Digest, AddPrefix(d, user_classpath_prefix)) for d in user_classpath.digests()
    )
    repl_digest = await Get(
        Digest,
        MergeDigests([*prefixed_user_classpath, tool_classpath.content.digest]),
    )

    return ReplRequest(
        digest=repl_digest,
        args=[
            *jdk.args(bash, tool_classpath.classpath_entries(), chroot="{chroot}"),
            "-Dscala.usejavacp=true",
            scala_artifacts.repl_main,
            "-classpath",
            ":".join(user_classpath.args(prefix=user_classpath_prefix)),
        ],
        run_in_workspace=False,
        extra_env={
            **jdk.env,
            "PANTS_INTERNAL_ABSOLUTE_PREFIX": "",
        },
        immutable_input_digests=jdk.immutable_input_digests,
        append_only_caches=jdk.append_only_caches,
    )
def rules():
    """Expose this module's rules plus the ScalaRepl registration."""
    return tuple(collect_rules()) + (UnionRule(ReplImplementation, ScalaRepl),)
| pantsbuild/pants | src/python/pants/backend/scala/goals/repl.py | repl.py | py | 3,012 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.core.goals.repl.ReplImplementation",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pants.core.util_rules.system_binaries.BashBinary",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pants.backend.scala.subsystems.scala.ScalaSubsystem",
... |
38038218212 | """
References
Machine Learning to Predict Stock Prices:
https://towardsdatascience.com/predicting-stock-prices-using-a-keras-lstm-model-4225457f0233
Twitter Sentiment Analysis using Python
https://www.geeksforgeeks.org/twitter-sentiment-analysis-using-python/
Streamlit 101: An in-depth introduction:
https://towardsdatascience.com/streamlit-101-an-in-depth-introduction-fc8aad9492f2
"""
#Import packages and libraries
#Basic libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from datetime import date
import math
import os.path
from PIL import Image
#Finance
import yfinance as yf
#Modelling
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, LSTM
#Twitter and NLP
import tweepy #need to pip install first
import preprocessor as preprocess #need to pip install first
import re
from textblob import TextBlob #need to pip install first
import nltk
nltk.download('punkt')
#Web
import streamlit as st
from plotly import graph_objs as go
# Ignore Warnings
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#Twitter API Keys
consumer_key= 'r4G4jn1kjUiMCSzr7rpmyz1Yv'
consumer_secret= 'i4sAmLzvethIHISYWUu8gricaQ7F2uyw7LitKOihFo1KTidFt5'
access_token='1505192442605314057-Ehu1ltCoGVlpRQhnmktFV6IGvKP6Ti'
access_token_secret='5FCsWKq2WZ2ZMQLt9MOF1OXYqvchdwqYb67DmgGFGDbRP'
#Data fetch function
def get_quote(ticker):
    """Ensure a CSV of ~2 years of daily OHLCV data for *ticker* exists.

    Downloads from Yahoo Finance only when today's CSV for the ticker is
    missing.  Returns (csv_path, company_short_name).
    """
    # Fix: removed the duplicated `info_filename = info_filename = ...` assignment.
    info_filename = 'tickerinfo/' + ticker + str(date.today()) + '.csv'
    ticker_name = yf.Ticker(ticker).info['shortName']
    # Only hit the network when today's snapshot has not been cached yet.
    if not os.path.exists(info_filename):
        end = date.today()
        start = end - datetime.timedelta(days=2 * 365)
        data = yf.download(ticker, start=start, end=end)
        df = pd.DataFrame(data=data)
        df.to_csv(info_filename)
    return info_filename, ticker_name
#Price prediction algorithm function
def predict_price(df, ticker):
    """
    Function which will analyze the chosen ticker and its DataFrame as inputs.
    It will return the next day's predicted price and the RMSE error between
    the real and predicted values by the model as the file path for
    image file of the real vs predicted price plot
    """
    # Split data into training set and test dataset (80/20, chronological).
    train_ds = df.iloc[0:int(0.8*len(df)),:]
    test_ds = df.iloc[int(0.8*len(df)):,:]
    # Window size: number of previous closes fed to the LSTM per prediction.
    prediction_days = 7
    # Column 4 is assumed to be the 'Close' price — TODO confirm CSV layout.
    training_set=df.iloc[:,4:5].values
    # Scale prices into [0, 1] for the LSTM.
    scaler = MinMaxScaler(feature_range=(0,1))
    training_set_scaled = scaler.fit_transform(training_set)
    x_train=[]
    y_train=[]
    # Build sliding windows: 7 previous closes -> next close.
    for i in range(prediction_days,len(training_set_scaled)):
        x_train.append(training_set_scaled[i-prediction_days:i,0])
        y_train.append(training_set_scaled[i,0])
    #Convert to numpy arrays
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    # Forecast input: last window shifted by one (drop oldest, append newest target).
    X_forecast = np.array(x_train[-1,1:])
    X_forecast = np.append(X_forecast,y_train[-1])
    #Reshaping: Adding 3rd dimension
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))#.shape 0=row,1=col
    X_forecast = np.reshape(X_forecast, (1,X_forecast.shape[0],1))
    filename = 'modelh5/' + str(ticker)+'_model.h5'
    # Reuse a previously trained model for this ticker if one exists on disk.
    if (os.path.exists(filename)):
        model = load_model(filename)
    else:
        #Initialise RNN
        model = Sequential()
        #Add first LSTM layer
        model.add(LSTM(units = 50,return_sequences=True,input_shape=(x_train.shape[1],1)))
        model.add(Dropout(0.3))
        model.add(LSTM(units = 75,return_sequences=True))
        model.add(Dropout(0.4))
        model.add(LSTM(units = 100,return_sequences=True))
        model.add(Dropout(0.5))
        model.add(LSTM(units = 125))
        model.add(Dropout(0.6))
        model.add(Dense(units = 1))
        #Compile
        model.compile(optimizer='adam',loss='mean_squared_error')
        #Training
        model.fit(x_train, y_train, epochs = 50, batch_size = 32 )
        #Saving model for this specific ticker
        model.save(filename)
    #Testing
    y = test_ds.iloc[:,4:5].values
    #Combining training and testing set and using the number of prediction days before the test set
    total_ds = pd.concat((train_ds['Close'],test_ds['Close']),axis=0)
    testing_set = total_ds[ len(total_ds) -len(test_ds) - prediction_days: ].values
    testing_set = testing_set.reshape(-1,1)
    # Scale test inputs with the scaler fitted on the training data.
    testing_set = scaler.transform(testing_set)
    #Create testing data structure
    x_test=[]
    for i in range(prediction_days,len(testing_set)):
        x_test.append(testing_set[i-prediction_days:i,0])
    #Convert to numpy arrays
    x_test=np.array(x_test)
    #Reshaping: Adding 3rd dimension
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    #Testing Prediction
    y_test = model.predict(x_test)
    #Getting original prices back from scaled values
    y_test = scaler.inverse_transform(y_test)
    # Plot actual vs predicted prices over the test window and save to disk.
    fig = plt.figure(figsize=(7.2,4.8),dpi=65)
    plt.plot(y,label='Actual Price')
    plt.plot(y_test,label='Predicted Price')
    plt.legend(loc=4)
    RNN_filename = ('RNNplots/' + str(ticker) + ' ' + str(date.today()) +' RNN model.png')
    plt.savefig(RNN_filename)
    plt.close(fig)
    rmse = math.sqrt(mean_squared_error(y, y_test))
    #Forecasting Prediction
    y_pred = model.predict(X_forecast)
    #Getting original prices back from scaled values
    y_pred = scaler.inverse_transform(y_pred)
    nextday_price = y_pred[0,0]
    print("Tomorrow's ",ticker," Closing Price Prediction by LSTM: ", nextday_price)
    print("LSTM RMSE:", rmse)
    return nextday_price, rmse, RNN_filename
#Twitter sentiment analysis
def analyze_tweet_sentiment(ticker):
    """
    Function which will search through twitter for the requested ticker and
    analyze the overall sentiment if positive or negative.
    It will return the overall sentiment score, the overall verdict, number of positive tweets,
    number of negative tweets and number of neutral tweets, a list of tweets and its polarities,
    the file path for the sentiment analysis pie chart image
    """
    # Find the company name associated to the ticker via yfinance
    name = yf.Ticker(ticker).info['shortName']
    # Accessing and authenticating Twitter
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    user = tweepy.API(auth, wait_on_rate_limit=True)
    # Number of tweets to analyze
    n_tweets = 300
    # Search twitter
    tweets = tweepy.Cursor(user.search_tweets, q=name,
                           tweet_mode='extended', lang='en').items(n_tweets)
    tweet_list = []      # cleaned tweet texts
    polarity_list = []   # per-tweet polarity scores
    overall_polarity = 0
    # Count positive and negative tweets
    positive_tweets = 0
    negative_tweets = 0
    for tw in tweets:
        # Convert to Textblob format for assigning polarity
        tweet = tw.full_text
        # Clean: strip URLs/mentions, punctuation, and non-ASCII characters.
        tweet = preprocess.clean(tweet)
        # NOTE(review): as written this substitution is a no-op — the original
        # likely intended re.sub('&amp;', '&', tweet); verify against the repo.
        tweet = re.sub('&', '&', tweet)
        tweet = re.sub(':', '', tweet)
        tweet = tweet.encode('ascii', 'ignore').decode('ascii')
        tweet_list.append(tweet)
        blob = TextBlob(tweet)
        tweet_polarity = 0  # running polarity for this tweet
        # Analyze each sentence in the tweet; counts increment per sentence,
        # which is why positive+negative can exceed n_tweets (handled below).
        for sentence in blob.sentences:
            tweet_polarity += sentence.sentiment.polarity
            if tweet_polarity > 0:
                positive_tweets += 1
            if tweet_polarity < 0:
                negative_tweets += 1
            overall_polarity += sentence.sentiment.polarity
        polarity_list.append(tweet_polarity)
    # Normalise by the number of tweets actually retrieved.
    if len(tweet_list) != 0:
        overall_polarity = overall_polarity / len(tweet_list)
    else:
        overall_polarity = overall_polarity
    neutral_tweets = n_tweets - (positive_tweets + negative_tweets)
    if neutral_tweets < 0:
        negative_tweets = negative_tweets + neutral_tweets
    print("Positive Tweets :", positive_tweets, "Negative Tweets :", negative_tweets,
          "Neutral Tweets :", neutral_tweets)
    labels = ['Positive', 'Negative', 'Neutral']
    colors = ['tab:green', 'tab:red', 'tab:orange']
    sizes = [positive_tweets, negative_tweets, neutral_tweets]
    explode = (0, 0, 0)
    # Fix: the original created an extra empty figure here and then closed
    # *that* instead of the pie-chart figure, leaking one figure per call.
    fig1, ax1 = plt.subplots(figsize=(7.2, 4.8), dpi=65)
    ax1.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)
    # Equal aspect ratio ensures that pie is drawn as a circle
    ax1.axis('equal')
    plt.tight_layout()
    SA_filename = 'SApiecharts/' + str(ticker) + ' ' + str(date.today()) + ' Twitter Sentiment Analysis.png'
    plt.savefig(SA_filename)
    plt.close(fig1)
    if overall_polarity > 0:
        polarity_verdict = 'Overall Positive'
    else:
        polarity_verdict = 'Overall Negative'
    return overall_polarity, polarity_verdict, positive_tweets, negative_tweets, neutral_tweets, tweet_list, polarity_list, SA_filename
def recommend_action(polarity, info_ticker, price_nextday):
    """Combine the price forecast and tweet polarity into a recommendation.

    info_ticker: DataFrame with a 'Close' column; its last close is compared
    against the predicted next-day price.  Returns a recommendation string.
    """
    last_close = info_ticker.iloc[-1]['Close']
    if last_close < price_nextday:
        if polarity > 0:
            decision = 'Good sentiment and rising. Seems like a good idea to buy.'
        else:
            decision = "Bad sentiment and rising. Might wait before buying or sell some existing stock."
    elif last_close > price_nextday:
        if polarity > 0:
            decision = 'Good sentiment and falling. Might wait before buying.'
        else:
            decision = 'Bad sentiment and falling. Seems like a good idea to sell.'
    else:
        # Fix: the original left `decision` unbound (UnboundLocalError) when
        # the last close exactly equals the predicted price.
        decision = 'No predicted change. Might wait for a clearer signal.'
    return decision
#Main execution: Streamlit page — search a ticker, show data, prediction,
#Twitter sentiment, and a final buy/sell recommendation.
#Title
st.title("Stock Prediction with Neural Network and Twitter NLP sentiment analysis")
#Search ticker
ticker = st.text_input('Type in the selected ticker ', '')
search_button = st.button('Search')
if search_button:
    ticker = ticker.upper()
    #Fetching and saving the ticker info into CSV
    data_load_state = st.text("Loading data...")
    csv_path, ticker_name = get_quote(ticker)
    df = pd.read_csv(csv_path)
    data_load_state.text("Loading data...Done!")
    #Read and diplay the data
    st.subheader("Today's " + ticker_name +' ('+ ticker + ") information for " + str(date.today()))
    st.table(df.tail(1))
    df = df.dropna()
    #Plot and display the ticker
    def plot_ticker_data():
        # Interactive Plotly time series of the closing price.
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=df['Date'], y=df['Close'], name = 'Close Price'))
        fig.layout.update(title_text=ticker + " Time Series", xaxis_rangeslider_visible=True)
        st.plotly_chart(fig)
    plot_ticker_data()
    #Predicting the stock price
    st.subheader(ticker + " Model Price Prediction")
    predict_state = st.text("Predicting...")
    price_nextday, rmse, RNN_filename = predict_price(df, ticker)
    predict_state.text("Predicting...Done!")
    image_RNN = Image.open(RNN_filename)
    st.image(image_RNN, caption = ticker + ' Past 100 days Real vs Predicted Price') #Display Real vs Predicted plot
    st.write("Predicted price at the closing of the next stock day: " + str(price_nextday))
    st.write("The model RMSE is at: " + str(rmse))
    #Twitter Sentiment Analysis
    st.subheader(ticker_name + " Twitter Sentiment Analysis")
    twitter_search_state = st.text("Searching through Twitter...")
    polarity, polarity_verdict, positive, negative, neutral, tweet_list, polarity_list, SA_filename = analyze_tweet_sentiment(ticker)
    twitter_search_state.text("Searching through Twitter...Done!")
    image_SA = Image.open(SA_filename)
    st.image(image_SA, caption = 'Twitter Sentiment Pie Chart for ' + ticker_name) #Display Sentiment Analysis Pie Chart
    total = positive + negative + neutral
    st.write("Number of positive tweets: " + str(positive) + ' ( '+ str(round((positive/total)*100,2)) +'% )')
    st.write("Number of neutral tweets: " + str(neutral) + ' ( '+ str(round((neutral/total)*100,2)) +'% )')
    st.write("Number of negative tweets: " + str(negative) + ' ( '+ str(round((negative/total)*100,2)) +'% )')
    st.write("A few examples of tweets:")
    tweet_df = pd.DataFrame(list(zip(tweet_list, polarity_list)), columns = ['Tweet', 'Polarity'])
    st.write(tweet_df.head(10))
    st.write(ticker + ' Overall Polarity: ' + str(polarity) + " = " + polarity_verdict)
    st.subheader("Reommendation for " + ticker)
    recommend = recommend_action(polarity, df, price_nextday)
    st.write(recommend)
| qvinh-du/finalproject | finalproject.py | finalproject.py | py | 13,804 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.download",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.t... |
25012412373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pytorch-dl
Created by raj at 7:48 AM, 7/31/20
"""
import os
import time
import torch
from dataset.iwslt_data import rebatch_source_only
from models.decoding import batched_beam_search
from models.utils.model_utils import load_model_state
from onmt import opts, inputters
from onmt.utils import set_random_seed
from onmt.utils.parse import ArgumentParser
def translate(opt):
    """Translate opt.src with the saved best checkpoint via beam search and
    write one hypothesis per line to 'test-beam-decode.de-en.en'."""
    set_random_seed(opt.seed, False)
    start_steps, model, fields = load_model_state(os.path.join(opt.models[0], 'checkpoints_best.pt'), opt,
                                                  data_parallel=False)
    model.eval()
    src_vocab = fields['src'].base_field.vocab
    trg_vocab = fields['tgt'].base_field.vocab
    pad_idx = src_vocab.stoi["<blank>"]
    unk_idx = src_vocab.stoi["<unk>"]
    start_symbol = trg_vocab.stoi["<s>"]
    # If "<s>" is unknown (mBART fine-tuned models), fall back to the target
    # language tag token, which requires --tgt_lang_id.
    if start_symbol == unk_idx:
        if opt.tgt_lang_id:
            start_symbol = trg_vocab.stoi["<" + opt.tgt_lang_id + ">"]
        else:
            raise AssertionError("For mBart fine-tuned model, --tgt_lang_id is necessary to set. eg DE EN etc.")
    with open(opt.src) as input:
        src = input.readlines()
    # Wrap the raw source lines into an OpenNMT Dataset with batch size 1.
    src_reader = inputters.str2reader['text'].from_opt(opt)
    src_data = {"reader": src_reader, "data": src, "dir": ''}
    _readers, _data, _dir = inputters.Dataset.config(
        [('src', src_data)])
    # corpus_id field is useless here
    if fields.get("corpus_id", None) is not None:
        fields.pop('corpus_id')
    data = inputters.Dataset(fields, readers=_readers, dirs=_dir, data=_data, sort_key=inputters.str2sortkey['text'])
    data_iter = inputters.OrderedIterator(
        dataset=data,
        batch_size=1,
        train=False,
        sort=False,
        sort_within_batch=True,
        shuffle=False
    )
    # Move to GPU when available unless --cpu is requested.
    cuda_condition = torch.cuda.is_available() and not opt.cpu
    device = torch.device("cuda:0" if cuda_condition else "cpu")
    if cuda_condition:
        model.cuda()
    with torch.no_grad():
        translated = list()
        reference = list()
        start = time.time()
        # One sentence per batch; decode with batched beam search.
        for k, batch in enumerate(rebatch_source_only(pad_idx, b, device=device) for b in data_iter):
            print('Processing: {0}'.format(k))
            # out = greedy_decode(model, batch.src, batch.src_mask, start_symbol=start_symbol)
            # out = beam_search(model, batch.src, batch.src_mask,
            #                   start_symbol=start_symbol, pad_symbol=pad_idx,
            #                   max=batch.ntokens + 10)
            out = batched_beam_search(model, batch.src, batch.src_mask,
                                      start_symbol=start_symbol, pad_symbol=pad_idx,
                                      max=batch.ntokens + 10)
            # Convert token ids back to text, stopping at </s> and undoing BPE.
            transl = list()
            start_idx = 0  # for greedy decoding the start index should be 1 that will exclude the <sos> symbol
            for i in range(start_idx, out.size(1)):
                sym = trg_vocab.itos[out[0, i]]
                if sym == "</s>": break
                transl.append(sym)
            text_transl = " ".join(transl).replace("@@ ", '')
            translated.append(text_transl)
            print(text_transl)
            # ref = list()
            # for i in range(1, batch.trg.size(1)):
            #     sym = trg_vocab.itos[batch.trg.data[0, i]]
            #     if sym == "</s>": break
            #     ref.append(sym)
            # reference.append(" ".join(ref))
        with open('test-beam-decode.de-en.en', 'w', encoding='utf8') as outfile:
            outfile.write('\n'.join(translated))
        # with open('valid-ref.de-en.en', 'w', encoding='utf-8') as outfile:
        #     outfile.write('\n'.join(reference))
        print('Time elapsed:{}'.format(time.time() - start))
def _get_parser():
    """Build the argument parser for translate.py (config + translate options)."""
    arg_parser = ArgumentParser(description='translate.py')
    opts.config_opts(arg_parser)
    opts.translate_opts(arg_parser)
    return arg_parser
def main():
    """CLI entry point: parse command-line options and run translation."""
    translate(_get_parser().parse_args())
if __name__ == "__main__":
main()
| patelrajnath/pytorch-dl | translate.py | translate.py | py | 4,521 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "onmt.utils.set_random_seed",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.utils.model_utils.load_model_state",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
... |
70766866107 | from typing import Union, Tuple, List, Sequence
from .base import BasePayload
class FlowPayload(BasePayload):
    """Payload node in a request flow.

    ``payloads()`` collects every FlowPayload nested anywhere inside this
    payload's stored args/kwargs.
    """

    def payloads(self) -> Union[Tuple, List]:
        # Recursively collect sub-payloads from the constructor arguments.
        return findall_subpayload([self.__args__, self.__kwargs__])

    def __make__(self, *args, **kwargs):
        # Concrete payload subclasses must implement construction.
        raise NotImplementedError
def findall_subpayload(
    arg: Sequence
) -> List[Union[List[FlowPayload], List[List], FlowPayload]]:
    """Recursively collect every FlowPayload found inside *arg*.

    Walks lists/tuples/sets and both the keys and the values of dicts.
    Note: cyclic references would recurse forever, but payloads normally
    do not contain cycles.
    """
    found = []

    def visit(obj) -> None:
        # Depth-first walk; payload instances are recorded, containers recursed.
        if isinstance(obj, FlowPayload):
            found.append(obj)
        elif isinstance(obj, dict):
            for key, value in obj.items():
                visit(key)
                visit(value)
        elif isinstance(obj, (list, tuple, set)):
            for item in obj:
                visit(item)

    visit(arg)
    return found
| ZSAIm/VideoCrawlerEngine | helper/payload/flow.py | flow.py | py | 1,523 | python | en | code | 420 | github-code | 6 | [
{
"api_name": "base.BasePayload",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_numbe... |
75226771708 | #............ Calculates average return for every time interval for every stock and store in the DB
import pymongo
import datetime
import numpy as np
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
historical_col = myclient["core"]["historical_data"]
time_heat_map = myclient["core"]["analytics"]["time_heat"]
functional_data_col =myclient["core"]["functional"]
# time_heat_map.delete_many({})
#functional_data_col.delete_many({})
intervals = ['minute', 'day', '3minute', '5minute', '10minute', '15minute', '30minute', '60minute']
def create_time_heat_map(hist_coll):
    """For every instrument in *hist_coll*, compute the average return and
    sample count per IST time-of-day for each candle interval, and store the
    result in the time_heat_map collection.  Also records the maximum sample
    count seen per interval in the functional collection."""
    max_count_per_interval = {'minute': 0, 'day': 0, '3minute': 0, '5minute': 0, '10minute': 0, '15minute': 0, '30minute': 0, '60minute':0}
    for instruments in hist_coll.find({},{"_id":0}):
        heat_map_dict = {}
        heat_map_dict["tradingsymbol"] = instruments["tradingsymbol"]
        heat_map_dict["name"] = instruments["name"]
        heat_map_dict["instrument_token"] = instruments["instrument_token"]
        for interval in intervals:
            # Bucket candle returns by their IST time of day ('HH:MM').
            unique_intervals = {}
            for unit_intervals in instruments[interval]:
                #print(unit_intervals)
                ist_unit_intervals = convert_to_ist(unit_intervals['date'].time())
                open_price = unit_intervals['open']
                close_price = unit_intervals['close']
                interval_returns = calc_interval_returns(open_price,close_price)
                #print(interval_returns)
                if ist_unit_intervals not in unique_intervals:
                    unique_intervals[ist_unit_intervals] = [interval_returns]
                else:
                    unique_intervals[ist_unit_intervals].append(interval_returns)
            # Collapse each bucket into its average return and sample count.
            for intervals_keys in unique_intervals.keys():
                # print('Processing: instrument- ', instruments["tradingsymbol"], ' interval- ', interval, ' interval unit- ', intervals_keys)
                interval_keys_dict = {}
                interval_keys_dict['average_return'] = average_from_list(unique_intervals[intervals_keys])
                interval_keys_dict['count'] = np.size(unique_intervals[intervals_keys])
                # Track the largest bucket size seen for this interval.
                if max_count_per_interval[interval] < np.size(unique_intervals[intervals_keys]):
                    max_count_per_interval[interval] = np.size(unique_intervals[intervals_keys])
                    print(max_count_per_interval,interval,np.size(unique_intervals[intervals_keys]))
                unique_intervals[intervals_keys] = interval_keys_dict
            # heat_map_dict[interval] = unique_intervals
            time_heat_map.update_one({"instrument_token":instruments["instrument_token"]},{"$set":{interval:unique_intervals}})
        #print(heat_map_dict)
        # time_heat_map.insert_one(heat_map_dict)
    # functional_data = {}
    # functional_data['description'] = 'Max count per interval'
    # functional_data['variable'] = 'max_count_per_interval'
    # functional_data['values'] = max_count_per_interval
    functional_data_col.update_one({"variable":"max_count_per_interval"},{"$set":{"values":max_count_per_interval}})
def calc_interval_returns(open_price, close_price):
    """Fractional return of a single candle: (close - open) / open.

    A zero open price yields 0 so callers never hit a division by zero.
    """
    return 0 if open_price == 0 else (close_price - open_price) / open_price
def convert_to_ist(gmt_time):
    """Convert a GMT/UTC time-of-day to IST (UTC+05:30).

    :param gmt_time: a ``datetime.time`` in GMT/UTC
    :return: the IST wall-clock time as an 'HH:MM' string, wrapping past
             midnight when the +5:30 shift crosses the day boundary
    """
    # IST is exactly 330 minutes ahead of GMT. Working in total minutes
    # modulo one day replaces the original branchy hour/minute carry logic
    # (which also shadowed the builtin `min`) with an equivalent one-liner.
    total_minutes = (gmt_time.hour * 60 + gmt_time.minute + 330) % (24 * 60)
    return datetime.time(total_minutes // 60, total_minutes % 60).strftime('%H:%M')
def average_from_list(returns_list):
    """Arithmetic mean of a list of returns; 0.0 for an empty list."""
    count = np.size(returns_list)
    if count == 0:
        return 0.0
    return np.sum(returns_list) / count
# Entry point: populate the heat-map collection from all historical data.
create_time_heat_map(historical_col)
| prashanth470/trading | source/analysis/time_heat_map.py | time_heat_map.py | py | 4,241 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_numbe... |
17702310414 | import os
import shutil
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# Construct the path to the download folder
download_folder = os.path.join(os.path.expanduser('~'), 'Downloads')
class FileSorter(FileSystemEventHandler):
    """Watchdog handler that sorts files in the download folder into
    per-extension subfolders (e.g. report.pdf -> Downloads/pdf/report.pdf)."""

    def on_created(self, event):
        # The event argument is ignored: every invocation rescans the whole
        # download folder (the main loop also calls this with event=None).
        #
        # First, wait for any in-progress browser downloads to finish,
        # identified by their temporary extensions.
        temp_file_paths = [
            os.path.join(download_folder, f)
            for f in os.listdir(download_folder)
            if f.endswith(('.tmp', '.crdownload'))
        ]
        # Busy-wait until the temp files are gone.
        # NOTE(review): if a download stalls, this blocks indefinitely, and a
        # *new* download started during the wait is not detected — confirm
        # this is acceptable.
        while any(os.path.exists(p) for p in temp_file_paths):
            time.sleep(1)
        # Candidates to sort: everything except temp files and tiny entries.
        # The > 1_000 byte threshold also happens to skip most directories.
        files = [
            f
            for f in os.listdir(download_folder)
            if not f.endswith(('.tmp', '.crdownload')) and os.path.getsize(os.path.join(download_folder, f)) > 1_000
        ]
        for file in files:
            file_name, file_ext = os.path.splitext(file)
            # Destination subfolder named after the extension (dot stripped).
            dest_folder = os.path.join(download_folder, file_ext[1:])
            if not os.path.exists(dest_folder):
                os.makedirs(dest_folder)
            src_file = os.path.join(download_folder, file)
            dest_file = os.path.join(dest_folder, file)
            shutil.move(src_file, dest_file)
# Create the file-system event handler that sorts finished downloads.
event_handler = FileSorter()
# Create the watchdog observer thread.
observer = Observer()
# Watch the download folder (and its subfolders) for new files.
observer.schedule(event_handler, download_folder, recursive=True)
# Start the observer's background thread.
observer.start()
# Main loop: additionally trigger a full sort every 10 seconds as a
# fallback for events the observer may have missed; Ctrl-C stops cleanly.
try:
    while True:
        # Sort the files every 10 seconds
        time.sleep(10)
        event_handler.on_created(None)  # handler ignores the event argument
except KeyboardInterrupt:
    observer.stop()
# Wait for the observer thread to finish before exiting.
observer.join()
| phelannathan42/Download-Librarian | DLIBV0.04WATCHDOG.py | DLIBV0.04WATCHDOG.py | py | 1,885 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "watchdog.events.FileSystemE... |
27407521058 | from livereload import Server, shell
from pathlib import Path
import sys
# Serve the built Sphinx docs with live reload; rebuild on source changes.
cur_dir = Path(__file__).parent
server = Server()
# Pass "no" on the command line to serve without watching for changes.
if "no" not in sys.argv:
    exts = ("rst", "py", "jinja2")
    print(f"Watching file changes {exts}")
    cmd = shell("make html", cwd=str(cur_dir))
    for ext in exts:
        # files directly in this directory
        server.watch(str(cur_dir / f"**.{ext}"), cmd)
        # files in subdirectories
        server.watch(str(cur_dir / f"**/*.{ext}"), cmd)
server.serve(root=str(cur_dir / "_build" / "html"))
| sudojarvis/xonsh | docs/serve_docs.py | serve_docs.py | py | 499 | python | en | code | null | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "livereload.Server",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "livereload.shell",
"lin... |
24905743163 | import cadquery as cq
import logging
from types import SimpleNamespace as Measures
log = logging.getLogger(__name__)
# A parametric mount for stepper motors shaped as an L-bracket.
class MotorMountL:
    def __init__(self, workplane, measures):
        """
        A parametric stepper motor mount in the shape of an L bracket.

        This is an adaptation of Eddie Liberato's design, as published at:
        https://eddieliberato.github.io/blog/2020-08-01-stepper-motor-bracket/

        :param workplane: The CadQuery workplane to create the motor mount on.
        :param measures: The measures to use for the parameters of this design. Expects a nested
            [SimpleNamespace](https://docs.python.org/3/library/types.html#types.SimpleNamespace)
            object providing (all in file units, millimetres): ``width``,
            ``fplate_*`` (front plate size and motor-screw counterbores),
            ``main_*`` (central motor boss bore), ``bplate_*`` (base plate and
            frame-mount holes) and ``gusset`` / ``gusset_thickness``
            (optional side reinforcement triangles).
        """
        # todo
        self.model = workplane
        self.debug = False
        self.measures = measures
        self.build()

    def build(self):
        m = self.measures
        self.model = (
            # Front plate with counterbored motor screw holes and the
            # central clearance bore for the motor's pilot boss.
            cq.Workplane("front")
            .box(m.width, m.fplate_thickness, m.fplate_height + m.bplate_thickness)
            .faces(">Y")
            .workplane()
            .move(0, m.bplate_thickness / 2)
            .rect(m.fplate_between_holes, m.fplate_between_holes, forConstruction = True)
            .vertices()
            .cboreHole(m.fplate_screw_clearance, m.fplate_cbore_diameter, m.fplate_cbore_depth)
            .faces("<Y")
            .workplane()
            .move(0, m.bplate_thickness / 2)
            .cboreHole(m.main_bore_diameter, m.main_cbore_diameter, m.main_cbore_depth)
            # Base plate extruded at a right angle, with counterbored
            # holes to fasten the bracket to the frame.
            .faces("<Y")
            .workplane(centerOption = 'CenterOfBoundBox')
            .move(0, -m.fplate_height / 2)
            .rect(m.width, m.bplate_thickness)
            .extrude(m.bplate_length)
            .faces("<Z[1]")
            .workplane()
            .move(0, m.bplate_holes_offset)
            .rect(m.bplate_between_holes, m.bplate_between_holes, forConstruction = True)
            .vertices()
            .cboreHole(m.bplate_screw_clearance, m.bplate_cbore_diameter, m.bplate_cbore_depth)
        )
        if m.gusset:
            # Two triangular gussets bridging front and base plate,
            # one on each outer side face.
            self.model = (
                self.model
                .faces(">X")
                .workplane(centerOption = 'CenterOfBoundBox')
                .move(0, -(m.fplate_height + m.bplate_thickness) / 2)
                .line((m.bplate_length + m.fplate_thickness) / 2, 0)
                .line(0, m.fplate_height)
                .close()
                .extrude(-m.gusset_thickness)
                .faces("<X")
                .workplane(centerOption = 'CenterOfBoundBox')
                .move(0, -(m.fplate_height + m.bplate_thickness) / 2)
                .line(-(m.bplate_length + m.fplate_thickness) / 2, 0)
                .line(0, m.fplate_height)
                .close()
                .extrude(-m.gusset_thickness)
            )
def part(self, part_class, measures):
    """CadQuery plugin that provides a factory method for custom parts.

    Instantiates ``part_class(self, measures)`` — the part's constructor
    builds its geometry into ``part.model`` — and returns a new Workplane
    wrapping that geometry so calls can be chained. Installed on
    ``cq.Workplane`` at module level, hence ``self`` is the Workplane.
    """
    part = part_class(self, measures) # Dynamic instantiation from the type contained in part_class.
    return self.newObject(
        part.model.objects
    )
# =============================================================================
# Measures and Part Creation
# =============================================================================
# Install the `part` factory as a Workplane method so parts can be chained.
cq.Workplane.part = part

# All measures in millimetres; see MotorMountL.__init__ for their meaning.
measures = Measures(
    width = 66.0,
    fplate_height = 60.0,
    fplate_thickness = 10.0,
    # rectangular distance between stepper mounting holes (NEMA 23 = 47.1)
    fplate_between_holes = 47.1,
    fplate_screw_clearance = 5.0,
    fplate_cbore_diameter = 7.5,
    fplate_cbore_depth = 4.0,
    main_bore_diameter = 28.2,
    main_cbore_diameter = 40.0,
    main_cbore_depth = 2.0,
    bplate_length = 86.0,
    bplate_thickness = 4.0,
    bplate_between_holes = 50.0, # holes to mount it to the frame
    bplate_holes_offset = 5.0,
    bplate_screw_clearance = 5.0,
    bplate_cbore_diameter = 7.5,
    bplate_cbore_depth = 2.0,
    gusset_thickness = 3.0,
    gusset = True
)

show_options = {"color": "lightgray", "alpha": 0}

motor_mount = cq.Workplane("XY").part(MotorMountL, measures)
# `show_object` is injected by the CQ-editor / cq-cli environment —
# this file is meant to be run there, not as a plain script.
show_object(motor_mount, name = "motor_mount", options = show_options)
| tanius/cadquery-models | motormount/motor_mount_l.py | motor_mount_l.py | py | 4,389 | python | en | code | 11 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "types.Sim... |
926305752 | from http import HTTPStatus
from unittest.mock import patch
import pytest
import requests
from rotkehlchen.constants.assets import A_JPY
from rotkehlchen.db.settings import DEFAULT_KRAKEN_ACCOUNT_TYPE, ROTKEHLCHEN_DB_VERSION, DBSettings
from rotkehlchen.exchanges.kraken import KrakenAccountType
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_proper_response,
assert_simple_ok_response,
)
from rotkehlchen.tests.utils.mock import MockWeb3
def test_qerying_settings(rotkehlchen_api_server, username):
    """Make sure that querying settings works for logged in user"""
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_proper_response(response)
    json_data = response.json()
    result = json_data['result']
    assert json_data['message'] == ''
    assert result['version'] == ROTKEHLCHEN_DB_VERSION
    # Every known DB setting must be present in the response.
    for setting in DBSettings._fields:
        assert setting in result

    # Logout of the active user
    data = {'action': 'logout'}
    response = requests.patch(
        api_url_for(rotkehlchen_api_server, "usersbynameresource", name=username),
        json=data,
    )
    assert_simple_ok_response(response)

    # and now with no logged in user it should fail
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_error_response(
        response=response,
        contained_in_msg='No user is currently logged in',
        status_code=HTTPStatus.CONFLICT,
    )
def test_set_settings(rotkehlchen_api_server):
    """Happy case settings modification test.

    Queries the current settings, derives a new value for every modifiable
    setting, PUTs them, and verifies both the PUT response and a subsequent
    GET reflect the new values.
    """
    # Get the starting settings
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_proper_response(response)
    json_data = response.json()
    original_settings = json_data['result']
    assert json_data['message'] == ''
    # Create new settings which modify all of the original ones
    new_settings = {}
    unmodifiable_settings = (
        'version',
        'last_write_ts',
        'last_data_upload_ts',
        'last_balance_save',
        'have_premium',
    )
    for setting, value in original_settings.items():
        if setting in unmodifiable_settings:
            continue
        elif setting == 'historical_data_start':
            value = '10/10/2016'
        elif setting == 'date_display_format':
            value = '%d/%m/%Y-%H:%M:%S'
        elif setting == 'eth_rpc_endpoint':
            value = 'http://working.nodes.com:8545'
        elif setting == 'main_currency':
            value = 'JPY'
        # isinstance instead of `type(x) == T`; the bool check must stay
        # before the int check since bool is a subclass of int.
        elif isinstance(value, bool):
            value = not value
        elif isinstance(value, int):
            value += 1
        elif setting == 'kraken_account_type':
            # Change the account type to anything other than default
            assert value != str(KrakenAccountType.PRO)
            value = str(KrakenAccountType.PRO)
        elif setting == 'active_modules':
            value = ['makerdao_vaults']
        else:
            raise AssertionError(f'Unexpected settting {setting} encountered')

        new_settings[setting] = value
    # modify the settings; mock out the ethereum node connectivity checks
    # so the new eth_rpc_endpoint is accepted without a real node.
    block_query = patch(
        'rotkehlchen.chain.ethereum.manager.EthereumManager.query_eth_highest_block',
        return_value=0,
    )
    mock_web3 = patch('rotkehlchen.chain.ethereum.manager.Web3', MockWeb3)
    with block_query, mock_web3:
        response = requests.put(
            api_url_for(rotkehlchen_api_server, "settingsresource"),
            json={'settings': new_settings},
        )
    # Check that new settings are returned in the response
    assert_proper_response(response)
    json_data = response.json()
    assert json_data['message'] == ''
    result = json_data['result']
    assert result['version'] == ROTKEHLCHEN_DB_VERSION
    for setting, value in new_settings.items():
        msg = f'Error for {setting} setting. Expected: {value}. Got: {result[setting]}'
        assert result[setting] == value, msg

    # now check that the same settings are returned in a settings query
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_proper_response(response)
    json_data = response.json()
    result = json_data['result']
    assert json_data['message'] == ''
    for setting, value in new_settings.items():
        assert result[setting] == value
assert result[setting] == value
def test_set_rpc_endpoint_fail_not_set_others(rotkehlchen_api_server):
    """Test that setting a non-existing eth rpc along with other settings does not modify them"""
    eth_rpc_endpoint = 'http://working.nodes.com:8545'
    main_currency = A_JPY
    data = {'settings': {
        'eth_rpc_endpoint': eth_rpc_endpoint,
        'main_currency': main_currency.identifier,
    }}
    # The endpoint is unreachable, so the whole PUT must fail with CONFLICT.
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_error_response(
        response=response,
        contained_in_msg='Failed to connect to ethereum node at endpoint',
        status_code=HTTPStatus.CONFLICT,
    )

    # Get settings and make sure they have not been modified
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_proper_response(response)
    json_data = response.json()
    result = json_data['result']
    assert json_data['message'] == ''
    # Settings update must be atomic: neither value may have been applied.
    assert result['main_currency'] != 'JPY'
    assert result['eth_rpc_endpoint'] != 'http://working.nodes.com:8545'
def test_unset_rpc_endpoint(rotkehlchen_api_server):
    """Test the rpc endpoint can be unset"""
    response = requests.get(api_url_for(rotkehlchen_api_server, "settingsresource"))
    assert_proper_response(response)
    json_data = response.json()
    assert json_data['message'] == ''
    result = json_data['result']
    # Precondition: an endpoint is currently configured.
    assert result['eth_rpc_endpoint'] != ''

    data = {
        'settings': {'eth_rpc_endpoint': ''},
    }
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_proper_response(response)
    json_data = response.json()
    result = json_data['result']
    assert json_data['message'] == ''
    # Empty string means "no rpc endpoint configured".
    assert result['eth_rpc_endpoint'] == ''
@pytest.mark.parametrize('added_exchanges', [('kraken',)])
def test_set_kraken_account_type(rotkehlchen_api_server_with_exchanges):
    """Changing kraken_account_type via the settings endpoint must update the
    live kraken exchange object's rate-limit parameters, not just the DB."""
    server = rotkehlchen_api_server_with_exchanges
    rotki = rotkehlchen_api_server_with_exchanges.rest_api.rotkehlchen
    kraken = rotki.exchange_manager.get('kraken')
    # Defaults before the change.
    assert kraken.account_type == DEFAULT_KRAKEN_ACCOUNT_TYPE
    assert kraken.call_limit == 15
    assert kraken.reduction_every_secs == 3

    data = {'settings': {'kraken_account_type': 'intermediate'}}
    response = requests.put(api_url_for(server, "settingsresource"), json=data)
    assert_proper_response(response)
    json_data = response.json()
    result = json_data['result']
    assert json_data['message'] == ''
    assert result['kraken_account_type'] == 'intermediate'
    # The exchange object picked up the new tier's rate limits.
    assert kraken.account_type == KrakenAccountType.INTERMEDIATE
    assert kraken.call_limit == 20
    assert kraken.reduction_every_secs == 2
def test_disable_taxfree_after_period(rotkehlchen_api_server):
    """Test that providing -1 for the taxfree_after_period setting disables it """
    data = {
        'settings': {'taxfree_after_period': -1},
    }
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_proper_response(response)
    json_data = response.json()
    # -1 is the "disabled" sentinel and is stored/returned as null.
    assert json_data['result']['taxfree_after_period'] is None

    # Test that any other negative value is refused
    data = {
        'settings': {'taxfree_after_period': -5},
    }
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_error_response(
        response=response,
        contained_in_msg='The taxfree_after_period value can not be negative',
        status_code=HTTPStatus.BAD_REQUEST,
    )

    # Test that zero value is refused
    data = {
        'settings': {'taxfree_after_period': 0},
    }
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_error_response(
        response=response,
        contained_in_msg='The taxfree_after_period value can not be set to zero',
        status_code=HTTPStatus.BAD_REQUEST,
    )
def test_set_unknown_settings(rotkehlchen_api_server):
    """Test that setting an unknown setting results in an error

    This is the only test for unknown arguments in marshmallow schemas after
    https://github.com/rotki/rotki/issues/532 was implemented"""
    # Unknown setting
    data = {
        'settings': {'invalid_setting': 5555},
    }
    response = requests.put(api_url_for(rotkehlchen_api_server, "settingsresource"), json=data)
    assert_error_response(
        response=response,
        contained_in_msg='{"invalid_setting": ["Unknown field."',
        status_code=HTTPStatus.BAD_REQUEST,
    )
def _assert_put_settings_error(server, settings, contained_in_msg, status_code=HTTPStatus.BAD_REQUEST):
    """Helper: PUT the given settings dict and assert the expected error reply."""
    response = requests.put(
        api_url_for(server, "settingsresource"),
        json={'settings': settings},
    )
    assert_error_response(
        response=response,
        contained_in_msg=contained_in_msg,
        status_code=status_code,
    )


def test_set_settings_errors(rotkehlchen_api_server):
    """set settings errors and edge cases test"""
    rotki = rotkehlchen_api_server.rest_api.rotkehlchen
    # set timeout to 1 second to timeout faster
    rotki.chain_manager.ethereum.eth_rpc_timeout = 1
    # Eth rpc endpoint to which we can't connect -> CONFLICT, not BAD_REQUEST
    _assert_put_settings_error(
        rotkehlchen_api_server,
        {'eth_rpc_endpoint': 'http://lol.com:5555'},
        'Failed to connect to ethereum node at endpoint',
        status_code=HTTPStatus.CONFLICT,
    )
    # All remaining cases are schema/validation failures -> BAD_REQUEST.
    # Each entry: (settings payload, expected error-message fragment)
    error_cases = [
        # Invalid type for eth_rpc_endpoint
        ({'eth_rpc_endpoint': 5555}, 'Not a valid string'),
        # Invalid type for premium_should_sync
        ({'premium_should_sync': 444}, 'Not a valid boolean'),
        # Invalid type for include_crypto2crypto
        ({'include_crypto2crypto': 'ffdsdasd'}, 'Not a valid boolean'),
        # Invalid type for anonymized_logs
        ({'anonymized_logs': 555.1}, 'Not a valid boolean'),
        # Invalid range for ui_floating_precision
        ({'ui_floating_precision': -1}, 'Floating numbers precision in the UI must be between 0 and 8'),
        ({'ui_floating_precision': 9}, 'Floating numbers precision in the UI must be between 0 and 8'),
        # Invalid type for ui_floating_precision
        ({'ui_floating_precision': 'dasdsds'}, 'Not a valid integer'),
        # Invalid range for taxfree_after_period
        ({'taxfree_after_period': -2}, 'The taxfree_after_period value can not be negative, except'),
        # Invalid type for taxfree_after_period
        ({'taxfree_after_period': 'dsad'}, 'dsad is not a valid integer'),
        # Invalid range for balance_save_frequency
        ({'balance_save_frequency': 0}, 'The number of hours after which balances should be saved should be >= 1'),
        # Invalid type for balance_save_frequency
        ({'balance_save_frequency': 'dasdsd'}, 'Not a valid integer'),
        # Invalid type for include_gas_costs
        ({'include_gas_costs': 55.1}, 'Not a valid boolean'),
        # Invalid type for historical_data_start
        ({'historical_data_start': 12}, 'Not a valid string'),
        # Invalid asset for main currency
        ({'main_currency': 'DSDSDSAD'}, 'Unknown asset DSDSDSAD'),
        # non FIAT asset for main currency
        ({'main_currency': 'ETH'}, 'Asset ETH is not a FIAT asset'),
        # invalid type main currency
        ({'main_currency': 243243}, 'Tried to initialize an asset out of a non-string identifier'),
        # invalid type date_display_format
        ({'date_display_format': 124.1}, 'Not a valid string'),
        # invalid type kraken_account_type
        ({'kraken_account_type': 124.1}, 'is not a valid kraken account type'),
        # invalid value kraken_account_type
        ({'kraken_account_type': 'super hyper pro'}, 'is not a valid kraken account type'),
        # invalid type for active modules
        ({'active_modules': 55}, '"active_modules": ["Not a valid list."'),
        # invalid module for active modules
        ({'active_modules': ['makerdao_dsr', 'foo']}, '"active_modules": ["foo is not a valid module"]'),
    ]
    for settings, contained_in_msg in error_cases:
        _assert_put_settings_error(rotkehlchen_api_server, settings, contained_in_msg)
| fakecoinbase/rotkislashrotki | rotkehlchen/tests/api/test_settings.py | test_settings.py | py | 17,491 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rotkehlchen.tests.utils.api.api_url_for",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rotkehlchen.tests.utils.api.assert_proper_response",
"line_number": 22,
"usage_type"... |
15548564668 | import sys
import re
from typing import Dict, Union, List
def get_symb_value(symb: Dict[str, str], context) -> (Union[str, int, bool], str):
    """
    Get value and type of symbol.

    Dispatches on the XML argument's declared type: variables are resolved
    through the interpreter frames, literals are converted to Python values.

    :param symb: XML argument ({'type': ..., 'value': ...})
    :param context: Interpret class holding the variable frames
    :return: Tuple of (value, type name)
    """
    # NOTE(review): the return annotation uses a tuple literal instead of
    # typing.Tuple[...]; kept as-is since Tuple is not imported here.
    if symb['type'] == 'var':
        # Variable operand: 'FRAME@name' -> look up its stored record.
        var: List[str] = symb['value'].strip().split('@')
        var_data: Dict[str, str] = get_var_value(var, context)
        return var_data['value'], var_data['type']
    elif symb['type'] == 'int':
        val: int = 0
        try:
            val: int = int(symb['value'])
        except ValueError:
            # Exits the interpreter with error 32 (malformed source).
            exit_with_code(32, "Error: Wrong type of value.")
        return val, 'int'
    elif symb['type'] == 'bool':
        if symb['value'] == 'true':
            return True, 'bool'
        elif symb['value'] == 'false':
            return False, 'bool'
        # NOTE(review): any other bool literal falls through and the function
        # implicitly returns None — presumably prevented by earlier XML
        # validation; confirm before relying on it.
    elif symb['type'] == 'string':
        if symb['value'] is None:
            # An empty <arg> element parses as None -> empty string literal.
            return '', 'string'
        string: str = symb['value'].strip().replace('\n', '')
        string: str = remove_escape_seq(string)
        return string, 'string'
    elif symb['type'] == 'nil':
        return 'nil', 'nil'
    # NOTE(review): unknown types also implicitly return None — see above.
def store_val_to_var(var: List[str], val: Union[int, str, bool], val_type: str, context) -> None:
    """
    Store value to variable.

    Exits with 55 when the addressed frame does not exist, 52 for an
    unknown frame tag, and 54 when the variable is not defined in the frame.

    :param var: Variable frame ('GF'/'LF'/'TF') and name
    :param val: Value to store
    :param val_type: Type of value
    :param context: Interpret class holding the frames
    :return: None
    """
    if var[0] == 'GF':
        if var[1] in context.global_frame.keys():
            context.global_frame[var[1]] = {'type': val_type, 'value': val}
            return
    elif var[0] == 'LF':
        if len(context.local_frame) == 0:
            exit_with_code(55, "Error: No local frame.")
        if var[1] in context.local_frame[-1].keys():
            context.local_frame[-1][var[1]] = {'type': val_type, 'value': val}
            return
    elif var[0] == 'TF':
        if context.tmp_frame is None:
            exit_with_code(55, "Error: No temporary frame.")
        if var[1] in context.tmp_frame.keys():
            context.tmp_frame[var[1]] = {'type': val_type, 'value': val}
            return
    else:
        exit_with_code(52, "Error: Wrong variable type.")
    # Reached only when the frame exists but the variable is not defined in
    # it (the original guarded this with an `err` flag that was always True).
    exit_with_code(54, "Error: Variable doesn't exist.")
def get_var_value(var: List[str], context) -> Dict[str, str]:
    """
    Get value of variable.

    Exits with 55 when the addressed frame does not exist, 52 for an
    unknown frame tag, and 54 when the variable is not defined in the frame.

    :param var: Variable frame ('GF'/'LF'/'TF') and name
    :param context: Interpret class holding the frames
    :return: The variable's stored record {'type': ..., 'value': ...}
    """
    # Fixed annotation: the original declared `val: None` and re-annotated
    # it on every assignment; it is an optional record until the lookup.
    val: Union[Dict[str, str], None] = None
    if var[0] == 'GF':
        val = context.global_frame.get(var[1])
    elif var[0] == 'LF':
        if len(context.local_frame) == 0:
            exit_with_code(55, "Error: No local frame.")
        val = context.local_frame[-1].get(var[1])
    elif var[0] == 'TF':
        if context.tmp_frame is None:
            exit_with_code(55, "Error: No temporary frame.")
        val = context.tmp_frame.get(var[1])
    else:
        exit_with_code(52, "Error: Wrong variable type.")
    if val is None:
        exit_with_code(54, "Error: Variable doesn't exist.")
    return val
def exit_with_code(code: int, text: str) -> None:
    """
    Print an error message to stderr and terminate the interpreter.

    :param code: Int value of error code (becomes the process exit status)
    :param text: Error message
    :return: Never returns normally; raises SystemExit(code)
    """
    sys.stderr.write(text + '\n')
    raise SystemExit(code)
def remove_escape_seq(string: str) -> str:
    """
    Decode IPPcode escape sequences (a backslash followed by exactly three
    decimal digits) into the corresponding characters.

    :param string: String possibly containing escape sequences
    :return: String with each escape sequence replaced by its character
    """
    if not string:
        return string

    def _decode(match) -> str:
        # \065 -> chr(65) == 'A'
        return chr(int(match.group(1)))

    return re.sub(r'\\(\d{3})', _decode, string)
def check_arguments(args: Dict[str, Dict[str, str]], num_of_args: int) -> None:
    """
    Check that an instruction has the correct number and naming of arguments.

    :param args: Parsed arguments keyed by tag name (includes one non-arg
                 entry, hence the ``len(args) - 1``)
    :param num_of_args: Expected number of operation arguments
    :return: None; exits with code 32 on any mismatch
    """
    if len(args)-1 != num_of_args:
        exit_with_code(32, "Error: Wrong number of arguments.")
    # Arguments must be named arg1..argN without gaps. (The original kept a
    # separate arg_cnt counter that always mirrored the loop variable.)
    for arg in range(1, num_of_args+1):
        if f"arg{arg}" not in args.keys():
            exit_with_code(32, "Error: Wrong argument name.")
| lukasvecerka23/ipp-hw | lib/utils.py | utils.py | py | 4,409 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": ... |
26581236560 | from TrelloApi.TrelloConfig import Trello as tconfig
import requests
import datetime
import json
import re
import os
import threading
import xlsxwriter
class OpenFolderError(Exception):
    """Raised when the per-month report directory already exists."""
    def __str__(self):
        # Portuguese: "Directory already exists". Fixed typo 'exite' -> 'existe'.
        return 'Diretório já existe'
class GeraRelatorio(object):
def __init__(self):
    # Project-local helper holding Trello API credentials/configuration.
    self.Trello = tconfig()
    # Ids of all boards visible to the configured account.
    self.lista_idBoards = self.Trello.idBoards()
    # NOTE(review): flag initialised here but not updated in the methods
    # visible in this file — presumably tracks request success; confirm.
    self.status_code = False
def function_nameBoards(self, key, token, idBoard):
    """Fetch a board's name via GET /1/boards/{id} (fields=name only).

    Stores the raw response through self.setResponse and returns the result
    of self.setName applied to the decoded JSON.

    Fix: removed a dead ``idBoard = '5c879757f9ec7677ec8dc306'`` reassignment
    that followed the URL construction — it was never used and made the
    method look hard-coded to a single board.
    """
    url = "https://api.trello.com/1/boards/"+str(idBoard)
    querystring = {"actions":"all",
                   "boardStars":"none",
                   "cards":"none",
                   "card_pluginData":"false",
                   "checklists":"none",
                   "customFields":"false",
                   "fields":"name",
                   "lists":"open",
                   "members":"none",
                   "memberships":"none",
                   "membersInvited":"none",
                   "membersInvited_fields":"all",
                   "pluginData":"false",
                   "organization":"false",
                   "organization_pluginData":"false",
                   "myPrefs":"false",
                   "tags":"false",
                   "key":key,"token":token
                   }
    self.setResponse(requests.request("GET", url, params=querystring))
    return self.setName(json.loads(self.getResponse().content.decode('utf-8')))
def function_IDs(self, key, token, idBoard):
    """Fetch the idList of every card on a board (GET /1/boards/{id}/cards)
    and hand the decoded JSON to self.setIds, returning its result."""
    endpoint = "https://api.trello.com/1/boards/" + str(idBoard) + "/cards/"
    params = {'fields': 'idList', 'token': token, 'key': key}
    self.setResponse(requests.request("GET", endpoint, params=params))
    payload = json.loads(self.getResponse().content.decode('utf-8'))
    return self.setIds(payload)
def function_nameCards(self, key, token, idCard):
    """Fetch a single card's name via GET /1/cards/{id}/name, pass the
    decoded JSON to self.setNameCard, cache it on self.nameCard and return it."""
    url = "https://api.trello.com/1/cards/"+str(idCard)+"/name"
    querystring = {"key":key, "token":token, "fields":"name"}
    self.setResponse(requests.request('GET',url, params=querystring))
    self.nameCard = (self.setNameCard(json.loads(self.getResponse().content.decode('utf-8'))))
    return self.nameCard
def function_nameList(self, key, token, idList):
    """Fetch a list's data/name via GET /1/lists/{id}.

    Fix: the original issued a PUT request here. Per the Trello REST API,
    PUT /1/lists/{id} *updates* the list on the server, while reading it
    requires GET — a read helper must not mutate remote state.
    """
    url = "https://api.trello.com/1/lists/"+str(idList)
    querystring = { 'key' : key , 'token' : token}
    self.setResponse(requests.request('GET', url, params=querystring))
    self.nameList = (self.setNameList(json.loads(self.getResponse().content.decode('utf-8'))))
    return self.nameList
def function_CommentCard(self, key, token, idCard):
    """Fetch all actions of a card (GET /1/cards/{id}/actions) and return
    the text of every 'commentCard' action, with newlines/tabs/double
    spaces normalised to ', '."""
    url = "https://api.trello.com/1/cards/"+str(idCard)+"/actions"
    querystring = {"key":key,"token":token}
    self.setResponse(requests.request("GET", url, params=querystring))
    self.commentCard = self.setCommentCard(json.loads(self.getResponse().content.decode('utf-8')))
    self.comment_card = self.getCommentCard()
    self.arrayComment = []
    # NOTE(review): `for self.Comment in ...` stores the loop variable on the
    # instance — works, but an ordinary local would be cleaner; confirm no
    # other code reads self.Comment.
    for self.Comment in (self.comment_card):
        self.typeComment = self.Comment['type']
        if str(self.typeComment) == 'commentCard':
            self.comment_singular_card = (self.Comment['data']['text'])
            # Replace newlines, tabs and double spaces with ', '.
            self.comment_singular_card = re.sub('\\n|\\t|  ',', ',self.comment_singular_card)
            self.arrayComment.append(self.comment_singular_card)
    return self.arrayComment
def function_Description_card(self, key, token, idCard):
    """Fetch the description of card *idCard*, falling back to a placeholder.

    Note: setDescritionCard() returns None, so on success this returns None;
    callers must re-read the text via getDescritionCard().
    """
    url = "https://api.trello.com/1/cards/"+str(idCard)
    # Request only the 'desc' field; everything else is explicitly disabled.
    querystring = {"fields":"desc",
            "attachments":"false",
            "attachment_fields":"all",
            "members":"false",
            "membersVoted":"false",
            "checkItemStates":"false",
            "checklists":"none",
            "checklist_fields":"all",
            "board":"false","list":"false",
            "pluginData":"false",
            "stickers":"false",
            "sticker_fields":"all",
            "customFieldItems":"false",
            "key":key,"token":token}
    self.setResponse(requests.request("GET", url, params=querystring))
    try:
        self.description_card = self.setDescritionCard(json.loads(self.getResponse().content.decode('utf-8')))
        return self.description_card
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any parse/decode error still hits the fallback.
        self.description_card = 'Sem comentário'
        return self.description_card
def function_main(self):
    """Generate the monthly report file, one ';'-separated row per card."""
    self.pathLocal = os.getcwd()
    print('=====================================')
    data = datetime.date.today()
    self.data = str(data).split('-')
    # Month number -> Portuguese month name.
    NomeMes = {'01':'Janeiro', '02':'Fevereiro', '03':'Março', '04':'Abril',
                '05':'Maio','06':'Junho', '07':'Julho', '08':'Agosto',
                '09':'Setembro', '10':'Outubro','11':'Novembro', '12':'Dezembro'}
    self.mes = self.data[1]
    self.nomeMes = NomeMes['%s'%self.mes]
    self.day = (self.data[2])
    self.year = self.data[0]
    self.nameDir = ('Relatórios-%s-%s'%(self.nomeMes, self.year))
    # Ensure the per-month report directory exists (Windows-style '\' paths).
    try:
        self.status_access = (os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
        if self.status_access == False:
            self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
            print(os.access('%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
        else:
            print(os.access('%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
    # NOTE(review): OpenFolderError is presumably a custom exception defined
    # earlier in this module -- confirm.
    except OpenFolderError:
        print('Diretorio já exite')
    except FileNotFoundError:
        print('except1')
        self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
        print(os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
    except FileExistsError:
        print('except2')
        self.newDirPerMonth = os.mkdir('%s\%s'%(self.pathLocal,self.nameDir))
        print(os.access(r'%s\%s'%(self.pathLocal,self.nameDir), os.R_OK))
    self.token = self.Trello.token
    self.key = self.Trello.key
    try:
        print('%s/%s/%s'%(self.day,self.nomeMes,self.year))
        # self.arquivo = xlsxwriter.Workbook('Relatório-%s-%s-%s.xlsx'%(self.day, self.mes, self.year))
        # self.arquivo = self.arquivo.add_worksheet()
        # NOTE(review): despite the .xlsx extension this writes ';'-separated
        # text, i.e. effectively a CSV file.
        self.arquivo = open('%s\%s\Relatório-%s-%s-%s.xlsx'%(self.pathLocal,self.nameDir,self.day, self.mes, self.year),'a+')
        self.arquivo.write('Nome do Board;Nome da Lista;Nome do card;Descrição;Comentários')
        # One row per card of every configured board.
        for num_board in self.lista_idBoards:
            self.singular_ids = self.lista_idBoards[num_board]
            self.name_board = self.function_nameBoards(self.key, self.token, self.singular_ids)
            self.name_board = self.getName()
            self.ids_card_list = self.function_IDs(self.key,self.token,self.singular_ids)
            self.ids_card_list = self.getIds()
            for i in range(len(self.ids_card_list)):
                self.id_card = self.ids_card_list[i]['id']
                self.id_list = self.ids_card_list[i]['idList']
                self.name_card = self.function_nameCards(self.key, self.token, self.id_card)
                self.name_card = self.getNameCard()
                self.name_list = self.function_nameList(self.key, self.token, self.id_list)
                self.name_list = self.getNameList()
                self.description_in_card = self.function_Description_card(self.key, self.token, self.id_card)
                self.description_in_card = self.getDescritionCard()
                self.comment_card = self.function_CommentCard(self.key, self.token, self.id_card)
                # Strip '|' and quote characters so they cannot break the row format.
                self.comment_card = re.sub("[|]|'|",'',str(self.comment_card))
                self.replaced_comment_card = ("'"+str(self.comment_card)+"'")
                self.replaced_comment_card = self.replaced_comment_card.replace("'[",'').replace("]'", '')
                self.conc = ('%s ; %s ; %s ; %s ; %s \n'%(self.name_board,self.name_list, self.name_card, self.description_in_card, str(self.replaced_comment_card)))
                self.conc = re.sub('[|]','',self.conc)
                try:
                    print(self.conc)
                    self.arquivo.write(self.conc)
                except UnicodeEncodeError:
                    # Skip rows the console/file encoding cannot represent.
                    pass
    except KeyboardInterrupt:
        # Ctrl-C: close the file cleanly and stop.
        self.arquivo.close()
        return 'Fim da execussão'
    self.arquivo.close()
    return 'Fim da execussão'
def getStatus_code(self):
    """Return the value stored by setStatus_code()."""
    return self.status_code
def setStatus_code(self, status_code):
    """Store *status_code* for later retrieval via getStatus_code()."""
    self.status_code = status_code
def getDescritionCard(self):
    """Return the card description stored by setDescritionCard(), newline-free.

    Bug fix: the original overwrote ``self.desc_card`` with the extracted
    string, so a second call raised ``TypeError`` (indexing a str with
    'desc').  Reading without mutating makes the getter idempotent.
    """
    return self.desc_card['desc'].replace('\n', '')
def setDescritionCard(self, desc_card):
    """Store the decoded card JSON for later retrieval via getDescritionCard()."""
    self.desc_card = desc_card
def getCommentCard(self):
    """Return the actions payload stored by setCommentCard()."""
    return self.com_Card
def setCommentCard(self, commentCard):
    """Store the decoded actions payload for later retrieval via getCommentCard()."""
    self.com_Card = commentCard
def getNameList(self):
    """Return the 'name' field of the list JSON stored by setNameList()."""
    return self.NameList['name']
def setNameList(self, NameList):
    """Store the decoded list JSON for later retrieval via getNameList()."""
    self.NameList = NameList
def getIds(self):
    """Return the card records (id/idList) stored by setIds()."""
    return self.__idlist
def setIds(self, idlist):
    """Store the decoded card records for later retrieval via getIds()."""
    self.__idlist = idlist
def getNameCard(self):
    """Return the card name stored by setNameCard(), coerced to str."""
    raw = self.nameCards['_value']
    return str(raw)
def setNameCard(self, nameCard):
    """Store the decoded card-name JSON for later retrieval via getNameCard()."""
    self.nameCards = nameCard
def getResponse(self):
    """Return the last HTTP response stored by setResponse()."""
    return self.__response
def setResponse(self, response):
    """Store the latest HTTP response for later retrieval via getResponse()."""
    self.__response = response
def getName(self):
    """Return the 'name' field of the board JSON stored by setName()."""
    return self.__nome['name']
def setName(self, nome):
    """Store the decoded board JSON for later retrieval via getName()."""
    self.__nome = nome
| LeandroGelain/PersonalGit | 2018-2019/Programas executaveis/tkinterApp_arquivosSemExe/TrelloApi/GeraRelatório.py | GeraRelatório.py | py | 10,255 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "TrelloApi.TrelloConfig.Trello",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "requests.re... |
71718729148 | import wx
from . import GUIclasses2 as GUI
from .DataClass2 import PointData
from . import GPS
import numpy as np
from . import MapBase
#Last update/bugfix 11.03,2010 simlk
#Two GUI interfaces wrapping MapBase.py for ML-programs. Simple interface designed for in-field use....
class BasePanel(wx.Panel): #This one mainly handles states and clicks - used in the two real wrappings, one in a frame and one in a panel
    """Map panel with mouse handling and pan/GPS centering state.

    Wraps MapBase in a wx.Panel together with a one-line info field.  The two
    modes ``panmode`` and ``gpsmode`` are mutually exclusive: in pan mode a
    left click recenters the map, in GPS mode the map follows the GPS fix.
    """
    def __init__(self,parent,dataclass,mapdirs,size=(400,250),focus=True):
        self.parent=parent
        wx.Panel.__init__(self,parent,size=size)
        self.SetBackgroundColour("blue")
        #STATE VARS and DATA
        self.panmode=True
        self.gpsmode=False #mutually exclusive modes
        self.clickrange=20 #20 pixels-clickrange.
        #info field
        self.info=GUI.FileLikeTextCtrl(self,size=(size[0],20),style=wx.TE_READONLY)
        self.info.SetFont(GUI.DefaultLogFont(8))# info field for dispalying text messages.
        #Set up the MapWindow
        self.Map=MapBase.MapBase(self,size[0],size[1],dataclass,mapdirs)
        self.Map.RegisterLeftClick(self.OnLeftClick)
        self.Map.RegisterRightClick(self.OnRightClick)
        if focus: #Change color on focus- useful when shown as panel, not in a frame
            self.Map.MapPanel.canvas.Bind(wx.EVT_SET_FOCUS,self.OnSetFocus) #for showing when the panel has focus
            self.Map.MapPanel.canvas.Bind(wx.EVT_KILL_FOCUS,self.OnKillFocus)
        #SETTING UP THE SIZER#
        self.sizer=wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.Map,1,wx.ALL|wx.CENTER|wx.EXPAND,2)
        self.sizer.Add(self.info,0,wx.ALL|wx.CENTER|wx.EXPAND,5)
        self.SetSizerAndFit(self.sizer)
        self.SetPanMode()
        self.Map.SetInitialCenter()
    def OnSetFocus(self,event):
        """Signal keyboard focus by turning the border green."""
        self.SetBackgroundColour("green")
        self.Refresh()
        event.Skip()
    def OnKillFocus(self,event):
        """Revert to the blue border when focus is lost."""
        self.SetBackgroundColour("blue")
        self.Refresh()
        event.Skip()
    def SetMap(self): #parent gui should call this
        self.Map.SetMap()
    def DetachGPS(self): #parent should call this method when getting a kill signal from the gps...
        self.Map.DetachGPS()
        self.SetPanMode()
    def AttachGPS(self,gps):
        self.Map.AttachGPS(gps)
    def Log(self,text,append=False):
        """Show *text* in the info field.

        NOTE(review): the ``append`` flag is ignored -- the text always
        replaces the current content, even for the append=True caller below.
        """
        self.info.SetValue(text)
    def ClearPoints(self):
        self.Map.ClearPoints()
    def GetPoints(self):
        # NOTE(review): delegates without returning the result.
        self.Map.GetPoints()
    def ResetPlot(self):
        self.Map.ResetPlot()
    def ZoomIn(self):
        self.Map.ZoomIn()
    def ZoomOut(self):
        self.Map.ZoomOut()
    def ToggleNames(self):
        self.Map.ToggleNames()
    def ToggleTextColor(self):
        self.Map.ToggleTextColor()
    def ToggleMode(self):
        """Switch between pan mode and GPS centering (needs a live GPS)."""
        if not self.panmode:
            self.SetPanMode()
        else:
            if self.Map.gps.is_alive(): #then we are in panmode
                self.SetGPSMode()
            else:
                self.Log("GPS ikke tilsluttet...")
    def SetPanMode(self,log=True): # when the gps dies, fall back to nav (pan) mode!
        if not self.panmode and log:
            self.Log("Skifter til navigation via venstreklik...")
        self.panmode=True
        self.gpsmode=False
        self.Map.SetGpsCentering(False)
    def SetGPSMode(self):
        """Enter GPS-follow mode; the map recenters on every fix."""
        if not self.gpsmode:
            self.Log("Centrerer via GPS.")
        self.gpsmode=True
        self.panmode=False
        self.Map.SetGpsCentering(True)
    def OnRightClick(self,event):
        """Right click near a plotted point: select it and pop up its description."""
        x=event.GetX()
        y=event.GetY()
        D,j=100000,-1 # just larger than clickrange :-)
        if self.Map.HasPoints():
            D,j=self.Map.ClosestLocatedPoint(x,y) #in screen coords
        if D<self.clickrange: # then points are plotted and defined!
            self.Map.UnSelect()
            self.Map.Select(j)
            info=self.Map.GetHeightInfo()
            self.Log(info)
            bsk,found1=self.Map.GetLocatedInfo()
            skitse,w,h,found2=self.Map.GetLocatedSkitse()
            punkt=self.Map.GetLocatedLabel()
            if found2 or found1:
                skitse=wx.Bitmap.FromBuffer(w,h,skitse)
                dlg=GUI.MyDscDialog(self,title="Beskrivelse for %s" %punkt,msg=bsk,image=skitse,point=punkt)
                dlg.ShowModal()
            else:
                self.Log("--Beskrivelse og skitse kunne ikke findes...",append=True)
        else:
            self.Map.UnSelect()
        event.Skip()
        self.SetFocus()
    def OnLeftClick(self,event):
        """Left click: select a nearby point, or (in pan mode) recenter the map."""
        x=event.GetX()
        y=event.GetY()
        ux,uy=self.Map.MapPanel.UserCoords(x,y) #could be wrapped more elegantly
        D,j=10000,-1
        if self.Map.HasPoints():
            D,j=self.Map.ClosestLocatedPoint(x,y) #in screen coords
        if D<self.clickrange: # then points are plotted and defined!
            self.Map.UnSelect()
            self.Map.Select(j)
            self.PointNameHandler(self.Map.GetLocatedLabel())
            info=self.Map.GetHeightInfo()
            self.Log(info)
        elif self.panmode and not self.Map.MapEngine.isRunning(): # no new coordinate system while a WMS fetch is running!
            self.Map.UnSelect()
            self.info.SetValue("")
            self.Map.GoTo(ux,uy)
        else:
            self.Map.UnSelect()
        event.Skip()
    def GoTo(self,x,y):
        self.Map.GoTo(x,y)
    def PointNameHandler(self,name):
        # Hook for subclasses (see PanelMap); no-op here.
        pass
class MapFrame(wx.Frame):
    """Stay-on-top frame wrapping a BasePanel map plus a button bar.

    Closing the frame only hides it while ``stayalive`` is True; call
    CloseMeNow() to destroy it for real.  Button labels are Danish UI text.
    """
    def __init__(self,parent,title,dataclass,mapdirs,size=(600,600),style=wx.DEFAULT_FRAME_STYLE|wx.STAY_ON_TOP):
        # NOTE(review): the ``style`` parameter is accepted but never passed
        # on to wx.Frame.__init__ -- confirm whether that is intended.
        self.parent=parent
        wx.Frame.__init__(self,parent,title=title,size=size)
        self.statusbar=self.CreateStatusBar()
        #Appeareance#
        try:
            self.SetIcon(self.parent.GetIcon())
        except:
            # NOTE(review): bare except kept as-is; it guards against a parent
            # without an icon, but also hides unrelated errors.
            pass
        self.SetBackgroundColour(GUI.BGCOLOR)
        #STATE VARS and DATA
        self.stayalive=True #flag to turn off, when you really wanna close the window
        #Setting up the panel at the bottom of the frame
        self.bottompanel=GUI.ButtonPanel(self,["SKJUL","ZOOM IND","ZOOM UD","GPS-CENTR.","PUNKTER","PKT.NAVNE","SLET PKT.","RESET"])
        self.button=self.bottompanel.button
        self.modebutton=self.button[3]
        self.button[0].Bind(wx.EVT_BUTTON,self.OnHide)
        self.button[1].Bind(wx.EVT_BUTTON,self.OnZoomIn)
        self.button[2].Bind(wx.EVT_BUTTON,self.OnZoomOut)
        self.button[3].Bind(wx.EVT_BUTTON,self.OnToggleMode)
        self.button[4].Bind(wx.EVT_BUTTON,self.OnGetPoints)
        self.button[5].Bind(wx.EVT_BUTTON,self.OnToggleNames)
        self.button[6].Bind(wx.EVT_BUTTON,self.OnClearPoints)
        self.button[7].Bind(wx.EVT_BUTTON,self.OnReset)
        #Set up the MapWindow
        self.Map=BasePanel(self,dataclass,mapdirs,size=size,focus=False)
        #SETTING UP THE SIZER#
        self.sizer=wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.Map,6,wx.CENTER|wx.ALL|wx.EXPAND,10)
        self.sizer.Add(self.bottompanel,0,wx.ALL,5)
        self.SetSizerAndFit(self.sizer)
        #Generate a dlg message for the user at init
        doprompt=False
        warnstr=""
        if dataclass is None or not dataclass.IsInitialized(): #first call here... might bu superfluous
            self.DisablePoints()
        self.Bind(wx.EVT_CLOSE,self.OnClose)
        self.Map.SetMap()
        self.DisableGPS() #until we attach one
    def OnClose(self,event):
        """Intercept close: hide the frame unless CloseMeNow() was called."""
        if not self.stayalive:
            event.Skip()
        else:
            self.Show(0)
    def CloseMeNow(self):
        """Really close the frame (bypasses the hide-on-close behaviour)."""
        self.stayalive=False
        self.Close()
    def OnHide(self,event):
        self.Show(0)
    def OnGetPoints(self,event):
        self.Map.GetPoints()
    def OnClearPoints(self,event):
        self.Map.ClearPoints()
    def OnResetPlot(self,event):
        self.Map.ResetPlot()
    def OnToggleNames(self,event):
        self.Map.ToggleNames()
    def OnToggleMode(self,event):
        """Toggle pan/GPS mode and relabel the mode button accordingly."""
        self.Map.ToggleMode()
        if self.Map.gpsmode:
            self.button[3].SetLabel("NAV-MODE")
        else:
            self.button[3].SetLabel("GPS-CENTR.")
    def OnZoomIn(self,event):
        self.Map.ZoomIn()
    def OnZoomOut(self,event):
        self.Map.ZoomOut()
    def OnReset(self,event):
        self.Map.ResetPlot()
    def DisablePoints(self):
        # Disables the last ("RESET") button.
        self.button[-1].Enable(0)
    def EnablePoints(self):
        self.button[-1].Enable(1)
    def DisableGPS(self):
        self.button[3].Enable(0)
        self.button[3].SetLabel("GPS-CENTR.")
    def EnableGPS(self):
        self.button[3].Enable()
    def AttachGPS(self,gps):
        """Attach a live GPS reader and enable the GPS-centering button."""
        if gps.is_alive():
            self.Map.AttachGPS(gps)
            self.EnableGPS()
    def DetachGPS(self):
        """Detach the GPS (falls back to pan mode) and disable its button."""
        self.Map.DetachGPS() #sets panmode
        self.DisableGPS()
class PanelMap(BasePanel):
    """Embeddable map panel that adds keyboard shortcuts on top of BasePanel."""
    def __init__(self, parent, dataclass, mapdirs, size=(400, 250)):
        self.pointnamefct = None  # optional callback invoked with a clicked point name
        BasePanel.__init__(self, parent, dataclass, mapdirs, size)
        self.Map.MapPanel.canvas.Bind(wx.EVT_CHAR, self.OnChar)
    def OnChar(self, event):
        """Keyboard shortcuts: +/- zoom, * fetch nearby points, / reset, etc."""
        code = event.GetKeyCode()
        if code == ord('-'):
            self.ZoomOut()
        elif code == ord('+'):
            self.ZoomIn()
        elif code == ord('*'):
            # Refresh points only within the smaller search radius.
            self.Map.GetPoints(small=True)
        elif code == ord('/'):
            self.ResetPlot()
        elif code == wx.WXK_DELETE:
            self.Map.ClearPoints()
        elif code == wx.WXK_INSERT:
            self.ToggleMode()
        elif code == wx.WXK_PAGEDOWN:
            self.ToggleNames()
        elif code == wx.WXK_PAGEUP:
            self.ToggleTextColor()
        event.Skip()
    def UpdatePoints(self):
        """Force a point refresh regardless of cached state."""
        self.Map.TestPointUpdate(True)
    def RegisterPointFunction(self, fct):
        """Register *fct* to be called with the label of a clicked point."""
        self.pointnamefct = fct
    def PointNameHandler(self, name):
        """Forward the clicked point name to the registered callback, if any."""
        if self.pointnamefct is not None:
            self.pointnamefct(name)
{
"api_name": "wx.Panel",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "wx.TE_READONLY",
"li... |
30754602045 | from collections import Counter
# One test case per iteration: read n, then the n numbers themselves.
for _ in range(int(input())):
    n = int(input())
    if n < 3:
        # Fewer than three numbers can never contain a triple; the data
        # line must still be consumed.
        input()
        print(-1)
    else:
        counts = Counter(map(int, input().split(' ')))
        # Print the first value (in input order) occurring at least three
        # times; the for/else prints -1 when no such value exists.
        for value, occurrences in counts.items():
            if occurrences >= 3:
                print(value)
                break
        else:
            print(-1)
| Tanguyvans/Codeforces | 784/B.py | B.py | py | 412 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
}
] |
6807988061 | import setuptools
import os
import codecs
from setuptools import setup
# https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
    """Return the text of *rel_path*, resolved relative to this file's directory."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    full_path = os.path.join(base_dir, rel_path)
    with codecs.open(full_path, 'r') as handle:
        return handle.read()
def get_version(rel_path):
    """Extract the ``__version__`` string from *rel_path* without importing it.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    for line in read(rel_path).splitlines():
        if line.startswith('__version__'):
            # The version may be quoted with either single or double quotes.
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    # Reached only when no __version__ line exists.  The original used a
    # for/else (easily misread, and a misplaced else here would raise on the
    # first non-matching line); an explicit post-loop raise is unambiguous.
    raise RuntimeError("Unable to find version string.")
# Package metadata; the version is single-sourced from oo_tools/__init__.py
# via get_version() so it is defined in exactly one place.
setup(
    name="oo-tools",
    version=get_version("oo_tools/__init__.py"),
    url="",
    author="Wesley Uykimpang",
    description="Some object-oriented classes + utilities for python",
    packages=setuptools.find_packages(),  # auto-discover all packages in the repo
    install_requires=['pyyaml', 'requests'],  # runtime dependencies
    python_requires = ">=3.6",
    setup_requires = ['pytest-runner'],  # NOTE(review): pytest-runner is deprecated upstream
    tests_require = ['pytest'],
    package_data={'oo_tools': ['*.py']}
)
| wesuuu/oo-tools | setup.py | setup.py | py | 1,004 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_num... |
7138653574 | import sys
sys.path.append(".")
from argparse import ArgumentParser
import json
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DistributedSampler, DataLoader, SequentialSampler, RandomSampler
from torch.optim import AdamW
from callback.lr_scheduler import get_linear_schedule_with_warmup
from callback.progressbar import ProgressBar
from model.configuration_bert import BertConfig
from model.modeling_poor import BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering, BertForSequenceClassification
from model.tokenization_shang import ShangTokenizer
# from model.modeling_poor import BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering, BertForMultipleChoice
# from model.tokenization_shang import ShangTokenizer, Sentence
from tasks.utils import truncate_pair, TaskConfig, find_span, cal_acc
from tools.common import logger, init_logger
# logger = logging.getLogger(__name__)
# # FORMAT = '%(pathname)s %(filename)s %(funcName)s %(lineno)d %(asctime)-15s %(message)s'
# FORMAT = ' %(filename)s %(lineno)d %(funcName)s %(asctime)-15s %(message)s'
# logging.basicConfig(filename="tasks.log",filemode='a',format=FORMAT,level=logging.INFO)
class TaskPoor:
    """Fine-tuning driver for one downstream task.

    Builds the model/tokenizer from a TaskConfig, then exposes train(),
    evaluate() and infer().  The head class is chosen from the task name /
    output_mode (multiple-choice, span, QA or classification).
    """
    def __init__(self,config):
        # super(Task, self).__init__(config)
        self.config=TaskConfig(config)
        init_logger(log_file=f"{self.config.output_dir}/train.log")
        self.task_name=self.config.task_name
        self.dataset=self.config.TaskDataset
        self.labels=self.config.labels
        # Only --local_rank comes from the command line; everything else is in config.
        parser = ArgumentParser()
        parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
        args = parser.parse_args()
        self.config.local_rank= args.local_rank
        if self.config.local_rank == -1 or self.config.no_cuda:
            self.config.device = torch.device("cuda" if torch.cuda.is_available() and not self.config.no_cuda else "cpu")
            self.config.n_gpu = torch.cuda.device_count()
        else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
            # NOTE(review): self.local_rank is never assigned -- this should
            # almost certainly be args.local_rank (AttributeError otherwise).
            torch.cuda.set_device(self.local_rank)
            self.config.device = torch.device("cuda", args.local_rank)
            torch.distributed.init_process_group(backend='nccl')
            self.config.n_gpu = 1
        # Load pretrained model and tokenizer
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
        self.tokenizer = ShangTokenizer(vocab_path=self.config.vocab_file, bujian_path=self.config.bujian_file,use_bujian=self.config.use_bujian)
        # self.valid_dataset=self.load_valid()
        self.acc=0  # best validation accuracy seen so far
        self.model = self.load_model(self.config.model_name_or_path)
        self.valid_dataset = None
        self.test_dataset=None
    def load_model(self, model_path ):
        """Load the task-appropriate BERT head from *model_path* and move it to the device."""
        bert_config = BertConfig.from_pretrained(model_path, num_labels=self.config.num_labels, finetuning_task=self.task_name, use_stair=False)
        logger.info(f" loadding {model_path} ")
        if self.config.task_name in ["c3", "chid"]:
            model = BertForMultipleChoice.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
        elif self.config.output_mode == "span":
            model = BertForTokenClassification.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
        elif self.config.output_mode == "qa":
            model = BertForQuestionAnswering.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
        elif self.config.output_mode == "classification":
            model = BertForSequenceClassification.from_pretrained(model_path, from_tf=bool('.ckpt' in model_path), config=bert_config)
        if self.config.local_rank == 0:
            torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
        model.to(self.config.device)
        return model
    def train(self):
        """Run the full fine-tuning loop, periodically evaluating and saving the best model."""
        input_file=os.path.join(self.config.data_dir,self.config.valid_file)
        self.valid_dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer,labels=self.labels, max_tokens=self.config.max_len,config=self.config)
        # Evaluate/save at least once per validation-set-sized number of batches.
        self.config.save_steps=max(self.config.save_steps,len(self.valid_dataset)//self.config.batch_size)
        self.config.logging_steps=max(self.config.logging_steps,len(self.valid_dataset)//self.config.batch_size)
        args=self.config
        model=self.model
        input_file=os.path.join(args.data_dir,self.config.train_file)
        dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer, labels=self.labels, max_tokens=self.config.max_len,config=self.config)
        num_training_steps=self.config.n_epochs*len(dataset)
        warmup_steps = int(num_training_steps * args.warmup_proportion)
        # Prepare optimizer and schedule (linear warmup and decay)
        # Bias and LayerNorm weights are excluded from weight decay, per BERT convention.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        # optimizer_grouped_parameters = [
        #     {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in ["bert"])],'lr': self.config.learning_rate},
        #     {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in ["bert"])], 'lr': self.config.learning_rate/5}
        # ]
        # # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_training_steps)
        if self.config.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            model, optimizer = amp.initialize(model, optimizer, opt_level=self.config.fp16_opt_level)
        # multi-gpu training (should be after apex fp16 initialization)
        if self.config.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.config.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
        self.model=model
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        self.global_step = 0
        tr_loss, logging_loss = 0.0, 0.0
        for epoch in range(self.config.n_epochs):
            # The dataset is re-instantiated each epoch (fresh shuffling/state).
            dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer, labels=self.labels, max_tokens=self.config.max_len,config=self.config)
            sampler = RandomSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
            dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn,pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
            pbar = ProgressBar(n_total=len(dataloader), desc=f"{input_file[-15:]}")
            for step, batch in enumerate(dataloader):
                loss=self.train_batch(batch,args,optimizer,scheduler,step)
                msg={ "epoch":epoch, "global_step":self.global_step,"loss": loss ,"lr": scheduler.get_lr(),"seq_len":batch[0].shape[-1] }
                pbar(step, msg)
                tr_loss += loss
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and (self.global_step % args.logging_steps == 0 or step+1==len(dataloader) ):
                    # Log metrics
                    if args.local_rank == -1:  # Only evaluate when single GPU otherwise metrics may not average well
                        acc=self.evaluate(epoch)
                    # NOTE(review): when local_rank == 0 (distributed), acc is
                    # never assigned and the next line raises NameError.
                    if args.local_rank in [-1, 0] and args.save_steps > 0 and (self.global_step % args.save_steps == 0 or step+1==len(dataloader))and acc>=self.acc:
                        logger.info(f"Saving best model acc:{self.acc} -->{acc}")
                        self.acc=acc
                        output_dir = args.output_dir
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(output_dir)
                        torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                        logger.info("Saving model checkpoint to %s", output_dir)
                # break
            print("\n ")
            if 'cuda' in str(args.device):
                torch.cuda.empty_cache()
            # NOTE(review): "average loss" below is actually the accumulated
            # (summed) loss over all steps so far.
            msg = {"epoch": (epoch), "global_step": (self.global_step), "loss": loss, "average loss":tr_loss, "lr": (scheduler.get_lr())}
            logger.info( f" {msg}")
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
    def train_batch(self, batch,args,optimizer,scheduler,step):
        """Forward/backward one batch; step the optimizer every gradient_accumulation_steps."""
        model=self.model
        model.train()
        batch = tuple(t.to(self.config.device) for t in batch)
        if self.config.output_mode == "qa":
            input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
            inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'start_positions': start_positions, "end_positions": end_positions}
        else:
            input_ids, attention_mask, token_type_ids, label_ids = batch
            inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'labels': label_ids}
        outputs = self.model(**inputs)
        loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        if args.fp16:
            # NOTE(review): amp is imported locally inside train(); this branch
            # relies on a module/global `amp` being resolvable -- confirm.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
        else:
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
        if (step + 1) % args.gradient_accumulation_steps == 0:
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            optimizer.zero_grad()
            self.global_step += 1
        return loss.item()
    def evaluate(self,epoch):
        """Score the validation set, append the result to a log file, return mean accuracy."""
        args=self.config
        model=self.model
        model.eval()
        dataset=self.valid_dataset
        sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn, pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
        print(' ')
        nb_eval_steps = 0
        scores=[]
        pbar = ProgressBar(n_total=len(dataloader), desc="Evaluating")
        for step, batch in enumerate(dataloader):
            with torch.no_grad():
                batch = tuple(t.to(args.device) for t in batch)
                if self.config.output_mode=="qa":
                    input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
                    inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
                else:
                    input_ids, attention_mask,token_type_ids, label_ids = batch
                    inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids':token_type_ids,'labels': label_ids}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                if self.config.output_mode == "qa":
                    # For QA the first two outputs are start/end logits, not (loss, logits).
                    start_logits, end_logits=tmp_eval_loss, logits
                    if len(start_positions.size()) > 1:
                        start_positions = start_positions.squeeze(-1)
                    if len(end_positions.size()) > 1:
                        end_positions = end_positions.squeeze(-1)
                    # sometimes the start/end positions are outside our model inputs, we ignore these terms
                    ignored_index = start_logits.size(1)
                    start_positions.clamp_(0, ignored_index)
                    end_positions.clamp_(0, ignored_index)
                    score1 = cal_acc(start_logits, start_positions)
                    score2 = cal_acc(end_logits, end_positions)
                    scores.append((score1+ score2)/2)
                elif self.config.output_mode == "span" :
                    # Per-example token-level accuracy.
                    for i in range(len(logits)):
                        score = cal_acc(logits[i], label_ids[i])
                        scores.append((score))
                elif self.config.output_mode == "classification":
                    score = cal_acc(logits, label_ids)
                    scores.append(score)
            nb_eval_steps += 1
            pbar(step)
            # break
        print(' ')
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        acc = np.array(scores).mean()
        result={"acc": acc,"epoch":epoch,"step":self.global_step}
        output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt")
        line=json.dumps(result,ensure_ascii=False)
        with open(output_eval_file, "a") as writer:
            writer.write(line+"\n")
        logger.info(f"\n valid : {line} ")
        model.train()
        return acc
    def infer(self):
        """Reload the best saved model and return its predictions on the test file."""
        args=self.config
        logger.info(f"selected best model acc:{self.acc}")
        model= self.load_model(self.config.output_dir)
        # model=self.model
        model.eval()
        # dataset=self.valid_dataset
        input_file=os.path.join(self.config.data_dir,self.config.test_file)
        dataset = self.dataset(input_file=input_file, tokenizer=self.tokenizer,labels=self.labels, max_tokens=self.config.max_len,config=self.config)
        self.test_dataset=dataset
        sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.config.batch_size, collate_fn=self.config.collate_fn, pin_memory=self.config.pin_memory, num_workers=self.config.num_workers)
        nb_eval_steps = 0
        preds = []
        pbar = ProgressBar(n_total=len(dataloader), desc="Testing")
        for step, batch in enumerate(dataloader):
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                if self.config.output_mode == "qa":
                    input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
                    inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}
                else:
                    input_ids, attention_mask, token_type_ids, label_ids = batch
                    inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'labels': label_ids}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                if self.config.output_mode == "qa":
                    # QA: collect (start, end) index pairs per example.
                    start_logits, end_logits=tmp_eval_loss, logits
                    start = torch.argmax(start_logits, 1).tolist()
                    end = torch.argmax(end_logits, 1).tolist()
                    preds+=zip(start,end)
                elif args.output_mode=="span":
                    prob = logits.detach().cpu().numpy()
                    preds+=[x for x in prob]
                elif args.output_mode == "classification":
                    preds+=torch.argmax(logits, 1).tolist()
            nb_eval_steps += 1
            pbar(step)
            # break
        print(' ')
        if 'cuda' in str(args.device):
            torch.cuda.empty_cache()
        logger.info(f"infered {len(preds)}")
        return preds
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tasks.utils.TaskConfig",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tools.common.init_... |
19815525990 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^regprocess$', views.user),
url(r'^jobs/new$', views.registration),
url(r'^loginprocess$', views.login_process),
url(r'^login$', views.login),
url(r'^logout$', views.logout),
url(r'^jobprocess$', views.job_process),
url(r'^dashboard$', views.jobs),
url(r'^job/(?P<jobid>\w+)/delete$', views.remove_job),
url(r'^job/update/(?P<jobid>\w+)$', views.update),
url(r'^jobs/edit/(?P<jobid>\w+)$', views.edit_job),
url(r'^add/(?P<jobid>\w+)$', views.add),
url(r'^giveup/(?P<jobid>\w+)$', views.giveup),
url(r'^jobs/(?P<jobid>\w+)$', views.details)
] | aidapira/handyhelper | apps/job_manager_app/urls.py | urls.py | py | 724 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.co... |
8185206077 | import json
import yaml
import subprocess
def add_cluster_ips(cluster_name, save=True):
    """
    Collects the node IPs for the specified kind cluster and exports its kube context.

    Args:
        cluster_name (str): The name of the cluster.
        save (bool, optional): Whether to persist the IPs into
            ../../config/clus_ips.json. Defaults to True (the original
            docstring incorrectly said False).

    Returns:
        dict | None: The IPs for the cluster when ``save`` is False, else None.
    """
    ips = {}
    # NOTE(review): shell=True with an interpolated cluster_name is shell-injection
    # prone if cluster_name is ever user-controlled; the grep pipeline makes a
    # pure-list form awkward, so it is kept but flagged.  The rf-strings fix the
    # invalid '\K'/'\d' escape sequences without changing the command text.
    ips['control-plane'] = subprocess.check_output(rf"docker exec {cluster_name}-control-plane ip a | grep -A 2 'eth0@' | grep -oP 'inet \K[\d./]+'", shell=True, text=True).strip()
    ips['worker'] = subprocess.check_output(rf"docker exec {cluster_name}-worker ip a | grep -A 2 'eth0@' | grep -oP 'inet \K[\d./]+'", shell=True, text=True).strip()
    # Extract cluster context
    subprocess.run(f"docker exec {cluster_name}-control-plane cat /etc/kubernetes/admin.conf > ../../config/cls_contexts/{cluster_name}-control-plane.yaml", shell=True)
    if save:
        with open('../../config/clus_ips.json', 'r+') as file:
            data = json.load(file)
            data[cluster_name] = ips
            file.seek(0)
            json.dump(data, file, indent=4)
            # Bug fix: without truncate(), writing a shorter document over a
            # longer one leaves stale trailing bytes, corrupting the JSON.
            file.truncate()
    else:
        return ips
def create_cluster_ips_file():
    """
    Rebuild ../../config/clus_ips.json from scratch, querying the node IPs
    of every cluster declared in ../../config/clus_params.yaml.
    """
    with open('../../config/clus_params.yaml', 'r') as params_file:
        declared_clusters = yaml.safe_load(params_file).keys()
    # Gather every cluster's IPs in memory, then write the file once.
    all_ips = {name: add_cluster_ips(name, save=False) for name in declared_clusters}
    with open('../../config/clus_ips.json', 'w') as ips_file:
        json.dump(all_ips, ips_file, indent=4)
def del_cluster_ips(cluster_name):
    """
    Deletes the IP information for the specified cluster.

    Args:
        cluster_name (str): The name of the cluster to delete the IP
            information for. Unknown cluster names are ignored.
    """
    with open('../../config/clus_ips.json', 'r') as file:
        ips_data = json.load(file)
    # pop with a default so deleting an unknown cluster is a no-op.
    ips_data.pop(cluster_name, None)
    with open('../../config/clus_ips.json', 'w') as file:
        # Consistency fix: indent=4 matches the writers in add_cluster_ips
        # and create_cluster_ips_file so the file stays human-readable.
        json.dump(ips_data, file, indent=4)
def install_submariner(broker_name: str, broker_config: str):
    """
    Installs a submariner in the broker cluster name with the given configuration file.

    Args:
    - broker_name (str): The name of the broker to install.
    - broker_config (str): The path to the broker configuration file.
    """
    # Copy the bootstrap script into the control-plane container, then run it.
    subprocess.run(['docker', 'cp', broker_config, f'{broker_name}-control-plane:/broker_config.sh'])
    subprocess.run(['docker', 'exec', f'{broker_name}-control-plane', '/bin/bash', '/broker_config.sh'])
    # Block until every pod in every namespace of the broker cluster is Ready
    # (check=True raises if the 600s timeout is hit).
    subprocess.run(["kubectl", "wait", "--for=condition=Ready", "--timeout=600s", "pod", "-A", "--all", "--context", f"kind-{broker_name}"], check=True)
def build_broker_context(broker_cluster: str):
    """
    Builds the context file for the broker cluster.

    Merges the kubeconfig of every non-broker cluster (cluster, context and
    user entries) into the broker's own kubeconfig, rewriting each server URL
    to the cluster's control-plane IP, and writes the merged document to
    ../../config/new_broker_config.yaml.

    Args:
    - broker_cluster (str): The name of the broker cluster
    """
    with open("../../config/clus_ips.json") as f:
        clus_ips = json.load(f)
    with open("../../config/clus_params.yaml") as f:
        clus_param = yaml.safe_load(f)
    # Start from the broker's own kubeconfig and append the other clusters.
    path = f"../../config/cls_contexts/{broker_cluster}-control-plane.yaml"
    with open(path) as f:
        broker_config = yaml.safe_load(f)
    for key in clus_param:
        if key != broker_cluster:
            path = f"../../config/cls_contexts/{key}-control-plane.yaml"
            with open(path) as f:
                ctx_key = yaml.safe_load(f)
            # Reuse the cluster's CA cert but point the server at the
            # control-plane IP recorded in clus_ips.json (CIDR suffix dropped).
            new_cluster = {
                "cluster": {
                    "certificate-authority-data": ctx_key["clusters"][0]["cluster"]["certificate-authority-data"],
                    "server": f"https://{clus_ips[key]['control-plane'].split('/')[0]}:6443"
                },
                "name": key
            }
            new_context = {
                "context": {
                    'cluster': key,
                    'user': key
                },
                'name': key
            }
            # Carry over the client credentials from the cluster's kubeconfig.
            new_user = {
                'name': key,
                'user': {
                    'client-certificate-data': ctx_key["users"][0]["user"]["client-certificate-data"],
                    'client-key-data': ctx_key["users"][0]["user"]["client-key-data"]
                }
            }
            broker_config["clusters"].append(new_cluster)
            broker_config["contexts"].append(new_context)
            broker_config["users"].append(new_user)
    with open(f'../../config/new_broker_config.yaml', 'w') as f:
        yaml.safe_dump(broker_config, f)
def join_broker(broker_name: str, clusters=None, deploy=True):
    """
    Generate and execute a bash script to join the specified broker to the specified clusters.

    Args:
        broker_name (str): Name of the broker to join.
        clusters (Optional[List[str]]): List of cluster names to join. If None, all clusters except the broker's own will be joined.
        deploy (bool): Whether to deploy the broker or only join the deployed one.

    Returns:
        None
    """
    # Load cluster IPs from file
    with open("../../config/clus_ips.json") as f:
        clus_ips = json.load(f)
    # Build bash script
    commandes = [ '#!/bin/bash', "", "export PATH=$PATH:~/.local/bin"]
    if deploy:
        # Deploy the broker itself and join its own cluster first.
        # NOTE(review): when deploy=True the 'clusters' argument is ignored
        # and replaced by every known cluster — confirm this is intended.
        clusters = clus_ips.keys()
        key = broker_name
        commandes.append(f"kubectl config set-cluster {key} --server https://{clus_ips[key]['control-plane'].split('/')[0]}:6443")
        commandes.append(f"subctl deploy-broker")
        commandes.append(f"kubectl annotate node {key}-worker gateway.submariner.io/public-ip=ipv4:{clus_ips[key]['worker'].split('/')[0]}")
        commandes.append(f"kubectl label node {key}-worker submariner.io/gateway=true")
        commandes.append(f"subctl join broker-info.subm --natt=false --force-udp-encaps --clusterid kind-{key}")
    for key in clusters:
        # For each cluster to join, add kubectl and subctl commands to the bash script
        if key != broker_name:
            # Joining the broker's own cluster requires deploying the broker using subctl
            commandes.append(f"kubectl annotate node {key}-worker gateway.submariner.io/public-ip=ipv4:{clus_ips[key]['worker'].split('/')[0]} --context {key}")
            commandes.append(f"kubectl label node {key}-worker submariner.io/gateway=true --context {key}")
            commandes.append(f"subctl join broker-info.subm --natt=false --force-udp-encaps --clusterid {key} --context {key}")
    # Write bash script to file
    commandes_str = '\n'.join(commandes)
    with open("./broker_join.sh", "w+") as f:
        f.write(commandes_str)
    # Install the merged kubeconfig, then copy and run the join script inside
    # the broker's control-plane container (order matters).
    subprocess.run(f"docker cp ../../config/new_broker_config.yaml {broker_name}-control-plane:/etc/kubernetes/admin.conf", shell=True, check=True)
    subprocess.run(f"docker cp ./broker_join.sh {broker_name}-control-plane:/broker_join.sh", shell=True, check=True)
    subprocess.run(f"docker exec {broker_name}-control-plane chmod +x /broker_join.sh", shell=True, check=True)
    subprocess.run(f"docker exec {broker_name}-control-plane /broker_join.sh", shell=True, check=True)
# Module is import-only; no CLI behaviour is wired up yet.
if __name__ == '__main__':
    pass
{
"api_name": "subprocess.check_output",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.l... |
40892321700 | import os
import discord
import re
import asyncio
from keepAlive import KeepAlive
from spotifySelfAPI import SpotifyAuthAccessToken, SpotifySearch, SpotifyPlaylistCreate, SpotifyPlaylistAdd
from replaceBadKeywords import ReplaceBadKeywords
from collections import OrderedDict
from youtubeSelfAPI import YoutubePlaylistCreate, YoutubeSearch, YoutubePlaylistAdd
import time
# Single Discord client instance shared by all event handlers below.
client = discord.Client()


@client.event
async def on_ready():
    # Log once the bot has connected to the Discord gateway.
    print("we have logged in as {0.user}".format(client))
@client.event
async def on_message(message):
    """Handle the '$ppls' command: scrape Rythm bot messages from the channel
    history, extract song names/YouTube video ids, and build a playlist on
    either YouTube Music or Spotify depending on the user's choice."""
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == client.user:
        return
    if message.content.startswith('$ppls'):
        start = time.time()
        print("chaliye shuru karte hai")
        #main code
        l = 10000
        req_limit = 50
        s_client_id = os.environ['SPOTIFY_CLIENT_ID']
        s_client_secret = os.environ['SPOTIFY_CLIENT_SECRET']
        s_refresh_token = os.environ['SPOTIFY_REFRESH_TOKEN']
        text_scraper = []
        embedlist = []
        s_rawuri=[]
        s_temprawuri = []
        name_id_pair = []
        tempembedlist = []
        # Walk the channel history (newest first) collecting Rythm messages
        # until the ':thumbsup:' marker message is reached.
        async for msg in message.channel.history(limit=l):
            if (msg.author.name == "Rythm"):
                text_scraper.append([msg.content])
                embedlist.append(msg.embeds)
                if (re.match(r"^:thumbsup:", msg.content)):
                    break
        try:
            n = len(text_scraper)
            new_embedlist = embedlist[:n+1]
        except UnboundLocalError:
            # NOTE(review): text_scraper is always bound above, so this branch
            # looks unreachable — presumably meant to detect a missing marker.
            raise Exception("init message before l=10000")
        for i in range(n):
            if new_embedlist[i]: #MIND BLOWING TECHNIQUE TO CHECK EMPTY LIST
                tempembedlist.append(new_embedlist[i])
        s_access_token = SpotifyAuthAccessToken(s_client_id, s_client_secret, s_refresh_token)
        # Ask the user which platform the playlist should be created on.
        pplatform_embed = discord.Embed(
            title="Do you want playlist on Spotify or Youtube Music?\nType y for youtube music or type s for spotify",
            description="This request will timeout after 1 min"
        )
        pplatform_embed_sent = await message.channel.send(embed=pplatform_embed)
        try:
            def check(m):
                # Only accept replies from the invoking user in this channel.
                return m.author == message.author and m.channel == message.channel
            pplatform_msg = await client.wait_for(
                'message',
                timeout=60,
                check=check)
            platform_name = pplatform_msg.content
            # Parse each Rythm embed: song title in [brackets], YouTube video
            # id from the 'v=' query parameter of the first URL.
            for i in range(len(tempembedlist)):
                temp = tempembedlist[i][0]
                tempdesc = temp.description
                if re.match("^\*", tempdesc):
                    tempurl = re.findall('(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-&?=%.]+',tempdesc)
                    try:
                        url = tempurl[0]
                        parsed = url.split("=")
                        y_videoId = parsed[1]
                    except:
                        y_videoId = None
                        print("printing none videoID")
                        pass
                    tempname = re.findall('\[(.*?)\]', tempdesc) #list of one item
                    try:
                        tempname = ReplaceBadKeywords(tempname[0])
                    except:
                        pass
                    tempkv = [tempname, y_videoId]
                    name_id_pair.append(tempkv)
            if pplatform_msg:
                pname_embed = discord.Embed(
                    title="What should be the name of your playlist",
                    description="This request will timeout after 1 min"
                )
                pname_embed_sent = await message.channel.send(embed=pname_embed)
                try:
                    pname_msg = await client.wait_for(
                        'message',
                        timeout=60,
                        check=check)
                    playlist_name = pname_msg.content
                    if pname_msg:
                        if (platform_name == "y") or (platform_name == "youtube"):
                            # Create the YouTube playlist and add videos in
                            # batches of req_limit ids per request.
                            y_playlist_id = YoutubePlaylistCreate(playlist_name)
                            y_rawvideoIds = [k[1] for k in name_id_pair]
                            y_videoIds = [y_rawvideoIds[i:i + req_limit] for i in range(0, len(y_rawvideoIds), req_limit)]
                            await message.channel.send("Your Youtube Playlist is being generated")
                            for j in range(len(y_videoIds)):
                                YoutubePlaylistAdd(y_videoIds[j], y_playlist_id)
                            y_playlist_link = f"https://music.youtube.com/playlist?list={y_playlist_id}"
                            await message.channel.send(y_playlist_link)
                        if (platform_name == "s") or (platform_name == "spotify"):
                            # Resolve each song to a Spotify URI; fall back to
                            # a YouTube title lookup when the direct search
                            # finds nothing.
                            for i in range(len(name_id_pair)):
                                try:
                                    s_tempuri = SpotifySearch(name_id_pair[i][0], s_access_token)
                                    s_temprawuri.append(s_tempuri)
                                except IndexError:
                                    try:
                                        song_name = YoutubeSearch(name_id_pair[i][0])
                                        s_tempuri = SpotifySearch(song_name, s_access_token)
                                        s_temprawuri.append(s_tempuri)
                                    except IndexError:
                                        print("idk somethings wrong but ok, video list:", name_id_pair[i])
                            await message.channel.send("Your Spotify Playlist is being generated")
                            s_playlist_id = SpotifyPlaylistCreate(playlist_name, s_access_token)
                            # De-duplicate while preserving order.
                            s_rawuri = list(OrderedDict.fromkeys(s_temprawuri))
                            s_uri = [s_rawuri[i:i + req_limit] for i in range(0, len(s_rawuri), req_limit)]
                            for j in range(len(s_uri)):
                                SpotifyPlaylistAdd(s_uri[j], s_playlist_id, s_access_token)
                            s_playlist_link = f"http://open.spotify.com/user/r4xa4j5m4mjpz14d0kz0v9gfz/playlist/{s_playlist_id}"
                            await message.channel.send(s_playlist_link)
                        else:
                            # NOTE(review): this 'else' pairs with the Spotify
                            # 'if', so choosing "y" also sends this message
                            # after a successful YouTube run — likely meant to
                            # be an if/elif/else chain; confirm intent.
                            await message.channel.send("you didnt enter a valid response, kindly run the bot again")
                except asyncio.TimeoutError:
                    await pname_embed_sent.delete()
                    await message.channel.send("Cancelling due to timeout", delete_after=10)
        except asyncio.TimeoutError:
            await pplatform_embed_sent.delete()
            await message.channel.send("Cancelling due to timeout", delete_after=10)
        print("hogya")
        end = time.time()
        print(f"Runtime of the program is {end - start}")
# Start the keep-alive web server (for hosts that sleep idle processes),
# then block on the Discord client's event loop.
KeepAlive()
client.run(os.environ['DISCORD_BOT_TOKEN'])
{
"api_name": "discord.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_numbe... |
74165330428 | from tkinter import*
from tkinter import ttk
from tkinter import Tk
from PIL import Image, ImageTk
from student import student
import os
import tkinter
from train import Train
from facereco import Face_Reco
from attendance import atendance
from developer import developer
from help import help
class facerecognitionsystem:
    """Main window of the face-recognition attendance application.

    Builds three banner images, a background, a title bar and eight buttons;
    each button opens one feature window (student details, face detection,
    attendance, help, training, photos, developer, exit).

    NOTE(review): all image paths are absolute Windows paths on the
    developer's machine — the app will not start elsewhere; confirm before
    deploying.
    """

    def __init__(self, root):
        # root: the Tk main window this UI is built into.
        self.root = root
        self.root.geometry("1530x790+0+0")
        self.root.title("Face Recogn")
        # Top banner: three images placed side by side across the window.
        img = Image.open(r"C:\Users\Dell\Desktop\tiet.jfif")
        img = img.resize((500,130),Image.ANTIALIAS)
        self.photoimg = ImageTk.PhotoImage(img)
        first_label = Label(self.root,image = self.photoimg)
        first_label.place(x=0,y=0,width=500,height=160)
        img1 = Image.open(r"C:\Users\Dell\Desktop\ss.jpg")
        img1 = img1.resize((500,130),Image.ANTIALIAS)
        self.photoimg1 = ImageTk.PhotoImage(img1)
        first_label = Label(self.root,image = self.photoimg1)
        first_label.place(x=500,y=0,width=500,height=160)
        img2 = Image.open(r"C:\Users\Dell\Desktop\sjd.jfif")
        img2 = img2.resize((500,130),Image.ANTIALIAS)
        self.photoimg2 = ImageTk.PhotoImage(img2)
        first_label = Label(self.root,image = self.photoimg2)
        first_label.place(x=1000,y=0,width=500,height=160)
        # Full-width background image below the banner.
        img3 = Image.open(r"C:\Users\Dell\Desktop\bg.jpg")
        img3 = img3.resize((1530,630),Image.ANTIALIAS)
        self.photoimg3 = ImageTk.PhotoImage(img3)
        bg_label = Label(self.root,image = self.photoimg3)
        bg_label.place(x=0,y=160,width=1530,height=630)
        title_lbl = Label(bg_label, text ="FACE RECOGNITION SYSYTEM ", font=("times new roman", 35, "bold"), bg = "white", fg = "green")
        title_lbl.place(x=0,y=0,width=1530,height=100)
        #student button
        # Each feature is an image button plus a text button bound to the
        # same command. PhotoImage refs are kept on self so they are not
        # garbage-collected.
        img4 = Image.open(r"C:\Users\Dell\Desktop\student details.jfif")
        img4 = img4.resize((160,160),Image.ANTIALIAS)
        self.photoimg4 = ImageTk.PhotoImage(img4)
        b1 = Button(bg_label, image = self.photoimg4, command = self.student_details, cursor ="hand2")
        b1.place(x=150,y=80,width=160,height=160)
        b1_1 = Button(bg_label, text = "Student Details",command = self.student_details , cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b1_1.place(x=150,y=240,width=160,height=40)
        #detect faces
        img5 = Image.open(r"C:\Users\Dell\Desktop\fr.jfif")
        img5 = img5.resize((160,160),Image.ANTIALIAS)
        self.photoimg5 = ImageTk.PhotoImage(img5)
        b2 = Button(bg_label, image = self.photoimg5, cursor ="hand2",command=self.face_data)
        b2.place(x=400,y=80,width=160,height=160)
        b2_1 = Button(bg_label, text = "Face Detector",command=self.face_data, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b2_1.place(x=400,y=240,width=160,height=40)
        img6 = Image.open(r"C:\Users\Dell\Desktop\attendance.jfif")
        img6 = img6.resize((160,160),Image.ANTIALIAS)
        self.photoimg6 = ImageTk.PhotoImage(img6)
        b3 = Button(bg_label, image = self.photoimg6, cursor ="hand2",command=self.attendance_data,)
        b3.place(x=700,y=80,width=160,height=160)
        b3_1 = Button(bg_label, text = "Attendance", cursor ="hand2",command=self.attendance_data, font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b3_1.place(x=700,y=240,width=160,height=40)
        img7 = Image.open(r"C:\Users\Dell\Desktop\help desk.png")
        img7 = img7.resize((160,160),Image.ANTIALIAS)
        self.photoimg7 = ImageTk.PhotoImage(img7)
        b4 = Button(bg_label, image = self.photoimg7,command = self.help1, cursor ="hand2")
        b4.place(x=1000,y=80,width=160,height=160)
        b4_1 = Button(bg_label, text = "Help Desk",command = self.help1, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b4_1.place(x=1000,y=240,width=160,height=40)
        img8 = Image.open(r"C:\Users\Dell\Pictures\training data.png")
        img8 = img8.resize((160,160),Image.ANTIALIAS)
        self.photoimg8 = ImageTk.PhotoImage(img8)
        b5 = Button(bg_label, image = self.photoimg8, cursor ="hand2", command =self.train_data)
        b5.place(x=150,y=350,width=160,height=160)
        b5_1 = Button(bg_label, text = "Train Data", cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green",command=self.train_data)
        b5_1.place(x=150,y=510,width=160,height=40)
        #detect faces
        img9 = Image.open(r"C:\Users\Dell\Desktop\photos.jfif")
        img9 = img9.resize((160,160),Image.ANTIALIAS)
        self.photoimg9 = ImageTk.PhotoImage(img9)
        b6 = Button(bg_label, image = self.photoimg9, cursor ="hand2",command =self.open_image)
        b6.place(x=400,y=350,width=160,height=160)
        b6_1 = Button(bg_label, text = "Photos", cursor ="hand2",command =self.open_image ,font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b6_1.place(x=400,y=510,width=160,height=40)
        img10 = Image.open(r"C:\Users\Dell\Pictures\dev.png")
        img10 = img10.resize((160,160),Image.ANTIALIAS)
        self.photoimg10 = ImageTk.PhotoImage(img10)
        b7 = Button(bg_label, image = self.photoimg10, command = self.developer,cursor ="hand2")
        b7.place(x=700,y=350,width=160,height=160)
        b7_1 = Button(bg_label, text = "Developer",command = self.developer, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b7_1.place(x=700,y=510,width=160,height=40)
        img11 = Image.open(r"C:\Users\Dell\Desktop\exit.jfif")
        img11 = img11.resize((160,160),Image.ANTIALIAS)
        self.photoimg11 = ImageTk.PhotoImage(img11)
        b8 = Button(bg_label, image = self.photoimg11,command = self.exitf, cursor ="hand2")
        b8.place(x=1000,y=350,width=160,height=160)
        b8_1 = Button(bg_label, text = "Exit",command = self.exitf, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
        b8_1.place(x=1000,y=510,width=160,height=40)

    def open_image(self):
        # Open the captured-photos folder in the OS file browser (Windows only).
        os.startfile("data")

    #function buttons
    def student_details(self):
        # Open the student-details window in a new toplevel.
        self.new_window = Toplevel(self.root)
        self.app = student(self.new_window)

    def train_data(self):
        # Open the classifier-training window.
        self.new_window = Toplevel(self.root)
        self.app = Train(self.new_window)

    def face_data(self):
        # Open the live face-recognition window.
        self.new_window = Toplevel(self.root)
        self.app = Face_Reco(self.new_window)

    def attendance_data(self):
        # Open the attendance report window.
        self.new_window = Toplevel(self.root)
        self.app = atendance(self.new_window)

    def developer(self):
        # Open the developer-info window (resolves the imported 'developer'
        # class from module scope, not this method).
        self.new_window = Toplevel(self.root)
        self.app = developer(self.new_window)

    def help1(self):
        # Open the help-desk window ('help' here is the imported class, which
        # shadows the builtin help()).
        self.new_window = Toplevel(self.root)
        self.app = help(self.new_window)

    def exitf(self):
        # NOTE(review): this rebinds self.exitf from a method to a bool, and
        # 'tkinter.messagebox' is only usable if the submodule was imported
        # somewhere — confirm an 'import tkinter.messagebox' exists.
        self.exitf = tkinter.messagebox.askyesno("Face Recognition", "Are you sure you want to exit?",parent = self.root)
        if self.exitf>0:
            self.root.destroy()
        else:
            return
# Build the main window and hand control to the Tk event loop.
if __name__ == "__main__":
    root = Tk()
    obj = facerecognitionsystem(root)
    root.mainloop()
| kg300902/Smart-Attendance-System | main.py | main.py | py | 7,693 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"li... |
4713040273 | import os
import re
import json
import numpy as np
from tqdm import tqdm_notebook
from collections import Counter
# Group the parsed sections of each paper JSON into a small fixed set of
# canonical buckets (abstract, introduction, related work, ...) and write one
# combined JSON per paper into combined_sections/.
base_path = 'LongSumm-data/extractive_summaries/'
path_to_jsons = base_path + 'papers-jsons/'
p_jsons = os.listdir(path_to_jsons)
p_unread = []  # papers that could not be decoded
# Manually curated heading aliases: each list maps many raw section headings
# onto one canonical bucket.
section_1 = ['abstract']
section_2 = ['introduction', 'problem formulation', 'overview', 'problem definition']
section_3 = ['related work', 'background', 'preliminaries', 'related works', 'previous work', 'baseline models']
section_4 = ['conclusion', 'conclusions', 'discussion', 'conclusion and future work', 'analysis', 'inference', 'discussion and conclusion', 'future work', 'theoretical analysis', 'concluding remarks']
section_5 = ['experiments', 'experimental setup', 'experiment', 'setup', 'training details', 'implementation', 'hyperparameters', ]
section_6 = ['model', 'approach', 'method', 'methods', 'methodology', 'models', 'our approach', 'proposed method', 'model architecture', 'algorithm']
section_7 = ['experimental results', 'results', 'evaluation', 'error analysis', 'main results', 'results and analysis', 'human evaluation', 'experimental evaluation', 'empirical results', 'experiments and results']
section_8 = ['data', 'datasets', 'dataset', 'evaluation metrics']
remove_sections = ['acknowledgements', 'acknowledgments', 'acknowledgement', 'acknowledgment', 'appendix', 'appendices', 'a appendix', 'notation']
section_names = []  # every heading seen (after normalisation), for inspection
for p in tqdm_notebook(p_jsons):
    with open(path_to_jsons+p) as json_file:
        try:
            p_data = json.load(json_file)
        except UnicodeDecodeError:
            # Track unreadable files and move on.
            p_unread.append(p)
            continue
    p_sections = {}
    p_sections['name_of_paper'] = p_data['name'][:-4]  # strip extension
    if p_data['metadata']['sections'] is not None:
        for s in p_data['metadata']['sections']:
            if s['heading'] is None:
                s['heading'] = 'abstract'
            # Normalise heading: lowercase, strip non-letters, squeeze spaces.
            s_name = re.sub(' +', ' ', re.sub('[^a-z\s]', '', s['heading'].lower())).lstrip()
            if s_name in remove_sections:
                continue
            else:
                section_names.append(s_name)
            # Route the section text into its canonical bucket.
            if s_name in section_1:
                p_sections['abstract'] = s['text']
            elif s_name in section_2:
                p_sections['introduction'] = s['text']
            elif s_name in section_3:
                p_sections['related_work'] = s['text']
            elif s_name in section_4:
                p_sections['conclusion'] = s['text']
            elif s_name in section_5:
                p_sections['experiments'] = s['text']
            elif s_name in section_6:
                p_sections['model'] = s['text']
            elif s_name in section_7:
                p_sections['results'] = s['text']
            elif s_name in section_8:
                p_sections['data'] = s['text']
            else:
                # Unknown headings are concatenated into 'other', keeping
                # their titles for traceability.
                if 'other' in p_sections.keys():
                    p_sections['other'] = ' '.join([p_sections['other'], s['text']])
                    p_sections['other_section_titles'].append(s_name)
                else:
                    p_sections['other'] = s['text']
                    p_sections['other_section_titles'] = []
                    p_sections['other_section_titles'].append(s_name)
    with open('LongSumm-data/extractive_summaries/combined_sections/'+p_sections['name_of_paper']+'.json', 'w') as file:
        json.dump(p_sections, file)
| dchandak99/LongSumm | .ipynb_checkpoints/join_sections_manual-checkpoint.py | join_sections_manual-checkpoint.py | py | 3,727 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm_notebook",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 4... |
32413683762 | from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LogoutView
from django.shortcuts import redirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic import View
from django.views.generic import FormView
from django.db import IntegrityError
from users import forms
from users.models import UserFollows, CustomUser
class LoginView(View):
    """
    View to manage the user login functionality.

    'LoginView' handles both GET and POST requests related to the login page.
    During a GET request, an empty login form is presented. During a POST
    request, the submitted credentials are authenticated. If they are valid,
    the user is logged in and redirected to the feed; otherwise, an error
    message is displayed.
    """

    form_class = forms.LoginForm
    template_name = 'users/login.html'

    def get(self, request):
        """
        Handle GET requests to the login page.

        Renders the login page with an unpopulated login form.
        """
        form = self.form_class()
        message = ''
        return render(request, self.template_name,
                      {"form": form, 'message': message})

    @method_decorator(require_POST)
    def post(self, request):
        """
        Handle POST requests to the login page.

        Authenticates the user's credentials. If they are valid, the user
        is logged in and redirected to the feed. If they are invalid, an
        error message is displayed.
        """
        form = self.form_class(request.POST)
        message = ''
        if form.is_valid():
            user = authenticate(
                username=form.cleaned_data["username"],
                password=form.cleaned_data["password"]
            )
            if user is not None:
                login(request, user)
                return redirect("feed")
            else:
                # Deliberately vague message: do not reveal which field failed.
                message = "Invalid credentials."
        # Fall through for both invalid forms and failed authentication.
        return render(request, self.template_name,
                      {"form": form, 'message': message})
class LogoutUserView(LogoutView):
    """Log the current user out, then send them straight to the login page."""

    next_page = reverse_lazy('login')
class SignupView(FormView):
    """
    Account-creation view.

    GET renders the signup form; a valid POST creates the account, logs the
    new user in immediately, and redirects to the feed.
    """

    form_class = forms.SignupForm
    template_name = "users/signup.html"
    success_url = reverse_lazy("feed")

    def form_valid(self, form):
        """Persist the new account, authenticate the session, then redirect."""
        new_user = form.save()
        login(self.request, new_user)
        return super().form_valid(form)
class FollowedUsersView(LoginRequiredMixin, View):
    """
    Show the authenticated user the list of accounts they currently follow.

    LoginRequiredMixin guarantees anonymous visitors never reach get().
    """

    def get(self, request, *args, **kwargs):
        """Render the followed-users page for the current user."""
        following = UserFollows.objects.filter(user=request.user)
        return render(
            request,
            'users/followed_users.html',
            {'followed_users': following},
        )
class FollowUserView(LoginRequiredMixin, View):
    """
    View to handle user-following actions.

    This view is designed to handle POST requests that contain the username
    of the person to be followed. It has mechanisms to handle scenarios such as
    trying to follow oneself, trying to follow a user that doesn't exist, and
    trying to follow a user that one is already following.
    """

    @method_decorator(require_POST)
    def post(self, request, *args, **kwargs):
        """Create a follow relationship and report the outcome via messages."""
        # Retrieve the username to follow from the POST data.
        username_to_follow = request.POST.get('username_to_follow')
        # Reject self-follows up front.
        if username_to_follow == request.user.username:
            messages.error(request, "You cannot follow yourself.")
            return redirect('abonnements')
        try:
            # Retrieve the user to follow from the database.
            user_to_follow = CustomUser.objects.get(username=username_to_follow)
            # Create a new follow relationship.
            UserFollows.objects.create(user=request.user, followed_user=user_to_follow)
            messages.success(request, f"You are now following {user_to_follow.username}!")
        except CustomUser.DoesNotExist:
            # The requested username is unknown.
            messages.error(request, f"The user {username_to_follow} does not exist.")
        except IntegrityError:
            # Unique constraint hit: the relationship already exists.
            messages.error(request, f"You are already following {username_to_follow}!")
        # Always land back on the 'abonnements' page.
        return redirect('abonnements')
class UnfollowUserView(LoginRequiredMixin, View):
    """
    Remove a follow relationship.

    The URL supplies the primary key of the followed user; the matching
    UserFollows row for the current user is deleted if it exists.
    """

    @method_decorator(require_POST)
    def post(self, request, pk, *args, **kwargs):
        """Delete the request.user -> pk follow link and report the outcome."""
        relation = UserFollows.objects.filter(
            user=request.user, followed_user_id=pk
        ).first()
        if relation is None:
            # No such relationship for this user.
            messages.error(request, "User not found.")
        else:
            unfollowed_name = relation.followed_user.username
            relation.delete()
            messages.success(request, f"You have unfollowed {unfollowed_name}.")
        return redirect('abonnements')
| ErnestoAquino/LITRevu | litrevu/users/views.py | views.py | py | 7,709 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.View",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "users.forms.LoginForm",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "users.forms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "djan... |
1705824671 | import sys
import json
import h5py
import numpy as np
import matplotlib.pyplot as plt
import sys_id_utils
# Overlay the time traces and frequency responses of every HDF5 capture given
# on the command line; figures are created on the first file and reused so all
# files plot into the same axes.
for i, data_file in enumerate(sys.argv[1:]):
    data = h5py.File(data_file, 'r')
    run_param = json.loads(data.attrs['jsonparam'])
    print(run_param)
    t = data['t'][()]
    v_stimu = data['v_stimulus'][()]
    v_plant = data['v_plant'][()]
    v_error = data['v_error'][()]
    is_trial = data['is_trial'][()]
    stimu_count = data['stimulus_count'][()]
    stimu_event = data['stimulus_event'][()]
    # Mask of trial region
    mask = is_trial > 0
    t = t[mask]
    v_stimu = v_stimu[mask]
    v_plant = v_plant[mask]
    v_error = v_error[mask]
    stimu_count = stimu_count[mask]
    stimu_event = stimu_event[mask]
    # Remove last few points
    k = 3
    t = t[:-k]
    v_stimu = v_stimu[:-k]
    v_plant = v_plant[:-k]
    v_error = v_error[:-k]
    stimu_count = stimu_count[:-k]
    stimu_event = stimu_event[:-k]
    num_pts = t.shape[0]
    nperseg = num_pts/12  # segment length for the spectral estimate
    f_sample = 1.0/(t[1] - t[0])  # sample rate from the (uniform) time grid
    f_cutoff = 0.7
    # Compute gain and phase as function of frequency
    f, gain_db, phase_deg = sys_id_utils.freq_response(v_stimu[:,0], v_plant[:,0], f_sample, f_cutoff, nperseg)
    if i==0:
        # Figure 0: stimulus/plant velocity, error, and stimulus count.
        fig0, ax0 = plt.subplots(3,1,sharex=True)
    ax0[0].plot(t, v_stimu[:,0],'b')
    ax0[0].plot(t, v_plant[:,0],'r')
    ax0[0].set_ylabel('vel (pix/sec)')
    ax0[0].grid(True)
    ax0[1].plot(t, v_error[:,0],'b')
    ax0[1].grid(True)
    ax0[1].set_ylabel('err (pix/sec)')
    ax0[2].plot(t, stimu_count)
    ax0[2].grid(True)
    ax0[2].set_xlabel('t (sec)')
    if i==0:
        # Figure 1: Bode-style gain/phase plot.
        fig1, ax1 = plt.subplots(2,1,sharex=True)
        fig1.suptitle('Frequency Response')
    ax1[0].semilogx(f, gain_db,'or')
    ax1[0].grid(True, which='both', axis='both')
    ax1[0].set_ylabel('gain (dB)')
    ax1[1].semilogx(f, phase_deg,'or')
    ax1[1].grid(True, which='both', axis='both')
    ax1[1].set_ylabel('phase lag (deg)')
    ax1[1].set_xlabel('f (Hz)')
plt.show()
| willdickson/imafly | python/imafly/examples/data_step_tmp/analyze_step_data.py | analyze_step_data.py | py | 2,044 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys_id_utils.freq_response",
"l... |
27998557212 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 2 14:50:05 2021
@author: mizo_
"""
import os
from PIL import Image
import numpy as np
import csv
from impreproc5 import processImg
# image =Image.open('test/test.png')
# z='test/resize/testresize.png'
# c=processImg(image,z)
# Convert every image in test/done into one normalised, flattened pixel row
# of testcsv3.csv (values scaled to [0, 1]).
c=0
directory = f'test/done'
z='test/resize/testresize.png'  # destination path used by processImg for the resized copy
result = []  # paths of all successfully processed files
with open('testcsv3.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    for filename in os.listdir(directory):
        fn = os.path.join(directory, filename)
        # checking if it is a file
        if os.path.isfile(fn):
            print(c, fn)
            image=Image.open(fn)
            image=processImg(image,z)
            a=np.array(image).astype(np.uint8)
            a= a.flatten()
            #print(a)
            a=a/255  # normalise 8-bit pixel values to [0, 1]
            print(a.shape)
            #a=np.transpose(a, axes=None)
            writer.writerow(a)
            result.append(fn)
            c+=1
print(result)
print(result) | moataz-abbas/NeuralNetworks | createTestCSV.py | createTestCSV.py | py | 1,101 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
... |
18972684429 | import pandas as pd
from dagster import asset, get_dagster_logger
from SSH_DEMO.resources import daily_partitions_def
# path for the directory as served from the SFTP server
GLOBAL_PREFIX = "upload"
DB_ZONE = "landing"
def _source_path_from_context(context):
    """Build the remote path <base>/<partition>/<file> for an asset's source.

    Reads `source_file_base_path` and `source_file_name` from the metadata of
    the asset's first output definition and joins them with the run's
    partition key.
    """
    meta = context.solid_def.output_defs[0].metadata
    parts = [meta["source_file_base_path"], context.partition_key, meta["source_file_name"]]
    return "/".join(parts)
def read_csv_sftp_direct(sftp, remotepath: str, partition_key: str, *args, **kwargs) -> pd.DataFrame:
    """
    Read a file from a remote host using SFTP over SSH.
    Args:
        sftp: the already initialized paramiko SFTP session
        remotepath: the file path on the remote to read
        partition_key: the key of the processed partition
        *args: positional arguments to pass to pd.read_csv
        **kwargs: keyword arguments to pass to pd.read_csv
    Returns:
        a pandas DataFrame with data loaded from the remote host,
        augmented with `event_dt` (the partition key) and `load_ts`
        (wall-clock load timestamp) columns.
    Note:
        This closes the *whole* SFTP session before returning, not just
        the remote file handle, so the session cannot be reused by the
        caller afterwards.
    """
    remote_file = sftp.open(remotepath)
    dataframe = pd.read_csv(remote_file, *args, **kwargs)
    # Stamp lineage columns: partition date and load time.
    dataframe['event_dt'] = partition_key
    now_ts = pd.Timestamp.now()
    dataframe['load_ts'] = now_ts
    remote_file.close()
    sftp.close()
    return dataframe
@asset(
    compute_kind="python",
    partitions_def=daily_partitions_def,
    metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "foo.csv", "db_zone": DB_ZONE},
    required_resource_keys={"credentials", "ssh"},
    # io_manager_key="parquet_io_manager"
)
def foo_asset(context):
    """Daily-partitioned landing asset for foo.csv, pulled over SFTP.

    NOTE(review): this body duplicates `_shared_helper` below (only the log
    message differs); consider delegating like `bar_asset`/`baz_asset` do.
    """
    path = _source_path_from_context(context)
    get_dagster_logger().info(f"Processing file '{path}'")
    ssh = context.resources.ssh
    sftp = ssh.open_sftp()
    df = read_csv_sftp_direct(sftp, path, context.partition_key)
    return df
@asset(
    compute_kind="python",
    partitions_def=daily_partitions_def,
    metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "bar.csv", "db_zone": DB_ZONE},
    required_resource_keys={"credentials", "ssh"},
    # io_manager_key="parquet_io_manager"
)
def bar_asset(context):
    # Daily-partitioned landing asset for bar.csv; fetch logic lives in _shared_helper.
    return _shared_helper(context)
@asset(
    compute_kind="python",
    partitions_def=daily_partitions_def,
    metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "baz.csv", "db_zone": DB_ZONE},
    required_resource_keys={"credentials", "ssh"},
    # io_manager_key="parquet_io_manager"
)
def baz_asset(context):
    # Daily-partitioned landing asset for baz.csv; fetch logic lives in _shared_helper.
    return _shared_helper(context)
def _shared_helper(context):
    """Fetch this asset's partition file over SFTP and return it as a DataFrame.

    The remote path is derived from the asset's output metadata plus the
    partition key; the SFTP session is opened from the `ssh` resource and is
    closed by `read_csv_sftp_direct`.
    """
    path = _source_path_from_context(context)
    get_dagster_logger().info(f"Shared processing file '{path}'")
    ssh = context.resources.ssh
    sftp = ssh.open_sftp()
    df = read_csv_sftp_direct(sftp, path, context.partition_key)
    return df
| geoHeil/dagster-ssh-demo | SSH_DEMO/assets/ingest_assets.py | ingest_assets.py | py | 2,839 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pandas.Dat... |
35707696287 | import bcrypt
import time
from flask import Flask, jsonify, request
from flask import Flask, jsonify
from flask_cors import CORS
# * ============ (Core functions) ============ *#
from utils.save_results_in_db import save_results_in_db
from utils.scan_for_vulns import scan_for_vulns
from utils.data_adapter import data_adapter
from utils.save_results_as_json import save_results_as_json
from utils.obtain_cve_info_from_api import obtain_cve_info_from_api
from utils.get_default_gateway import get_default_gateway
from utils.db_connection import db_connection
from utils.get_db_results import get_db_results
from utils.get_db_results_filter import get_db_results_filter
from utils.obtain_isp_info_from_api import obtain_isp_info_from_api
from utils.obtain_user_collection import obtain_user_collection
from utils.get_db_reports import get_db_reports
# * ========= API ========= *#
from api.reports.get_top_cve import get_top_cve
from api.reports.get_top_isp import get_top_isp
from api.reports.get_top_vendor import get_top_vendor
from api.reports.get_top_vendor_cve import get_top_vendor_cve
from api.reports.get_top_ip import get_top_ip
from api.reports.get_top_isp_cve import get_top_isp_cve
from api.reports.get_top_port_cve import get_top_port_cve
from api.reports.get_top_ip_scanning_time import get_top_ip_scanning_time
app = Flask(__name__)
CORS(app)
@app.route("/")
def index():
return "Hello World!"
@app.route("/scan", methods=["POST"])
def scan():
userId = request.get_json()["userId"]
gateway = get_default_gateway()
start_time = time.time()
scan_results = scan_for_vulns(gateway, "nmap -sV --script vulners")
save_results_as_json(scan_results, "1-scan_results.json")
scan_results_adapted = data_adapter(scan_results, gateway, userId)
scan_results_adapted = obtain_isp_info_from_api(scan_results_adapted)
collection = db_connection()
if len(scan_results_adapted["vulnerabilities"]) == 0:
# save_results_in_db(collection, scan_results_adapted)
end_time = time.time()
elapsed_time = end_time - start_time
scan_results_adapted["scanningTime"] = elapsed_time
save_results_as_json(scan_results_adapted, "2-scan_results_adapted.json")
save_results_in_db(collection, scan_results_adapted)
return jsonify(scan_results_adapted)
scan_results_adapted_cve_info = obtain_cve_info_from_api(scan_results_adapted)
end_time = time.time()
elapsed_time = end_time - start_time
scan_results_adapted_cve_info["scanningTime"] = elapsed_time
save_results_as_json(
scan_results_adapted_cve_info, "3-scan_results_adapted_cve_info.json"
)
save_results_in_db(collection, scan_results_adapted_cve_info)
return jsonify(scan_results_adapted_cve_info)
@app.route("/scan/all")
def getAllScans():
collection = db_connection()
results = get_db_results(collection)
return results
@app.route("/scan/filter", methods=["POST"])
def getScanByFilter():
collection = db_connection()
results = get_db_results_filter(collection)
return results
@app.route("/register", methods=["POST"])
def register_user():
try:
users_collection = obtain_user_collection()
user_data = request.get_json()
existent_user = users_collection.find_one({"email": user_data["email"]})
if existent_user:
return jsonify({"error": "El Usuario ya existe"}), 400
hashed_password = bcrypt.hashpw(
user_data["password"].encode("utf-8"), bcrypt.gensalt()
)
users_collection.insert_one(
{
"name": user_data["name"],
"email": user_data["email"],
"role": user_data["role"] if "role" in user_data else "USER",
"asn": user_data["asn"] if "asn" in user_data else None,
"password": hashed_password,
}
)
return jsonify({"message": "Usuario creado exitosamente"}), 201
except Exception as e:
print(e)
return jsonify({"error": "Error al crear el usuario"}), 500
@app.route("/login", methods=["POST"])
def login():
try:
# Obtiene los datos de inicio de sesión del cuerpo de la solicitud
login_data = request.get_json()
users_collection = obtain_user_collection()
# Busca el usuario en la base de datos por su correo electrónico
user = users_collection.find_one({"email": login_data["email"]})
if user:
# Compara la contraseña proporcionada con la contraseña almacenada en la base de datos
if bcrypt.checkpw(login_data["password"].encode("utf-8"), user["password"]):
return (
jsonify(
{
"message": "Inicio de sesión exitoso",
"user": {
"_id": str(user["_id"]),
"name": user["name"],
"email": user["email"],
"role": user["role"],
"asn": user["asn"],
},
}
),
200,
)
else:
return jsonify({"error": "Credenciales incorrectas"}), 401
else:
return jsonify({"error": "Usuario no encontrado"}), 404
except Exception as e:
print(e)
return jsonify({"error": "Error al iniciar sesión"}), 500
# Reports
@app.route("/reports")
def reports():
collection = db_connection()
results = get_db_reports(collection)
return results
@app.route("/reports/cve")
def api_get_top_cve():
return get_top_cve()
@app.route("/reports/ip")
def api_get_top_ip():
return get_top_ip()
@app.route("/reports/ip/scanning_time")
def api_get_top_ip_scanning_time():
return get_top_ip_scanning_time()
@app.route("/reports/isp")
def api_get_top_isp():
return get_top_isp()
@app.route("/reports/isp/cve")
def api_get_top_isp_cve():
return get_top_isp_cve()
@app.route("/reports/port/cve")
def api_get_top_port_cve():
return get_top_port_cve()
@app.route("/reports/vendor")
def api_get_top_vendor():
return get_top_vendor()
@app.route("/reports/vendor/cve")
def api_get_top_vendor_cve():
return get_top_vendor_cve()
if __name__ == "__main__":
app.run(debug=True, port=3000)
| JorgeAVargasC/router-scan-backend | app.py | app.py | py | 6,478 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.request",
... |
33078595311 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import time
from threading import Thread
import requests
class DownloadHandler(Thread):
    """Thread that downloads one URL and writes it under /local/path/.

    The destination filename is taken from the last path segment of the URL.
    """
    def __init__(self, url):
        super().__init__()
        self.url = url
    def run(self):
        # Derive the local filename from everything after the final '/'.
        filename = self.url[self.url.rfind('/') + 1:]
        resp = requests.get(self.url)
        file_path = '/local/path/' + filename
        with open(file_path, 'wb') as f:
            f.write(resp.content)
def main():
    """Fetch the picture list from the API and download every image in its
    own `DownloadHandler` thread."""
    api_url = 'https://example.com/api'
    resp = requests.get(api_url)
    data_model = resp.json()
    for mm_dict in data_model['newslist']:
        # Bug fix: the original read `mm_dictp['picUrl']` -- an undefined
        # name (typo for `mm_dict`) that raised NameError on first iteration.
        url = mm_dict['picUrl']
        DownloadHandler(url).start()
if __name__ == '__main__':
    # Script entry point.
    main()
| letterli/py-cookbook | books/python-100-days/Day14/requests_demo.py | requests_demo.py | py | 733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "threading.Thread",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
}
] |
1047110963 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
class ticketChatForm(forms.Form):
    """Chat-message form for a ticket: hidden ticket/author fields plus the
    message body and an optional file attachment. Rendered with crispy-forms.
    """
    def __init__(self, *args, **kwargs):
        super(ticketChatForm, self).__init__(*args, **kwargs)
        # crispy-forms helper controls how the form is rendered.
        self.helper = FormHelper()
        # self.helper.form_id = 'id-exampleForm'
        # self.helper.form_class = 'blueForms'
        self.helper.form_method = 'post'
        self.helper.form_action = ''
        # Submit button (label is Russian for "Send").
        self.helper.add_input(Submit('отправить', 'Отправить'))
    # Hidden fields carrying the ticket (post) id and the author name.
    post = forms.CharField(widget=forms.HiddenInput(), )
    name = forms.CharField(widget=forms.HiddenInput())
    body = forms.CharField(label='Сообщения')
    file = forms.FileField(label='Файл', max_length=100, required=False)
    def setF(self, post, name):
        # Pre-populate the hidden fields; always returns True.
        self.fields['post'].initial = str(post)
        self.fields['name'].initial = str(name)
        return True
| hewimetall/django_Help_Desk | label_ListPage/form.py | form.py | py | 931 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.Form",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "crispy_forms.helper.FormHelper",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "crisp... |
2018421498 | import unittest
import sys
import os
import tempfile
import shutil
from appliapps.examples.a_pyecho import PythonEcho
from appliapps.examples.b_extecho import ExternalEcho
from appliapps.examples.cp import CpApp
from appliapps.examples.template import TemplateApp
class Test(unittest.TestCase):
    """Smoke tests for the example apps, run inside a throwaway temp
    directory so generated files do not pollute the repository."""
    @classmethod
    def setUpClass(cls):
        # Work in a fresh temp dir and create a small input file for the cp test.
        cls.tdir = tempfile.mkdtemp(dir=".")
        os.chdir(cls.tdir)
        with open("testfile", "w") as f:
            f.write("testcontent")
    def test1_pyecho(self):
        sys.argv = ['--COMMENT', 'comment']
        PythonEcho.main()
    def test2_extecho(self):
        sys.argv = ['--COMMENT', 'comment']
        ExternalEcho.main()
    def test3_cp(self):
        sys.argv = ["--FILE", "testfile"]
        CpApp.main()
        # Strip all permissions so the copy fails and the app exits.
        os.chmod("testfile", 0o000)
        self.assertRaises(SystemExit, CpApp.main)
        # Bug fix: chmod takes an *octal* mode. The original passed decimal
        # 644 (== 0o1204), producing mode --x-w----T instead of rw-r--r--.
        os.chmod("testfile", 0o644)
    def test4_tpl(self):
        sys.argv = ['--COMMENT', 'comment', '--WORKDIR', '.']
        TemplateApp.main()
        assert os.path.exists("template_out.tpl")
    @classmethod
    def tearDownClass(cls):
        os.chdir("..")
        shutil.rmtree(cls.tdir)
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line... |
27147516534 | import pytest
from ..common_imports import PdfXmp, PdfResource
class TestPdfXmp:
    """End-to-end test for the PdfXmp endpoint: extract XMP metadata from a
    sample PDF and save the resulting XML next to the other test outputs."""
    @pytest.fixture
    def resource(self, test_params):
        # Sample PDF that carries XMP metadata.
        return PdfResource(test_params.resources_path + "XmpAndOtherSample.pdf", "XmpAndOtherSample.pdf")
    @pytest.fixture
    def text(self, resource, test_params, get_endpoint):
        # Wrap the resource in a PdfXmp endpoint configured for the test server.
        text = PdfXmp(resource)
        return get_endpoint(text, test_params)
    def test_pdf_xmp(self, text, test_params):
        res = text.process()
        # Persist the extracted XMP XML only when the call succeeded.
        if res.is_successful:
            with open(test_params.output_path + "pdf_xmp.xml", "wb") as out_stream:
                out_stream.write(res.content)
        assert res.is_successful
{
"api_name": "common_imports.PdfResource",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "common_imports.PdfXmp",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "py... |
10854990799 | import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import Dataset, DataLoader
from utils import Language
SRC_LANG = Language('src')
TRG_LANG = Language('trg')
class SentenceDataset(Dataset):
    """
    This class loads the desired data split for the Occupation Classification dataset.

    Reads parallel source/target sentence files plus a word-alignment file
    (Pharaoh "i-j" format), converts sentences to index sequences via the
    global SRC_LANG/TRG_LANG vocabularies, sorts by decreasing source length,
    pads to the split's maximum lengths and precomputes dense alignment
    matrices. Each sample is
    [src_ids, src_len, trg_ids, trg_len, alignment_matrix].
    """
    def __init__(self, task, num_train, batch_size, data_path, dataset, debug=False):
        """
        Args:
            task: task name used in the data file names.
            num_train: cap on the number of training pairs (train split only).
            batch_size: stored for reference; batching is done by DataLoader.
            data_path: directory prefix of the data files.
            dataset: split name ('train', 'dev' or 'test').
            debug: if True, keep only the first 1e5 lines of each file.
        """
        self.batch_size = batch_size
        self.src_file = data_path + dataset + "." + task + '.src'
        self.trg_file = data_path + dataset + "." + task + '.trg'
        src_sentences = open(self.src_file).readlines()
        trg_sentences = open(self.trg_file).readlines()
        self.alignment_file = data_path + dataset + "." + task + ".align"
        alignment_sentences = open(self.alignment_file).readlines()
        if debug: # small scale
            src_sentences = src_sentences[:int(1e5)]
            trg_sentences = trg_sentences[:int(1e5)]
            alignment_sentences = alignment_sentences[: int(1e5)]
        if dataset == 'train':
            src_sentences = src_sentences[:num_train]
            trg_sentences = trg_sentences[:num_train]
            alignment_sentences = alignment_sentences[:num_train]
        # parallel should be at least equal len
        assert (len(src_sentences) == len(trg_sentences))
        self.samples = []
        self.src_samples = []
        self.trg_samples = []
        self.aligned_outputs = []
        # represent all sentences as vocabulary index sequences
        for idx in range(0, len(src_sentences)):
            # get the slice
            src_sample = SRC_LANG.get_sent_rep(src_sentences[idx])
            trg_sample = TRG_LANG.get_sent_rep(trg_sentences[idx])
            align_sample = alignment_sentences[idx]
            self.src_samples.append(src_sample)
            self.trg_samples.append(trg_sample)
            self.aligned_outputs.append(align_sample)
        # represent them
        # src_sample = [SRC_LANG.get_sent_rep(s) for s in src_sample]
        # trg_sample = [TRG_LANG.get_sent_rep(s) for s in trg_sample]
        # sort by decreasing source len
        sorted_ids = sorted(enumerate(self.src_samples), reverse=True, key=lambda x: len(x[1]))
        src_sample = [self.src_samples[i] for i, v in sorted_ids]
        trg_sample = [self.trg_samples[i] for i, v in sorted_ids]
        align_sample = [self.aligned_outputs[i] for i, v in sorted_ids]
        src_len = [len(s) for s in src_sample]
        trg_len = [len(t) for t in trg_sample]
        # largest sequence lengths of the split define the padded shape
        max_src_len = max(src_len)
        max_trg_len = max(trg_len)
        # pad the extra indices
        src_sample = SRC_LANG.pad_sequences(src_sample, max_src_len)
        trg_sample = TRG_LANG.pad_sequences(trg_sample, max_trg_len)
        # build one dense (max_trg_len x max_src_len) 0/1 alignment matrix per pair
        aligned_outputs = []
        for alignment in align_sample:
            # print (alignment)
            current_alignment = np.zeros([max_trg_len, max_src_len])
            for pair in alignment.strip().split():
                # Pharaoh format "src-trg"; +1 shifts past the BOS position,
                # clamped so out-of-range indices land on the last column/row.
                src_i, trg_j = pair.split("-")
                src_i = min(int(src_i) + 1, max_src_len - 1)
                trg_j = min(int(trg_j) + 1, max_trg_len - 1)
                current_alignment[trg_j][src_i] = 1
            aligned_outputs.append(current_alignment)
        # numpy them
        self.src_samples = np.array(src_sample, dtype=np.int64)
        self.trg_samples = np.array(trg_sample, dtype=np.int64)
        self.aligned_outputs = np.array(aligned_outputs)
        # align output is batch_size x max target_len x max_src_len
        assert (self.src_samples.shape[1] == max_src_len)
        assert (self.trg_samples.shape[1] == max_trg_len)
        # craft samples out of prepared data
        for idx in range(0, len(self.src_samples)):
            src_sample = self.src_samples[idx]
            trg_sample = self.trg_samples[idx]
            self.samples.append([src_sample, len(src_sample), trg_sample, len(trg_sample), self.aligned_outputs[idx]])
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        # Accept tensor indices from DataLoader samplers.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        return self.samples[idx]
class SentenceDataModule(pl.LightningDataModule):
    """
    This Lightning module takes a "task" argument and produces DataLoaders for that task
    using predefined task-Dataset instances.

    Loading the train split first populates the global SRC_LANG/TRG_LANG
    vocabularies; they are then frozen before dev/test are loaded.
    """
    def __init__(self, task, batch_size, num_train, data_path, debug=False):
        super().__init__()
        self.task = task
        self.batch_size = batch_size
        self.num_train = num_train
        self.debug = debug
        self.data_path = data_path
    # noinspection PyAttributeOutsideInit
    def setup(self, stage=None):
        # Train split builds the vocabularies as a side effect.
        self.train = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'train', debug=self.debug)
        # don't accept new words from validation and test set
        SRC_LANG.stop_accepting_new_words()
        TRG_LANG.stop_accepting_new_words()
        self.val = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'dev', debug=self.debug)
        self.test = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'test', debug=self.debug)
    def train_dataloader(self):
        return DataLoader(self.train, batch_size=self.batch_size, num_workers=4)
    def val_dataloader(self):
        return DataLoader(self.val, batch_size=self.batch_size, num_workers=4)
    def test_dataloader(self, batch_size=None):
        # Allow evaluating with a batch size different from training.
        if batch_size is None:
            batch_size = self.batch_size
        # pin_memory=True
        return DataLoader(self.test, batch_size=batch_size, num_workers=4)
    def prepare_data(self, *args, **kwargs):
        # download or similar ... (data is expected to exist on disk already)
        pass
| matprst/deceptive-attention-reproduced | deceptive-attention/src/seq2seq/lightning/data_utils.py | data_utils.py | py | 5,858 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.Language",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "utils.Language",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
... |
from appium import webdriver
import time
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import ElementNotVisibleException, ElementNotSelectableException, NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
# Appium capabilities for the target Android device and app under test.
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '9'
desired_caps['automationName'] = 'UiAutomator2'
desired_caps['deviceName'] = 'moto x4'
desired_caps['app'] = ('/home/candi/Downloads/PgConnect_release_1.7.0_270820_1004.apk')
desired_caps['appPackage'] = 'de.proglove.connect'
desired_caps['appActivity'] = 'de.proglove.connect.app.main.MainActivity'
# Connect to a locally running Appium server.
driver = webdriver.Remote("http://localhost:4723/wd/hub", desired_caps)
print("Device Width and Height: ", driver.get_window_size())
# Device Width and Height: {'width': 1080, 'height': 1776}
deviceSize = driver.get_window_size()
screenWidth = deviceSize['width']
screenHeight = deviceSize['height']
# Swipe from bottom to top (vertical, centered horizontally)
startx = screenWidth/2
endsx = screenWidth/2
starty = screenHeight*8/9
endsy = screenHeight/9
# Swipe from top to bottom
startx2 = screenWidth/2
endsx2 = screenWidth/2
starty2 = screenHeight*2/9
endsy2 = screenHeight*8/9
# Perform the two swipes with a 3 s pause in between.
actions = TouchAction(driver)
actions.long_press(None, startx, starty).move_to(None, endsx, endsy).release().perform()
time.sleep(3)
| candi-project/Automation_framework_Android | Appiumpython/Gestures/SwipeGesture2.py | SwipeGesture2.py | py | 1,460 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "appium.webdriver.Remote",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "appium.webdriver.common.touch_action.TouchAction",
"line_number": 38,
"usage_type": "call"
},
... |
26024158970 | # 建立COO 稀疏矩阵
from scipy.sparse import coo_matrix  # sparse-matrix constructors
# Demo: build a COO sparse matrix from (value, (row, col)) triplets,
# then convert it between dense, CSR and CSC representations.
coords = ([0, 1, 2, 2], [0, 1, 2, 3])
values = [1, 2, 3, 4]
# 4x4 sparse matrix in coordinate format.
c = coo_matrix((values, coords), shape=(4, 4))
print(c)
# Densify the sparse matrix.
d = c.todense()
print(d)
# A mostly-zero dense matrix converts straight back to sparse.
e = coo_matrix(d)
print(e)
# COO -> CSR (compressed sparse row).
f = e.tocsr()
print(f)
print("\n")
# COO -> CSC (compressed sparse column).
g = e.tocsc()
print(g)
| suanhaitech/pythonstudy2023 | july/11.py | 11.py | py | 584 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 11,
"usage_type": "call"
}
] |
71276865469 | # internal imports
from typing import Dict, Optional
# external imports
import gspread
def add_new_row(sheet, data):
    """Append *data* as a brand-new row at the bottom of *sheet*."""
    sheet.append_row(data)
def update_row(sheet, cell, data):
    """Overwrite the row containing *cell*, starting at its column,
    with the values in *data* (one cell per element)."""
    for offset, value in enumerate(data):
        sheet.update_cell(cell.row, cell.col + offset, value)
def upload_results(sheet_name: str, exp_name: str, results: Dict[str, int], worksheet_name: Optional[str] = None) -> None:
    """
    Upload the results to googlesheets. If no row with the exp_name
    exists, then a new row will be added. If the experiment does
    exist, the row will simply be updated.

    Args:
        sheet_name: title of the spreadsheet to open.
        exp_name: experiment identifier, stored in the row's first cell
            and used to locate an existing row.
        results: metric values written after the name, in dict order.
        worksheet_name: worksheet title; defaults to the first worksheet.

    Requires gspread service-account credentials to be configured.
    """
    gc = gspread.service_account()
    sh = gc.open(sheet_name)
    if worksheet_name is None:
        worksheet_name = sh.sheet1.title
    ws = sh.worksheet(worksheet_name)
    data = [exp_name] + [v for v in results.values()]
    try:
        # find() raises when the experiment has no row yet.
        cell = ws.find(exp_name)
        update_row(ws, cell, data)
    # NOTE(review): newer gspread moves this to gspread.exceptions.CellNotFound
    # (and find() may return None instead) -- verify against the pinned version.
    except gspread.CellNotFound:
        add_new_row(ws, data)
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "gspread.service_account",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gspread.CellNotFo... |
41191258670 |
#? pip install flask flask-pymongo
from flask import Flask, render_template
from flask_pymongo import PyMongo
# Minimal Flask app backed by a local MongoDB via Flask-PyMongo.
app = Flask(__name__)
app.config['MONGO_URI'] = "mongodb://localhost:27017/myDatabase"
mongo = PyMongo(app)
@app.route('/')
def hello_world():
    # Demo route: insert one document, then render every inventory document.
    mongo.db.inventory.insert_one({"b":31})
    a = mongo.db.inventory.find({})
    return render_template('index.html',data=a)
@app.route('/mydata')
def mydata():
    # Static personal info rendered into the mydata template.
    info = ['Vedant', 'Age: 19', 'Programmer', 'Music Lover']
    return render_template('mydata.html', personal=info)
app.run(debug=True,port=3000) | Vedant817/Flask-and-MongoDB | main.py | main.py | py | 569 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_te... |
71601274107 | #!/usr/bin/env python3
from jinja2 import Template
import numpy as np
# Bounds of the (x, z) sampling grid.
min_x = -20
max_x = 20
min_z = 0.0
max_z = 20.0
# Load the input template once; it is rendered for every grid point.
with open('nonapod_input.jinja') as template_file:
    templ = Template(template_file.read())
# Do the cases for grid sampling. Since 50 and 500 are not perfect squares,
# must use an approximate number.
x_values = np.linspace(min_x, max_x, 100)
z_values = np.linspace(min_z, max_z, 100)
# Fix: manage the index file with a context manager so it is closed (and
# flushed) even if template rendering raises; the original left the bare
# open() handle dangling on error.
with open('nonapod_inputs_grid_many/input_list', 'w') as fh:
    for i, x in enumerate(x_values):
        for j, z in enumerate(z_values):
            name = 'x_%i_z_%i' % (i, j)
            # One rendered input file per grid point ...
            with open('nonapod_inputs_grid_many/' + name, 'w') as result:
                result.write(templ.render(x=x, z=z))
            # ... plus an index line mapping (i, j) to its file name.
            fh.write('%i %i %s\n' % (i, j, name))
| gridley/truss_optimization | write_big_grid.py | write_big_grid.py | py | 730 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jinja2.Template",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 16,
"usage_type": "call"
}
] |
24615532465 | from tiles import AnimatableTile
import pygame
class Coin(AnimatableTile):
    """Animated coin tile: frames are downscaled to 8x8 pixels and the
    sprite is offset to the center of its grid cell."""
    def __init__(self, size, position, frames, data):
        super().__init__(size, position, frames, data)
        # Downscale every animation frame to 8x8.
        for i in range(len(self.frames)):
            self.frames[i] = pygame.transform.scale(self.frames[i], (8, 8))
        # Shift from the cell's corner to its center.
        self.position.x += size / 2
        self.position.y += size / 2
    def live(self, dt, surface):
        # Per-frame update: advance the animation and draw onto the surface.
        self.animate(dt)
        self.draw(surface)
| ysbrandB/M6FinalProject | code/coin.py | coin.py | py | 462 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tiles.AnimatableTile",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.transform.scale",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 9,
"usage_type": "attribute"
}
] |
74667821627 | from typing import List, Optional
from fastapi import Depends
from ..service import Service, get_service
from app.utils import AppModel
from . import router
class InsideObjectResponse(AppModel):
    """One listing in a paginated /shanyraks response.

    NOTE(review): pydantic treats leading-underscore names like `_id` as
    private attributes, not fields -- confirm it actually appears in the
    serialized response.
    """
    _id:str
    address:str
    type:str
    price:int
    area:float
    rooms_count:int
    location:dict
class GenResponse(AppModel):
    """Pagination envelope: total match count plus the current page."""
    total:int
    objects: List[InsideObjectResponse]
@router.get("/shanyraks")
def get_shanyraks(
limit:int,
offset:int,
type:Optional[str]=None,
rooms_count:Optional[int]=None,
price_from:Optional[int]=None,
price_until:Optional[int]=None,
svc : Service = Depends(get_service)
):
val = svc.repository.pagination(limit, offset, type, rooms_count, price_from, price_until)
return GenResponse(**val) | MamushevArup/code-climb-ai-back | app/shanyrak/router/router_get_pagination.py | router_get_pagination.py | py | 770 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.utils.AppModel",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.utils.AppModel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
22050926816 | from flask import Flask, request, render_template, session, redirect, url_for, jsonify
from models.user import User
from models.rawpicture import Rawpicture
from models.savepicture import Savepicture
from models.comment import Comment
from random import choice
import mlab
import base64
import requests
mlab.connect()
def base64encode(url):
    """Download *url* and return its body as a base64 `data:` URI suitable
    for an <img src=...> attribute.

    Performs a blocking HTTP GET via `requests`.
    """
    payload = base64.b64encode(requests.get(url).content)
    # Fix: decode the base64 bytes directly. The original called str() on the
    # bytes object and stripped the "b'...'" repr delimiters with replace(),
    # which only worked by accident of the bytes repr format.
    return "data:image/jpeg;base64," + payload.decode("ascii")
def func_top100pics():
    """Build the Top-100 list of finished pictures ranked by like count.

    Also persists each picture's rank (picpositionintop100) back to the DB.
    Returns a list of dicts with name, link, likes, artist and id fields.
    """
    # Find all finished pictures that have at least one like:
    finished_list = Savepicture.objects(picstatus='finished', piclikes__ne=0)
    # Collect the like counts of those pictures into a list:
    likes_list = []
    for pic in finished_list:
        likes_list.append(pic.piclikes)
    likes_list.sort(reverse=True) # sort in descending order
    if len(likes_list) > 100:
        likes_list = likes_list[:101]
    likes_list = list(dict.fromkeys(likes_list)) # drop duplicate values (keeps order)
    # Build the Top 100 by looking each like count back up in the picture DB:
    top100pics = []
    for i, v in enumerate(likes_list):
        for pic in finished_list:
            if pic.piclikes == v:
                # Persist the 1-based rank for this picture.
                Savepicture.objects(id=pic.id).first().update(set__picpositionintop100=i+1)
                artist = User.objects(username=pic.picartist).first()
                toppic = {
                    'picpositionintop100': pic.picpositionintop100,
                    'picname': pic.picname,
                    'piclink': pic.piclink,
                    'piclikes': pic.piclikes,
                    'picartist': artist.fullname,
                    'username': artist.username,
                    'picid': pic.id
                }
                top100pics.append(toppic)
    return top100pics
# Các biến được dùng để hiển thị trên HTML:
# 1. Tên bức tranh: picname
# 2. Link ảnh để hiển thị ảnh: piclink
# 3. Số lượng like: piclikes
# 4. Tác giả: picartist
def func_top100artists():
    """Build the Top-100 list of artists ranked by total like count.

    Persists each artist's rank (positionintop100) and includes their
    single most-liked finished picture in the returned dicts.
    """
    # Find every artist with at least one like:
    artist_list = User.objects(totallikes__ne=0)
    # Collect the total-like counts of those artists into a list:
    likes_list = []
    for artist in artist_list:
        likes_list.append(artist.totallikes)
    likes_list.sort(reverse=True) # sort in descending order
    if len(likes_list) > 100:
        likes_list = likes_list[:101]
    likes_list = list(dict.fromkeys(likes_list)) # drop duplicate values (keeps order)
    # Build the Top 100 by looking each like count back up in the user DB:
    top100artists = []
    for i, v in enumerate(likes_list):
        for artist in artist_list:
            if artist.totallikes == v:
                # Persist the 1-based rank for this artist:
                User.objects(username=artist.username).first().update(set__positionintop100=i+1)
                # Finished pictures of this artist:
                finished_list = Savepicture.objects(picartist=artist.username, picstatus='finished')
                # # Số tranh trong top 100 pics:
                # picsintop100 = 0
                # top100pics = func_top100pics()
                # for pic in top100pics:
                #     if pic['picartist'] == artist.username:
                #         picsintop100 += 1
                # User.objects(username=artist.username).first().update(set__picsintop100=picsintop100)
                # Find the artist's most-liked finished picture:
                likes = []
                for pic in finished_list:
                    likes.append(pic.piclikes)
                bestpic = Savepicture.objects(picartist=artist.username, picstatus='finished', piclikes=max(likes)).first()
                # Pack this artist's info into the Top-100 list:
                topartist = {
                    'positionintop100': artist.positionintop100,
                    'fullname': artist.fullname,
                    'username': artist.username,
                    # 'picsintop100': picsintop100,
                    'totallikes': artist.totallikes,
                    # 'finishedarts': len(finished_list),
                    'bestpic': bestpic.piclink,
                    'bestpicid': bestpic.id
                }
                top100artists.append(topartist)
    return top100artists
# Các biến dùng để hiển thị trên HTML:
# 1. Thứ hạng của artist: positionintop100
# 2. Tên đầy đủ của artist: fullname
# 3. Số lượng pic nằm trong top100pics: picsintop100
# 4. Tổng like: totallikes
# 5. Số bức vẽ đã hoàn thành: finishedarts
# 6. Link bức vẽ được nhiều like nhất để hiển thị: bestpic
def func_artist_infor(artist):
    """Collect profile stats for one artist (by username).

    Returns a dict with full name, finished/in-progress picture counts
    and the artist's total like count across finished pictures.
    """
    # Full name of the artist:
    artist_fullname = User.objects(username=artist).first().fullname
    # Number of finished pictures:
    finished_list = Savepicture.objects(picartist=artist, picstatus='finished')
    finished_arts = len(finished_list)
    # Number of pictures still in progress:
    working_list = Savepicture.objects(picartist=artist, picstatus='working')
    working_arts = len(working_list)
    # Sum the artist's likes over all finished pictures:
    ##### TODO: is there a way to link user and picture data so this total is kept automatically?
    totallikes = 0
    for art in finished_list:
        totallikes += art.piclikes
    # # Tổng số bức tranh trong top 100 pics:
    # picsintop100 = 0
    # top100pics = func_top100pics()
    # for pic in top100pics:
    #     if pic['picartist'] == artist:
    #         picsintop100 += 1
    # User.objects(username=artist).first().update(set__picsintop100=picsintop100)
    # # Tìm thứ hạng trong top 100:
    # positionintop100 = 0
    # top100artists = func_top100artists()
    # for a in top100artists:
    #     if a['username'] == artist:
    #         positionintop100 = a['positionintop100']
    # Pack the artist's info into one dictionary:
    artist_infor = {
        'fullname': artist_fullname,
        'username': artist,
        'finished_arts': finished_arts,
        'working_arts': working_arts,
        'totallikes': totallikes,
        # 'picsintop100': picsintop100,
        # 'positionintop100': positionintop100
    }
    return artist_infor
# Thông tin của artist:
# - Tên đầy đủ của artist: fullname
# - Số bức tranh đã hoàn thành: finished_arts
# - Số bức tranh đang vẽ: working_arts
# - Tổng số likes của artist đó: totallikes
# - Bỏ: Số bức tranh trong top 100: picsintop100 (bằng 0 là không có bức nào)
# - Bỏ: Thứ hạng của artist: positionintop100 (bằng 0 là không được vào top)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'teamcolorpictures'
@app.route('/') # Render the homepage
def home():
    return render_template('homepage.html')
@app.route('/signup', methods=['GET', 'POST']) # Account registration
def signup():
    """Register a new account; validates input, then logs the user in.

    User-facing warning strings are in Vietnamese by design.
    """
    # Already logged in: show the homepage instead.
    if 'token' in session:
        return render_template('homepage.html')
    if request.method == 'GET':
        return render_template("signup.html")
    else:
        form = request.form
        f = form['fullname']
        u = form['username']
        p = form['password']
        # e = form['email']
        new_user = User(fullname=f, username=u, password=p) #, email=e)
        user_check = User.objects(username=u).first()
        # email_check = User.objects(email=e).first()
        warning = ''
        # Validation: all fields present, no spaces in credentials.
        if f == '' or u == '' or p == '': #or e == '':
            warning = 'Vui lòng nhập đầy đủ thông tin!'
        elif ' ' in u or ' ' in p:
            warning = 'Username hoặc password không được chứa dấu cách!'
        # Check whether the username (or email) already exists:
        elif user_check is not None:
            warning = 'Username đã tồn tại!'
        # elif email_check is not None:
        #     warning = 'Email đã tồn tại'
        if warning != '':
            return render_template('signup.html', warning=warning)
        else:
            new_user.save()
            session['token'] = u
            # Registration succeeded: show the Welcome page.
            return render_template('welcome.html', fullname=f, u=u)
@app.route('/login', methods=['GET', 'POST'])  # Log in
def login():
    """Handle login: GET renders the form, POST checks the credentials."""
    if 'token' in session:
        return render_template('homepage.html')
    if request.method == 'GET':
        return render_template('login.html')
    else:
        form = request.form
        u = form['username']
        p = form['password']
        user_check = User.objects(username=u).first()
        # Check that username and password were entered and are correct:
        warning = ''
        if u == '':
            warning = 'Bạn chưa nhập username!'
        elif user_check is None:
            warning = 'Username không tồn tại!'
        else:
            if p == '':
                warning = 'Vui lòng nhập password!'
            # NOTE(review): plaintext password comparison — see signup().
            elif p != user_check.password:
                warning = 'Password sai!'
        if warning != '':
            return render_template('login.html', warning=warning)
        else:
            session['token'] = u
            # Credentials OK -> show the Welcome page
            return render_template('welcome.html', fullname=User.objects(username=u).first().fullname, u=u)
@app.route('/logout')  # Log out
def logout():
    """Drop the session token (if any) and redirect to the homepage."""
    # pop() with a default is a no-op when the user was not logged in.
    session.pop('token', None)
    return redirect(url_for('home'))
@app.route('/top100pics')  # Show the 100 most-liked pictures
def top100pics():
    """Render the leaderboard of the 100 most-liked pictures."""
    top100pics = func_top100pics()
    return render_template('top100pics.html', top100pics=top100pics)
@app.route('/top100artists')  # Show the 100 most-liked artists
def top100artists():
    """Render the leaderboard of the 100 most-liked artists."""
    top100artists = func_top100artists()
    return render_template('top100artists.html', top100artists=top100artists)
@app.route('/profile/<artist>')  # Show an artist's profile page
def profile(artist):
    """Render an artist profile: summary info plus their pictures.

    Finished pictures are listed in descending like order; in-progress
    pictures are only shown to the logged-in owner.
    """
    # Summary info (fullname, counts, total likes) for this artist.
    artist_infor = func_artist_infor(artist)
    # Finished pictures sorted by number of likes:
    # first build the list of distinct like counts, descending.
    likes_list = []
    finished_list = Savepicture.objects(picartist=artist, picstatus='finished')
    for pic in finished_list:
        likes_list.append(pic.piclikes)
    likes_list.sort(reverse=True)
    likes_list = list(dict.fromkeys(likes_list))  # drop duplicate like counts
    # Collect the pictures in that like order for display on the profile page.
    # NOTE(review): this is O(like_counts * pictures); sorting finished_list
    # by piclikes directly would be simpler and faster.
    artist_finised_arts = []
    for i in likes_list:
        for pic in finished_list:
            if pic.piclikes == i:
                # Number of comments on this picture:
                comments = len(Comment.objects(picid=pic.id))
                # Gather the fields the template needs for each picture.
                toppic = {
                    'picname': pic.picname,
                    'piclink': pic.piclink,
                    'piclikes': pic.piclikes,
                    'picid': pic.id,
                    'piccomments': comments
                }
                artist_finised_arts.append(toppic)
    # In-progress pictures (visible only when viewing your own profile).
    working_list = []
    if 'token' in session:
        if session['token'] == artist:
            working_list = Savepicture.objects(picartist=artist, picstatus='working')
    return render_template('profile.html', artist_infor=artist_infor, artist_finised_arts=artist_finised_arts, working_list=working_list)
# Các biến được dùng để hiển thị trên HTML:
# 1. Thông tin của artist:
# - Tên đầy đủ của artist: artist_fullname
# - Số bức tranh đã hoàn thành: finished_arts
# - Số bức tranh đang vẽ: working_arts. (Cái này chỉ hiện ra nếu ở trang profile của mình, còn của người khác chỉ hiện finished_arts thôi)
# - Bỏ: Số bức tranh trong top 100: picsintop100 (bằng 0 là không có bức nào)
# - Bỏ: Thứ hạng trong 100 artist: positionintop100 (bằng 0 là không nằm trong danh sách)
# 2. Thông tin từng bức vẽ đã hoàn thành, bao gồm:
# - Bỏ: Thứ hạng trong top 100 pics nếu bức đó lọt vào: positionintop100
# - Tên bức tranh: picname
# - Link ảnh để hiển thị: piclink
# - Số lượng like: piclikes
# - Số lượng comment: piccomments
# Pick one random raw picture; its id is shown on the category overview page.
# NOTE(review): this runs once at import time, so every request sees the same
# "random" picture until the server restarts.
pic_list = Rawpicture.objects()
random_picid = choice(pic_list).id
@app.route('/category')  # Show the overall Category page
def full_category():
    """Render the category overview with one (module-level) random picture id."""
    # category_list = Rawpicture.objects()  # all categories are rendered by the template loop
    return render_template('category.html', random_picid=random_picid)
@app.route('/category/<category>')  # Show one specific category page
def one_category(category):
    """Render all raw pictures whose category contains *category*."""
    matching_pics = Rawpicture.objects(category__icontains=category)
    return render_template(
        'one_category.html',
        pic_list=matching_pics,
        category=category.title(),  # capitalise for display
    )
@app.route('/new_picture/<picid>')  # Show the drawing page for one picture
def new_picture(picid):
    """Render the colouring page with the raw picture base64-encoded."""
    pic = Rawpicture.objects(id=picid).first()
    # The template embeds the image inline, hence the base64 encoding.
    piclinkb64 = base64encode(pic.piclink)
    return render_template('new_picture.html', piclinkb64=piclinkb64)
@app.route('/view/<picid>', methods=['GET', 'POST'])  # Show a finished picture to like and comment on
def view(picid):
    """Display one finished picture; POST adds a comment (login required)."""
    pic = Savepicture.objects(id=picid).first()
    artist = User.objects(username=pic.picartist).first()
    comment_list = Comment.objects(picid=picid)
    if request.method == 'GET':
        return render_template("view.html", pic=pic, artist=artist,comment_list=comment_list)
    else:
        form = request.form
        comment = form['comment']
        warning = ''
        if 'token' in session:
            user = User.objects(username=session['token']).first()
            new_comment = Comment(comment=comment, who_fullname=user.fullname, who_username=user.username, picid=picid)
            # Empty comments are rejected with a warning instead of saved.
            if comment == '':
                warning = 'Bạn chưa viết gì nên không có gì để đăng!'
            else:
                new_comment.save()
        else:
            warning = 'Vui lòng đăng nhập để like & comment!'
        return render_template('view.html', pic=pic, artist=artist, comment_list=comment_list, warning=warning)
@app.route('/like')
def index():
    """Render the standalone like-button test page."""
    return render_template('like_test.html')
@app.route('/_get_data/', methods=['POST'])
def _get_data():
    """AJAX endpoint for the like test page; always reports one like."""
    return jsonify({'data': 1})
if __name__ == '__main__':
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True)
{
"api_name": "mlab.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.savepicture.Savepic... |
from find_dir import cmd_folder
import pandas as pd
import os
import json
import numpy as np

# Load the raw buyer history and order events chronologically per buyer/visit.
buyer_history = pd.read_csv(cmd_folder + "data/processed/buyer_history.csv")
sorted_history = buyer_history[["buyer_id", "visit_id", "timestamp", "event"]].sort_values(
    ["buyer_id", "visit_id", "timestamp", "event"], ascending=True)
sorted_history["regroup"] = False

# Keep only the first 'pageview' and the first 'chat' event of each visit:
# gather the indexes of all such events and of their first occurrences per
# visit, then drop the duplicates (the set difference).
total_pageview_chat = sorted_history["visit_id"][sorted_history["event"] == "pageview"].index.values.tolist()
total_pageview_chat.extend(sorted_history["visit_id"][sorted_history["event"] == "chat"].index.values.tolist())
unique = sorted_history["visit_id"][sorted_history["event"] == "pageview"].drop_duplicates().index.values.tolist()
unique.extend(sorted_history["visit_id"][sorted_history["event"] == "chat"].drop_duplicates().index.values.tolist())
duplicate_pageview_chat = list(set(total_pageview_chat) - set(unique))
index_without_duplicates = list(set(sorted_history.index.values.tolist()) - set(duplicate_pageview_chat))
regroup_history = sorted_history[["buyer_id", "timestamp", "event"]].loc[index_without_duplicates]

# Write one JSON object per buyer: {"id": <buyer>, "trace": [events...]}.
with open(cmd_folder + "data/processed/trace_regroup.json", "w") as buf:
    buf.write("[")
    buyers = regroup_history["buyer_id"].unique()
    # Every buyer except the last is followed by a comma.
    for buyer in buyers[0:buyers.size - 1]:
        buf.write("{\"id\":\"" + buyer + "\",\"trace\":")
        regroup_history[regroup_history["buyer_id"] == buyer][["event"]].to_json(path_or_buf=buf, orient="records", force_ascii=False)
        buf.write("},\n")
    # BUGFIX: the original re-wrote the loop variable here (the *penultimate*
    # buyer), duplicating it and silently dropping the actual last buyer —
    # and raised NameError when there was exactly one buyer.
    last_buyer = buyers[-1]
    buf.write("{\"id\":\"" + last_buyer + "\",\"trace\":")
    regroup_history[regroup_history["buyer_id"] == last_buyer][["event"]].to_json(path_or_buf=buf, orient="records", force_ascii=False)
    buf.write("}]")

trace = json.load(open(cmd_folder + "data/processed/trace_regroup.json", "r"))
| pierrenodet/PFE | src/make_trace_bis.py | make_trace_bis.py | py | 1,746 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "find_dir.cmd_folder",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "find_dir.cmd_folder",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "json.load",
... |
27318923223 | # @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import cv2
# This class is inspired by https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_webcam.py
class GenerateCarView:
    """Draws detection overlays (bounding boxes, labels, FPS) on camera frames."""

    def __init__(self):
        # FPS bookkeeping; updated by addFrameRate().
        self.frameRateCalc = 1
        self.freq = cv2.getTickFrequency()
        self.t1 = cv2.getTickCount()

    def getFrame(self, image, pylons= None):
        """Return *image* with detection boxes drawn for *pylons* (if any)."""
        # self.addFrameRate(image)
        if pylons is not None:
            self.addBoxesToImage(image, pylons)
        return image

    def addFrameRate(self,image):
        """Overlay the current frames-per-second estimate on *image*."""
        cv2.putText(image,'FPS: {0:.2f}'.format(self.frameRateCalc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
        # FPS = tick frequency / ticks elapsed since the previous frame.
        self.t2 = cv2.getTickCount()
        self.frameRateCalc = self.freq/(self.t2-self.t1)
        self.t1 = cv2.getTickCount()

    def addBoxesToImage(self, image, pylons):
        """Draw one rectangle + label ("<name> <score>% <distance> m") per pylon.

        Each pylon is a dict with xmin/ymin/xmax/ymax, label, score and
        distanceAbsolut keys; *image* is modified in place.
        """
        for pylone in pylons:
            xmin = pylone['xmin']
            ymin = pylone['ymin']
            xmax = pylone['xmax']
            ymax = pylone['ymax']
            cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
            # Draw label
            label = pylone['label'] + ' %d%%' % (int(pylone['score']*100)) + ' ' + str(round(pylone['distanceAbsolut'],2)) + ' m'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
| iisys-hof/autonomous-driving | car-controller/src/mainController/View/Render/GenerateCarView.py | GenerateCarView.py | py | 2,531 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.getTickFrequency",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.getTickCount",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
... |
1701461424 | import argparse
import numpy as np
import cv2
import time
import math
from sympy.solvers import solve
from sympy import Symbol
X_POS = 0
Y_POS = 1
Thresh = 170
imageName = "picture.jpg"
def modImage(sceneName, img, kernel, erodeNum, dilateNum, invertion=False):
    """Binarise *img* (Otsu combined with the fixed Thresh), optionally
    invert it, then clean it up with erosion/dilation and a Gaussian blur.

    *sceneName* is currently unused; kept for call-site compatibility.
    """
    ret, result = cv2.threshold(img, Thresh, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    if(invertion):
        result = cv2.bitwise_not(result)
    result = cv2.erode(result, kernel, iterations=erodeNum)
    result = cv2.dilate(result, kernel, iterations=dilateNum)
    result = cv2.GaussianBlur(result, (5,5), 0)
    return result
def searchBorder(img, numOfBorder):
    """Collect up to *numOfBorder* border-adjacent points of a binary image.

    Scans from the image centre towards the top-left for a white->black
    transition backed by a sufficiently thick dark run, then breadth-first
    searches through dark pixels, queueing only neighbours that touch at
    least one bright pixel. Returns a list of [x, y] points.
    """
    result_point = []
    myQ = []
    height, width = img.shape[:2]
    # NOTE(review): built as width x height but indexed [y][x]; out-of-range
    # accesses are silently skipped by the try/except below — confirm intended.
    visited = [[False for rows in range(0, height)]for cols in range(0, width)]
    #direction = [ [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1] ]
    direction = [ [0, 1], [1, 1], [1, 0], [-1, 1], [-1, 0], [-1, 1], [0, -1], [1, -1]]
    start_x = int(width / 2)
    start_y = int(height / 2)
    startBorder = False
    borderCounter = 0
    search_cursor_x = -1
    search_cursor_y = -1
    # Seed search: walk from the centre toward (0, 0); remember where a
    # bright->dark border starts and count how thick the dark run is.
    for y in range(start_y, 0, -1):
        for x in range(start_x, 0, -1):
            if(img[y][x] != 0 and not startBorder):
                startBorder = True
                search_cursor_x = x
                search_cursor_y = y
                borderCounter += 1
            elif(img[y][x] != 0 and startBorder):
                startBorder = False
                borderCounter = 0
            elif(img[y][x] == 0 and startBorder):
                borderCounter += 1
            # Seed the BFS once the dark run is thick enough (> 10 px).
            if(startBorder and borderCounter > 10):
                myQ.append([search_cursor_x, search_cursor_y])
    # BFS: keep a point when at least one of its 8 neighbours is bright.
    while len(myQ) != 0:
        point = myQ.pop(0)
        try:
            if(visited[point[Y_POS]][point[X_POS]]):
                continue
        except:
            continue
        visited[point[Y_POS]][point[X_POS]] = True
        result_point.append(point)
        if( len(result_point) >= numOfBorder ):
            return result_point
        test_border = False
        temp_list = []
        for dir in direction:
            next_point = [ point[X_POS] + dir[X_POS], point[Y_POS] + dir[Y_POS] ]
            try:
                if(img[next_point[Y_POS]][next_point[X_POS]] == 0):
                    temp_list.append(next_point)
                else:
                    test_border = True
            except:
                continue
        # Expand only from points that actually touch the bright region.
        if(test_border):
            for temp_point in temp_list:
                myQ.append(temp_point)
    return result_point
def findCircleCenter(pointA, pointB, pointC):
    """Return the integer centre (x, y) and radius of the circle through
    three points, found as the intersection of two perpendicular bisectors
    (solved symbolically with sympy).

    Raises ZeroDivisionError when two of the points share a y coordinate.
    """
    x = Symbol('x')
    y = Symbol('y')
    # Perpendicular bisector of AB: through the midpoint of AB with slope
    # -(Ax-Bx)/(Ay-By), written as a linear expression equal to zero.
    AB_center_x = (pointA[X_POS] + pointB[X_POS])/2
    AB_center_y = (pointA[Y_POS] + pointB[Y_POS])/2
    AB_incline = (pointA[X_POS] - pointB[X_POS]) / (pointA[Y_POS] - pointB[Y_POS])
    equation1 = AB_incline * x + y - AB_incline*AB_center_x - AB_center_y
    # Perpendicular bisector of AC, built the same way.
    AC_center_x = (pointA[X_POS] + pointC[X_POS])/2
    AC_center_y = (pointA[Y_POS] + pointC[Y_POS])/2
    AC_incline = (pointA[X_POS] - pointC[X_POS]) / (pointA[Y_POS] - pointC[Y_POS])
    equation2 = AC_incline * x + y - AC_incline*AC_center_x - AC_center_y
    # The bisectors intersect at the circumcentre.
    result = solve( (equation1, equation2), dict=True)
    # Radius = distance from the centre to any of the three points.
    temp_total = math.pow(result[0][x] - pointA[X_POS], 2) + math.pow(result[0][y] - pointA[Y_POS], 2)
    radius = math.sqrt(temp_total)
    return int(result[0][x]), int(result[0][y]), int(radius)
def findResult(pointList, rate):
    """Estimate the dominant circle fitted through border-point triples.

    Takes up to *rate* triples of points spaced ``len(pointList)//3``
    apart, fits a circle through each, and lets the triples "vote": the
    integer centre recorded most often wins, and its radius is the mean
    of the radii recorded for it.

    Returns (x, y, radius) as ints, or (None, None, None) when no valid
    circle could be fitted.
    """
    unit_length = int(len(pointList) / 3)
    result = {}
    for i in range(0, rate):
        try:
            x, y, radius = findCircleCenter(pointList[i], pointList[i + unit_length], pointList[i + unit_length * 2])
        except Exception:
            # Degenerate triple (collinear points / shared y) — skip it.
            continue
        # BUGFIX: reject circles whose centre lies outside the image BEFORE
        # recording them; the original only `continue`d after the insertion,
        # so invalid circles still received votes.
        if x < 0 or y < 0:
            continue
        result.setdefault((x, y), []).append(radius)
    if len(result) == 0:
        return None, None, None
    # The centre voted for by the most triples wins.
    max_key = max(result, key=lambda p: len(result[p]))
    max_value = result[max_key]
    return int(max_key[0]), int(max_key[1]), int(sum(max_value) / float(len(max_value)))
def drawCircle(pointList, output_image, point_color, circle_color, rate):
    """Debug helper: fit a circle through each of *rate* point-triples
    (spaced len/3 apart) and draw the circle and its centre on *output_image*.
    """
    unit_length = int(len(pointList) / 3)
    total_length = int(len(pointList) - unit_length*2)  # NOTE(review): unused
    for i in range(0, rate):
        try:
            x,y,radius = findCircleCenter(pointList[i], pointList[i+unit_length], pointList[i+unit_length*2])
        except:
            # Degenerate triple — skip.
            continue
        # Skip circles whose centre lies outside the image.
        if(x < 0 or y < 0):
            continue
        cv2.circle(output_image, (x,y), radius, circle_color, 1)
        cv2.rectangle(output_image, (x-2, y-2), (x+2, y+2), point_color, -1)
def getPupil(eye_img):
    """Highlight the pupil on a copy of *eye_img*.

    Thresholds dark pixels (each BGR channel in [30, 80]), takes the first
    contour with area > 50 as the pupil, stores its centroid in the
    module-level ``centroid`` variable and fills the contours in green.
    Returns the (possibly unmodified) copy.
    """
    pupilImg = cv2.inRange(eye_img.copy(), (30,30,30), (80,80,80))
    # NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4 returns only (contours, hierarchy).
    _, contours, __ = cv2.findContours(pupilImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    del pupilImg
    pupilImg = eye_img.copy()
    for cnt in contours:
        moments = cv2.moments(cnt)
        area = moments['m00']
        if (area > 50):
            pupilArea = area
            # Centroid from the image moments (m10/m00, m01/m00).
            x = moments['m10']/area
            y = moments['m01']/area
            pupil = contours
            global centroid
            centroid = (int(x),int(y))
            cv2.drawContours(pupilImg, pupil, -1, (0,255,0), -1)
            break
    return (pupilImg)
def irisDetect_debug(output, image, scale, rate):
    """Debug variant of irisDetect(): shows the intermediate HSV channels
    and masks in OpenCV windows and draws every candidate circle on *output*.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    processed_img = getPupil(image.copy())
    hsv = cv2.cvtColor(processed_img, cv2.COLOR_BGR2HSV)
    (channel_h, channel_s, channel_v) = cv2.split(hsv)
    cv2.imshow("hue", channel_h)
    cv2.imshow("saturation", channel_s)
    cv2.imshow("value", channel_v)
    # Pupil border from the hue channel; iris border from the inverted value channel.
    pupil = modImage("pu_man", channel_h, kernel, 5, 5)
    iris = modImage("ir_man", channel_v, kernel, 8, 8, True)
    cv2.imshow("pupil", pupil)
    cv2.imshow("iris", iris)
    pupil_point_list = searchBorder(pupil, scale)
    iris_point_list = searchBorder(iris, scale)
    if not pupil_point_list is None:
        drawCircle(pupil_point_list, output, (255, 255, 0), (0, 255, 0), rate)
    if not iris_point_list is None:
        drawCircle(iris_point_list, output, (0, 255, 255), (255, 0, 0), rate)
def irisDetect(output, image, scale, rate):
    """Detect the pupil circle in *image* and draw it on *output*.

    *scale* limits how many border points are sampled; *rate* how many
    point-triples vote for the final circle (see findResult()).
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    processed_img = getPupil(image.copy())
    hsv = cv2.cvtColor(processed_img, cv2.COLOR_BGR2HSV)
    (channel_h, channel_s, channel_v) = cv2.split(hsv)
    # Pupil border from the hue channel; iris border from the inverted value channel.
    pupil = modImage("pu_man", channel_h, kernel, 5, 5)
    iris = modImage("ir_man", channel_v, kernel, 8, 8, True)
    pupil_point_list = searchBorder(pupil, scale)
    iris_point_list = searchBorder(iris, scale)
    if not pupil_point_list is None:
        x,y,radius = findResult(pupil_point_list, rate)
        if x is not None:
            cv2.circle(output, (x,y), radius, (0, 255, 0), 1)
            cv2.rectangle(output, (x-2, y-2), (x+2, y+2), (255, 255, 0), -1)
    # Iris circle drawing is currently disabled:
    """
    if not iris_point_list is None:
    x,y,radius = findResult(iris_point_list, rate)
    if x is not None:
    cv2.circle(output, (x,y), radius, (255, 0, 0), 1)
    cv2.rectangle(output, (x-2, y-2), (x+2, y+2), (0, 255, 255), -1)
    """
if __name__ == "__main__":
    # Run the pupil detection on the sample picture and show the result.
    image = cv2.imread(imageName)
    output = image.copy()
    irisDetect(output, image, 1500, 30)
    cv2.imshow("display", output)
    cv2.waitKey(0)
    # NOTE(review): waitKey(0) above already blocks until a key press; this
    # second check only closes the windows if 'q' is pressed afterwards.
    if cv2.waitKey(1)&0xFF == ord('q'):
        cv2.destroyAllWindows()
| Edwin222/CPL-20181-Team3 | iris_detect_service/iris_detection.py | iris_detection.py | py | 6,860 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.threshold",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise... |
41636163192 | """Insert Noop: insert a statement that doesn't affect any other variables."""
from refactorings.base import BaseTransformation, JoernTransformation, SrcMLTransformation
from refactorings.random_word import get_random_word, get_random_typename_value
import string
from srcml import E
from lxml import etree
import logging
logger = logging.getLogger(__name__)
type_to_literaltype = {
"int": 'number',
"char": 'char',
"char *": 'string',
}
tagnames = ['expr_stmt', 'decl_stmt', 'for', 'do', 'while', 'if_stmt', 'switch', 'label']
class InsertNoop(SrcMLTransformation):
    """Insert a no-op declaration (``<type> <random_name> = <literal>;``)
    after a randomly chosen statement, leaving program behaviour unchanged."""

    def get_targets(self, **kwargs):
        """Return every statement node after which a no-op may be inserted."""
        targets = []
        for tagname in tagnames:
            targets += self.srcml.xp(f'//src:{tagname}')
        return targets

    def _apply(self, target):
        """Insert a fresh declaration right after *target* and return the
        modified C source as a list of lines (line endings kept)."""
        new_name = get_random_word()
        typename, value = get_random_typename_value()
        literaltype = type_to_literaltype[typename]
        # Build the srcML subtree for e.g. ``int foo = 42;``; the target's
        # tail (trailing whitespace/newline) is reused for the new node.
        new_decl_stmt = E.decl_stmt(
            E.decl(
                E.type(
                    E.name(typename, ' '),
                    E.name(new_name, ' '),
                    E.init(
                        '= ',
                        E.expr(
                            E.literal(value, {"type": literaltype})
                        )
                    ),
                ),
                ';'
            ),
            target.tail
        )
        logger.debug(etree.tostring(new_decl_stmt))
        try:
            # Splice the new statement in directly after the target node;
            # roll the tree back if applying the change fails.
            target_idx = target.getparent().index(target)
            target.getparent().insert(target_idx+1, new_decl_stmt)
            self.srcml.apply_changes()
        except Exception:
            self.srcml.revert_changes()
            raise
        new_text = self.srcml.load_c_code()
        return new_text.splitlines(keepends=True)
| bstee615/cfactor | refactorings/insert_noop.py | insert_noop.py | py | 1,800 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "refactorings.base.SrcMLTransformation",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "refactorings.random_word.get_random_word",
"line_number": 27,
"usage_type": "call"... |
21396845249 | from typing import Dict
from starlette.types import ASGIApp, Receive, Scope, Send
class AsgiDispatcher:
    """Route incoming ASGI connections to sub-applications by path prefix.

    ``patterns`` maps a path prefix to the ASGI app that handles requests
    under it (first matching prefix wins, in insertion order). Anything
    that matches no prefix — and every non-HTTP/WebSocket scope, such as
    ``lifespan`` — is handled by ``default``.
    """

    def __init__(self, patterns: 'Dict[str, ASGIApp]', default: 'ASGIApp'):
        self.patterns = patterns
        self.default_app = default

    async def __call__(self, scope: 'Scope', receive: 'Receive', send: 'Send') -> None:
        app = self.default_app
        # Only http/websocket scopes carry a 'path' key; the ASGI lifespan
        # scope has none, so reading it unconditionally (as the original
        # did) raised KeyError on startup/shutdown events.
        if scope['type'] in {'http', 'websocket'}:
            request_path = scope['path']
            for pattern_prefix, pattern_app in self.patterns.items():
                if request_path.startswith(pattern_prefix):
                    app = pattern_app
                    break
        await app(scope, receive, send)
| TheRacetrack/racetrack | racetrack_commons/racetrack_commons/api/asgi/dispatcher.py | dispatcher.py | py | 730 | python | en | code | 27 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "starlette.types.ASGIApp",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "starlette.types.Scope",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "starlette.typ... |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Load the Iris dataset from a local copy.
# NOTE(review): hard-coded absolute Windows path — not portable.
df = pd.read_csv("C:/Users/Admin/OneDrive/Desktop/decision tree/Iris.csv")
df.head()
df.isnull().sum()
df.shape
df.info()
df.describe()
# The Id column carries no signal; drop it in place.
df.drop('Id',axis=1, inplace=True)
df.shape
# Class balance of the three species.
df['Species'].value_counts().plot(kind='pie', autopct="%.1f%%")
df.corr()
sns.heatmap(df.corr(), annot=True)
# Features = the four measurement columns, target = Species.
x = df.iloc[:,:4].values
y = df.iloc[:,4:5]
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.20,random_state=0)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
# Fit a decision tree and evaluate on the held-out 20 %.
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(y_pred)
print("Accuracy: ", metrics.accuracy_score(y_test,y_pred))
# Predict the species of one new measurement.
new_data = [[3.5, 3.0, 1.2, 1.7]]
y_pred = model.predict(new_data)
print(y_pred)
from sklearn import tree
import matplotlib.pyplot as plt  # NOTE(review): duplicate import (see top of file)
# Visualise the fitted tree.
plt.figure(figsize = (20,10))
tree.plot_tree(model, filled=True, rounded=True)
plt.show()
| ShreyasiDesai/LGMVIP-DataScience | decition tree.py | decition tree.py | py | 1,182 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 24,
"usage_type": "call"
},
{
"api_name"... |
28002035268 | import os
import torch
import numpy as np
from PIL import Image
# This dataset comes form paper:
# [2D and 3D Segmentation of Uncertain Local Collagen Fiber Orientations in SHG Microscopy]
# https://github.com/Emprime/uncertain-fiber-segmentation
def collagen3d_dataset(dataloader_config, label_type='mask'):
    """Build (train, val) DataLoaders for the SHG collagen dataset.

    dataloader_config supplies 'dataset_path', 'train_batch_size' and
    'val_batch_size'; label_type is 'classlabel' or 'mask' and selects
    what each sample's target is.
    """
    dataset_root = dataloader_config['dataset_path']
    workers = os.cpu_count()
    # Training data is preloaded into RAM; validation data is read lazily.
    train_set = _DatasetLD(data_path=dataset_root, dataset_return=label_type, read_in_ram_mode=True)
    val_set = _DatasetLD(data_path=dataset_root, dataset_return=label_type, read_in_ram_mode=False)
    shared_args = dict(num_workers=workers, pin_memory=True)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=dataloader_config['train_batch_size'], shuffle=True, **shared_args)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=dataloader_config['val_batch_size'], shuffle=False, **shared_args)
    return train_loader, val_loader
class _DatasetLD(torch.utils.data.Dataset):
    """Dataset of 3-D SHG image stacks and their segmentation masks.

    Each sample is a leaf folder of 2-D slices under ``shg-ce-de`` (images)
    with matching slices under ``shg-masks`` (masks). With
    ``read_in_ram_mode`` every stack is preloaded into memory at
    construction time.
    """

    def __init__(self, data_path, dataset_return, transform=None, target_transform=None, read_in_ram_mode=False):
        super().__init__()
        self.dataset_path = data_path
        # 'mask' or 'classlabel': selects what __getitem__ returns.
        self.dataset_return = dataset_return
        self.read_in_ram_mode = read_in_ram_mode
        self.img_name = []
        self.num_label = []
        for subfolder in os.listdir(data_path):
            subfolder_path = os.path.join(self.dataset_path, subfolder)
            # Sub-folders:
            #   shg-ce-de: SHG image
            #   shg-masks: SHG mask
            if subfolder == 'shg-ce-de':
                for root, dirs, files in os.walk(subfolder_path):
                    # A leaf folder (no sub-dirs, some files) holds one stack.
                    if not len(dirs) and len(files):
                        # print(root, dirs, files)
                        self.img_name.append(root)
        # Read all images in RAM, which requires a large RAM
        if self.read_in_ram_mode:
            img_all_list, mask_all_list = [], []
            for i, index in enumerate(range(len(self.img_name))):
                print(f'Reading image [{i+1}]/[{len(self.img_name)}]')
                image_folder = self.img_name[index]
                mask_folder = self.img_name[index].replace('shg-ce-de', 'shg-masks')
                img_list, mask_list = [], []
                img_file_list, mask_file_list = list(os.listdir(image_folder)), list(os.listdir(mask_folder))
                # Slices are ordered by the first number in their file name.
                img_file_list.sort(key=self._sort_num)
                for img_name in img_file_list:
                    img = np.array(Image.open(os.path.join(image_folder, img_name)).convert('L'))  # [H, W]
                    img = np.reshape(img, img.shape + (1,))  # Convert gray image into [H, W, C] mode
                    # img = np.array(Image.open(os.path.join(image_folder, img_name)))
                    img_list.append(img)
                mask_file_list.sort(key=self._sort_num)
                for mask_name in mask_file_list:
                    mask_list.append(np.array(Image.open(os.path.join(mask_folder, mask_name))))  # [H, W, C]
                img_all_list.append(img_list)
                mask_all_list.append(mask_list)
            # In RAM mode these attributes hold the data itself, not paths.
            self.img_name, self.num_label = img_all_list, mask_all_list
        self.transform = transform
        self.target_transform = target_transform

    @staticmethod
    def _inner_rand_cut(img_in, cut_start):
        """Cut the longer spatial axis to a square starting at *cut_start*.

        NOTE(review): unpacks ``img_in.shape`` into two values but indexes
        with three axes — only consistent for 3-D input; verify callers.
        """
        h, w = img_in.shape
        if h > w:
            return img_in[cut_start:cut_start+w, :, :]
        else:
            return img_in[:, cut_start:cut_start+h, :]

    @staticmethod
    def _sort_num(name_string):
        '''
        Separate numbers in a name, in order to sort.
        Extract the first number in string; returns -1.0 when none is found.
        '''
        import re
        num = re.findall('\d+\.?\d*', name_string)
        try:
            num = float(num[0])
        except:
            num = -1.0
        return num

    def __getitem__(self, index):  # Read data once
        # Dispatch on the configured return type; anything else yields None.
        if self.dataset_return == 'mask':
            return self._getitem_mask(index)
        elif self.dataset_return == 'classlabel':
            return self._getitem_label(index)
        else:
            return

    def _getitem_label(self, index):  # Read data once
        """Return (image, class label) for sample *index*.

        NOTE(review): marked "Not written" by the author; it assumes a flat
        'image' folder and 2-D images, and ``randint(0, 0)`` raises for
        square images — likely broken for this dataset.
        """
        # Todo !!!!!!!!! Not written
        file_name = self.img_name[index]
        label = self.num_label[index]
        img = Image.open(os.path.join(self.dataset_path, 'image', file_name))
        img = np.array(img)
        # Random cut
        h, w = img.shape
        cut_start = np.random.randint(0, abs(h-w))
        img = self._inner_rand_cut(img, cut_start)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def _getitem_mask(self, index):  # Read data once
        """Return (image volume, mask volume) for sample *index*.

        Image: float32 [C, H, W, D]; mask: int64 [H, W, D] with values 0/1.
        A random 64x64x32 crop is taken from the full stack.
        """
        if self.read_in_ram_mode:
            img_list = self.img_name[index]
            mask_list = self.num_label[index]
        else:
            # Lazy mode: load every slice of this stack from disk now.
            image_folder = self.img_name[index]
            mask_folder = self.img_name[index].replace('shg-ce-de', 'shg-masks')
            img_list, mask_list = [], []
            img_file_list, mask_file_list = list(os.listdir(image_folder)), list(os.listdir(mask_folder))
            img_file_list.sort(key=self._sort_num)
            for img_name in img_file_list:
                img = np.array(Image.open(os.path.join(image_folder, img_name)).convert('L'))  # [H, W]
                img = np.reshape(img, img.shape + (1,))  # Convert gray image into [H, W, C] mode
                # img = np.array(Image.open(os.path.join(image_folder, img_name)))
                img_list.append(img)
            mask_file_list.sort(key=self._sort_num)
            for mask_name in mask_file_list:
                mask_list.append(np.array(Image.open(os.path.join(mask_folder, mask_name))))  # [H, W, C]
        img = np.array(img_list).transpose([3, 1, 2, 0])  # Convert from [D, H, W, C] into [C, H, W, D] mode
        mask = np.array(mask_list)
        mask = np.max(mask, axis=3)  # Convert mask to label
        mask = np.transpose(mask, [1, 2, 0])  # Convert from [D, H, W] into [H, W, D] mode
        # ToDo Temp: random crop to a fixed 64x64x32 patch.
        _, h, w, d = img.shape
        new_size = 64
        new_depth = 32
        h_random, w_random, d_random = np.random.randint(0, h-new_size), np.random.randint(0, w-new_size), np.random.randint(0, d-new_depth)
        img = img[:, h_random:h_random+new_size, w_random:w_random+new_size, d_random:d_random+new_depth]
        mask = mask[h_random:h_random+new_size, w_random:w_random+new_size, d_random:d_random+new_depth]
        ######################################
        # Should cut with mask here
        ######################################
        # # Random cut
        # h, w = img.shape
        # if np.abs(h - w):
        #     cut_start = np.random.randint(0, abs(h - w))
        #     img = self._inner_rand_cut(img, cut_start)
        #     mask = self._inner_rand_cut(mask, cut_start)
        if self.transform is not None:
            img = self.transform(img)
        return np.array(img, dtype=np.float32), np.array(mask/255., dtype=np.int64)

    def __len__(self):
        # In RAM mode img_name holds loaded stacks; otherwise folder paths.
        return len(self.img_name)
| Surtol-Sun/TrainFramework_torch | components/dataset_loader/dataset_loader_3dcollagen.py | dataset_loader_3dcollagen.py | py | 7,648 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.cpu_count",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.utils... |
19772157877 | # -*- coding: utf-8 -*-
"""
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.model.hots.hots_nn_index))"
python -m doctest -v ibeis/model/hots/hots_nn_index.py
python -m doctest ibeis/model/hots/hots_nn_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
from six.moves import zip, map, range
#from itertools import chain
import sys
# Science
import numpy as np
# UTool
import utool
# VTool
from ibeis.other import ibsfuncs
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[nnindex]', DEBUG=False)
NOCACHE_FLANN = '--nocache-flann' in sys.argv
def get_indexed_cfgstr(ibs, aid_list):
    """
    Creates a config string for the input into the nearest neighbors index

    It is based off of the features which were computed for it and the indexes
    of the input annotations. The result looks like
    ``_daids((6)qbm6uaegu7gv!ut!)_FEAT(params)``.

    TODO: We should probably use the Annotation UUIDS rather than the ROWIDs
    to compute this configstr
    """
    feat_cfgstr = ibs.cfg.feat_cfg.get_cfgstr()
    # returns something like: _daids((6)qbm6uaegu7gv!ut!)_FEAT(params)
    daid_cfgstr = utool.hashstr_arr(aid_list, 'daids')  # todo change to uuids
    new_cfgstr = '_' + daid_cfgstr + feat_cfgstr
    return new_cfgstr
def build_ibs_inverted_descriptor_index(ibs, aid_list):
    """
    Aggregates descriptors of input annotations and returns inverted information

    Returns (dx2_desc, dx2_aid, dx2_fx). Raises AssertionError for an
    empty aid_list; any failure is logged with the ibs info and re-raised.
    """
    try:
        if len(aid_list) == 0:
            msg = ('len(aid_list) == 0\n'
                   'Cannot build inverted index without features!')
            raise AssertionError(msg)
        desc_list = ibs.get_annot_desc(aid_list)
        dx2_desc, dx2_aid, dx2_fx = _try_build_inverted_descriptor_index(aid_list, desc_list)
        return dx2_desc, dx2_aid, dx2_fx
    except Exception as ex:
        # Dump database info to help diagnose the failure before re-raising.
        intostr = ibs.get_infostr()
        print(intostr)
        utool.printex(ex, 'cannot build inverted index', key_list=list(locals().keys()))
        raise
def _try_build_inverted_descriptor_index(aid_list, desc_list):
    """
    Wrapper which performs logging and error checking

    Delegates to _build_inverted_descriptor_index, reporting progress
    unless utool is in quiet mode; MemoryErrors are logged and re-raised.
    """
    if utool.NOT_QUIET:
        print('[agg_desc] stacking descriptors from %d annotations' % len(aid_list))
    try:
        dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
    except MemoryError as ex:
        utool.printex(ex, 'cannot build inverted index', '[!memerror]')
        raise
    if utool.NOT_QUIET:
        print('[agg_desc] stacked %d descriptors from %d annotations'
              % (len(dx2_desc), len(aid_list)))
    return dx2_desc, dx2_aid, dx2_fx
def _build_inverted_descriptor_index(aid_list, desc_list):
"""
Stacks descriptors into a flat structure and returns inverse mapping from
flat database descriptor indexes (dx) to annotation ids (aid) and feature
indexes (fx). Feature indexes are w.r.t. annotation indexes.
Output:
dx2_desc - flat descriptor stack
dx2_aid - inverted index into annotations
dx2_fx - inverted index into features
# Example with 2D Descriptors
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> from ibeis.model.hots.hots_nn_index import _build_inverted_descriptor_index
>>> DESC_TYPE = np.uint8
>>> aid_list = [1, 2, 3, 4, 5]
>>> desc_list = [
... np.array([[0, 0], [0, 1]], dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.empty((0, 2), dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.array([[3, 3], [42, 42], [2, 6]], dtype=DESC_TYPE),
... ]
>>> dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
>>> print(repr(dx2_desc.T))
array([[ 0, 0, 5, 2, 1, 5, 2, 1, 3, 42, 2],
[ 0, 1, 3, 30, 1, 3, 30, 1, 3, 42, 6]], dtype=uint8)
>>> print(repr(dx2_aid))
array([1, 1, 2, 2, 2, 4, 4, 4, 5, 5, 5])
>>> print(repr(dx2_fx))
array([0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2])
cdef:
list aid_list, desc_list
long nFeat, aid
iter aid_nFeat_iter, nFeat_iter, _ax2_aid, _ax2_fx
np.ndarray dx2_aid, dx2_fx, dx2_desc
"""
# Build inverted index of (aid, fx) pairs
aid_nFeat_iter = zip(aid_list, map(len, desc_list))
nFeat_iter = map(len, desc_list)
# generate aid inverted index for each feature in each annotation
_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# Avi: please test the timing of the lines neighboring this statement.
#_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# generate featx inverted index for each feature in each annotation
_ax2_fx = (range(nFeat) for nFeat in nFeat_iter)
# Flatten generators into the inverted index
#dx2_aid = np.array(list(chain.from_iterable(_ax2_aid)))
#dx2_fx = np.array(list(chain.from_iterable(_ax2_fx)))
dx2_aid = np.array(utool.flatten(_ax2_aid))
dx2_fx = np.array(utool.flatten(_ax2_fx))
# Stack descriptors into numpy array corresponding to inverted inexed
# This might throw a MemoryError
dx2_desc = np.vstack(desc_list)
return dx2_desc, dx2_aid, dx2_fx
#@utool.indent_func('[build_invx]')
def build_flann_inverted_index(ibs, aid_list, **kwargs):
"""
Build a inverted index (using FLANN)
"""
# Aggregate descriptors
dx2_desc, dx2_aid, dx2_fx = build_ibs_inverted_descriptor_index(ibs, aid_list)
# hash which annotations are input
indexed_cfgstr = get_indexed_cfgstr(ibs, aid_list)
flann_params = {'algorithm': 'kdtree', 'trees': 4}
flann_cachedir = ibs.get_flann_cachedir()
precomp_kwargs = {'cache_dir': flann_cachedir,
'cfgstr': indexed_cfgstr,
'flann_params': flann_params,
'use_cache': kwargs.get('use_cache', not NOCACHE_FLANN)}
# Build/Load the flann index
flann = nntool.flann_cache(dx2_desc, **precomp_kwargs)
return dx2_desc, dx2_aid, dx2_fx, flann
class HOTSIndex(object):
""" HotSpotter Nearest Neighbor (FLANN) Index Class
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> daid_list = [1, 2, 3, 4]
>>> hsindex = HOTSIndex(ibs, daid_list) #doctest: +ELLIPSIS
[nnindex...
>>> print(hsindex) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSIndex object at ...>
"""
def __init__(hsindex, ibs, daid_list, **kwargs):
print('[nnindex] building HOTSIndex object')
dx2_desc, dx2_aid, dx2_fx, flann = build_flann_inverted_index(
ibs, daid_list, **kwargs)
# Agg Data
hsindex.dx2_aid = dx2_aid
hsindex.dx2_fx = dx2_fx
hsindex.dx2_data = dx2_desc
# Grab the keypoints names and image ids before query time
#hsindex.rx2_kpts = ibs.get_annot_kpts(daid_list)
#hsindex.rx2_gid = ibs.get_annot_gids(daid_list)
#hsindex.rx2_nid = ibs.get_annot_nids(daid_list)
hsindex.flann = flann
def __getstate__(hsindex):
""" This class it not pickleable """
#printDBG('get state HOTSIndex')
return None
#def __del__(hsindex):
# """ Ensure flann is propertly removed """
# printDBG('deleting HOTSIndex')
# if getattr(hsindex, 'flann', None) is not None:
# nn_selfindex.flann.delete_index()
# #del hsindex.flann
# hsindex.flann = None
def nn_index(hsindex, qfx2_desc, K, checks):
(qfx2_dx, qfx2_dist) = hsindex.flann.nn_index(qfx2_desc, K, checks=checks)
return (qfx2_dx, qfx2_dist)
def nn_index2(hsindex, qreq, qfx2_desc):
""" return nearest neighbors from this data_index's flann object """
flann = hsindex.flann
K = qreq.cfg.nn_cfg.K
Knorm = qreq.cfg.nn_cfg.Knorm
checks = qreq.cfg.nn_cfg.checks
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, K + Knorm, checks=checks)
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
return qfx2_aid, qfx2_fx, qfx2_dist, K, Knorm
class HOTSMultiIndex(object):
"""
Generalization of a HOTSNNIndex
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> daid_list = [1, 2, 3, 4]
>>> num_forests = 8
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> split_index = HOTSMultiIndex(ibs, daid_list, num_forests) #doctest: +ELLIPSIS
[nnsindex...
>>> print(split_index) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSMultiIndex object at ...>
"""
def __init__(split_index, ibs, daid_list, num_forests=8):
print('[nnsindex] make HOTSMultiIndex over %d annots' % (len(daid_list),))
# Remove unknown names
aid_list = daid_list
known_aids_list, unknown_aids = ibsfuncs.group_annots_by_known_names(ibs, aid_list)
num_bins = min(max(map(len, known_aids_list)), num_forests)
# Put one name per forest
forest_aids, overflow_aids = utool.sample_zip(
known_aids_list, num_bins, allow_overflow=True, per_bin=1)
forest_indexes = []
extra_indexes = []
for tx, aids in enumerate(forest_aids):
print('[nnsindex] building forest %d/%d with %d aids' %
(tx + 1, num_bins, len(aids)))
if len(aids) > 0:
hsindex = HOTSIndex(ibs, aids)
forest_indexes.append(hsindex)
if len(overflow_aids) > 0:
print('[nnsindex] building overflow forest')
overflow_index = HOTSIndex(ibs, overflow_aids)
extra_indexes.append(overflow_index)
if len(unknown_aids) > 0:
print('[nnsindex] building unknown forest')
unknown_index = HOTSIndex(ibs, unknown_aids)
extra_indexes.append(unknown_index)
#print('[nnsindex] building normalizer forest') # TODO
split_index.forest_indexes = forest_indexes
split_index.extra_indexes = extra_indexes
#split_index.overflow_index = overflow_index
#split_index.unknown_index = unknown_index
#@utool.classmember(HOTSMultiIndex)
def nn_index(split_index, qfx2_desc, num_neighbors):
qfx2_dx_list = []
qfx2_dist_list = []
qfx2_aid_list = []
qfx2_fx_list = []
qfx2_rankx_list = [] # ranks index
qfx2_treex_list = [] # tree index
for tx, hsindex in enumerate(split_index.forest_indexes):
flann = hsindex.flann
# Returns distances in ascending order for each query descriptor
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, num_neighbors, checks=1024)
qfx2_dx_list.append(qfx2_dx)
qfx2_dist_list.append(qfx2_dist)
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx_list.append(qfx2_fx)
qfx2_aid_list.append(qfx2_aid)
qfx2_rankx_list.append(np.array([[rankx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
qfx2_treex_list.append(np.array([[tx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
# Combine results from each tree
(qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,) = \
join_split_nn(qfx2_dist_list, qfx2_dist_list, qfx2_rankx_list, qfx2_treex_list)
def join_split_nn(qfx2_dx_list, qfx2_dist_list, qfx2_aid_list, qfx2_fx_list, qfx2_rankx_list, qfx2_treex_list):
qfx2_dx = np.hstack(qfx2_dx_list)
qfx2_dist = np.hstack(qfx2_dist_list)
qfx2_rankx = np.hstack(qfx2_rankx_list)
qfx2_treex = np.hstack(qfx2_treex_list)
qfx2_aid = np.hstack(qfx2_aid_list)
qfx2_fx = np.hstack(qfx2_fx_list)
# Sort over all tree result distances
qfx2_sortx = qfx2_dist.argsort(axis=1)
# Apply sorting to concatenated results
qfx2_dist_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dist)]
qfx2_aid_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dx)]
qfx2_fx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_aid)]
qfx2_dx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_fx)]
qfx2_rankx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_rankx)]
qfx2_treex_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_treex)]
return (qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,)
#@utool.classmember(HOTSMultiIndex)
def split_index_daids(split_index):
for hsindex in split_index.forest_indexes:
pass
#if __name__ == '__main__':
# #python -m doctest -v ibeis/model/hots/hots_nn_index.py
# import doctest
# doctest.testmod()
| smenon8/ibeis | _broken/old/hots_nn_index.py | hots_nn_index.py | py | 12,775 | python | en | code | null | github-code | 6 | [
{
"api_name": "utool.inject",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "utool.hashstr_arr",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utool.printex",
"lin... |
12608079869 | '''
Load embedding, create dictionary, convert text to index
'''
import io
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import argparse
#import json
import os
import numpy as np
import pickle
import pdb
def text2index(text, vocab, analyzer):
# 1 is unk
doc_toks = [vocab[y] if y in vocab else 1 for y in analyzer(text) ]
return doc_toks
def load_vectors(fname):
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
data = {}
for line in fin:
tokens = line.rstrip().split(' ')
data[tokens[0]] = np.fromiter(map(float, tokens[1:]), dtype=np.float)
return data
def build_vocab(text, emb, emb_dim=300, max_df=.7, max_features=20000, stop_words= 'english'):
'''
Fit vocabulary
:param text: list of documents for creating vocabulary
:return: vectorizer
'''
vect = CountVectorizer(stop_words=stop_words, max_df=max_df, max_features=max_features,
token_pattern=r"(?u)[!\"#\$\%&\'()\*\+,-./:;<=>\?@\[\\\]\^_`{|}~\w]+")
vect.fit(text)
no_embedding = [k for k in vect.vocabulary_.keys() if k not in emb]
print("No Embeddings for: ")
print(len(no_embedding))
vocab = [k for i, k in enumerate(vect.vocabulary_.keys()) if k in emb]
new_vocab = dict([(k, i + 2) for i, k in enumerate(vocab)])
# Set 0 to be the padding index, 1 to be unk
vect.vocabulary_ = new_vocab
print('Vocabulary size: ', len(new_vocab))
embedding = np.zeros(shape=(len(new_vocab) + 2, emb_dim))
for k,i in new_vocab.items():
embedding[i] = emb[k]
return vect, embedding
def df2List(df, vocab, analyzer, label_dict, ismnli = False):
out = []
for i, row in df.iterrows():
set1 = text2index(row['sentence1'], vocab, analyzer)
set2 = text2index(row['sentence2'], vocab, analyzer)
label = label_dict[row['label']]
if ismnli:
genre = row['genre']
else:
genre = 'snli'
out.append([set1, set2, label, i, genre])
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--inputPath", default = '../hw2_data/') # Should have train/val in this directory
parser.add_argument("--embPath", default='../hw2_data/wiki-news-300d-1M.vec') # embedding vector path
parser.add_argument("--emb_dim", type=int, default = 300)
parser.add_argument("--outPath") # Output Path
parser.add_argument("--max_df", type=float, default = 0.7)
parser.add_argument("--max_features", type=int, default=20000)
parser.add_argument("--stop_words", default = 'english')
args = parser.parse_args()
if not os.path.isdir(args.outPath):
os.mkdir(args.outPath)
print("Data processing parameters: ", args)
print("Loading Data")
train = pd.read_csv(args.inputPath + 'snli_train.tsv', header = 0, sep = '\t')
test = pd.read_csv(args.inputPath + 'snli_val.tsv', header=0, sep='\t')
train_mnli = pd.read_csv(args.inputPath + 'mnli_train.tsv', header=0, sep='\t')
test_mnli = pd.read_csv(args.inputPath + 'mnli_val.tsv', header=0, sep='\t')
emb = load_vectors(args.embPath)
print("Fitting Vocabulary")
vect, embedding = build_vocab(train['sentence1'] + ' ' + train['sentence2'], emb, emb_dim = args.emb_dim,
max_df = args.max_df, max_features = args.max_features, stop_words=args.stop_words)
#vect = pickle.load(open(args.outPath + 'vect.p', 'rb'))
vocab = vect.vocabulary_
analyzer = vect.build_analyzer()
print('Transform data frame')
label_dict = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
train2 = df2List(train, vocab, analyzer, label_dict)
test2 = df2List(test, vocab, analyzer, label_dict)
train_mnli2 = df2List(train_mnli, vocab, analyzer, label_dict, ismnli = True)
test_mnli2 = df2List(test_mnli, vocab, analyzer, label_dict, ismnli = True)
pickle.dump(train2, open(args.outPath + 'train.p', 'wb'))
pickle.dump(test2, open(args.outPath + 'test.p', 'wb'))
pickle.dump(train_mnli2, open(args.outPath + 'train_mnli.p', 'wb'))
pickle.dump(test_mnli2, open(args.outPath + 'test_mnli.p', 'wb'))
pickle.dump(vect, open(args.outPath + 'vect.p', 'wb'))
pickle.dump(embedding, open(args.outPath + 'embedding.p', 'wb'))
# Document length:
lsLen = [max(len(x[0]), len(x[1])) for x in train2]
print('Median doc size: ', np.percentile(lsLen, 50))
print('95 percentile: ', np.percentile(lsLen, 95))
print('Max: ', max(lsLen))
lsLen = [max(len(x[0]), len(x[1])) for x in train_mnli2]
print('Median mnli_doc size: ', np.percentile(lsLen, 50))
print('95 percentile: ', np.percentile(lsLen, 95))
print('Max: ', max(lsLen))
| jingsliu/NLP_HW | HW2/code/dataPrep.py | dataPrep.py | py | 4,852 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "io.open",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.fromiter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sklearn.feature_extraction.te... |
12509808903 | import requests
city = input('enter the city... ')
api_address = 'https://samples.openweathermap.org/data/2.5/weather?q={},uk&appid=b6907d289e10d714a6e88b30761fae22'.format(
city)
url = api_address + city
data = requests.get(url).json()
# print(data)
weather = data['weather']
print(weather[0]['description'])
| Riyam224/techcampus---projects | 04/testApi.py | testApi.py | py | 319 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
}
] |
71735044668 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
# Change the display options
pd.options.display.max_columns = None
pd.options.display.max_rows = None
species_df = pd.read_csv('species_info.csv')
observations_df = pd.read_csv('observations.csv')
# print(species_df.head())
# print(observations_df.head())
# Describe data of species
# print(species_df.dtypes)
species = species_df.astype({'category': 'string',
'scientific_name': 'string',
'common_names': 'string',
'conservation_status': 'string'}) # Change our types of columns
# print(species_df.info())
print(species_df.describe())
print(species_df.category.value_counts())
# print(species_df.conservation_status.value_counts(normalize=True))
# Pie and bar of category
"""
sub_category = species_df.category.value_counts()
plt.figure(figsize=(10, 8))
plt.pie(species_df.category.value_counts().values, labels=species_df.category.value_counts().index, autopct='%1.1f%%')
plt.suptitle('Category of species', fontweight='bold')
plt.savefig('pie_category.png')
plt.show()
"""
# Describe data of observations
# print(observations_df.dtypes)
observations_df = observations_df.astype({'scientific_name': 'string',
'park_name': 'string'})
# print(observations_df.info())
print(observations_df.describe())
# print(observations_df.observations.median())
# print(observations_df.observations.mode())
# print(observations_df.observations.mad())
# The distribution of conservation_status for animals
"""
status_counts = species_df.conservation_status.value_counts()
plt.figure(figsize=(10, 8))
plt.subplot(1, 2, 1)
sns.countplot(x='conservation_status', data=species_df)
plt.xlabel('Conservation status')
plt.ylabel('Count of status')
plt.xticks(rotation=15)
plt.subplot(1, 2, 2)
plt.pie(status_counts, labels=status_counts.index, autopct='%1.1f%%')
plt.axis('equal')
plt.suptitle('Distribution of conservation status for animals', fontweight='bold')
plt.subplots_adjust(wspace=0.5)
plt.savefig('dis_con_status.png')
plt.show()
plt.clf()
"""
# Certain types of species more likely to be endangered
influence = pd.crosstab(species_df.category, species_df.conservation_status)
influence_prop = influence / len(species_df)
print(influence)
print(influence_prop)
influence_marginals = influence_prop.sum(axis=0)
influence_marginals_1 = influence_prop.sum(axis=1)
print(influence_marginals)
print(influence_marginals_1)
chi2, pval, dof, expected = stats.chi2_contingency(influence)
print(expected)
print(chi2)
# Species were spotted the most at each park
"""
merged_df = species_df.merge(observations_df)
grouped_df = merged_df.groupby('category')['observations'].count()
print(grouped_df)
plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
sns.boxplot(x='category', y='observations', data=merged_df)
plt.xlabel('Species')
plt.ylabel('Number of observations')
plt.xticks(rotation=15)
plt.subplot(1, 2, 2)
plt.pie(grouped_df, labels=grouped_df.index, autopct='%1.1f%%')
plt.suptitle('Species were spotted the most at each park', fontweight='bold')
plt.savefig('species_observ.png')
plt.show()
plt.clf()
"""
# sns.histplot(x='observations', data=observations_df)
# plt.show()
print(species_df.scientific_name.mode())
| Pavich-3/-Biodiversity-in-National-Parks | project.py | project.py | py | 3,509 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.options",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.options",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv... |
30192254789 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# sigmoid函数
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# 定义回归模型
def model(X, theta):
return sigmoid(np.dot(X, theta.T))
# 计算梯度
def gradient(X, y, theta):
grad = np.zeros(theta.shape) # 初始化梯度,维度与参数向量的维度相同
error = (model(X, theta) - y).ravel() # 计算偏差
for j in range(len(theta.ravel())): # 计算n个偏导数(梯度)
term = np.multiply(error, X[:, j])
grad[0, j] = np.sum(term) / len(X)
return grad
# 定义损失函数
def cost(X, y, theta):
return np.sum((np.multiply(-y, np.log(model(X, theta)))) - (np.multiply(1 - y, np.log(1 - model(X, theta)))))/(len(X))
path = 'datas' + os.sep + 'iris.csv'
irisData = pd.read_csv(path, header=None, names=['petal_len', 'petal_width', 'sepal_len', 'sepal_width', 'class'],
dtype={'petal_len': float, 'petal_width': float, 'sepal_len': float, 'sepal_width': float,
'class': str})
irisData.loc[irisData['class'] == 'setosa', 'class'] = 0 # 将setosa置为0
irisData.loc[irisData['class'] == 'versicolor', 'class'] = 1 # 将versicolor置为1
irisData.loc[irisData['class'] == 'virginica', 'class'] = 2 # 将virginica置为2
print("---------------打印数据信息------------------ #")
print(irisData.head()) # 打印前两行
print(irisData.shape) # 打印数据维度
print(irisData.describe()) # 打印描述信息
print()
# 绘制数据分布图像
positive = irisData[irisData['class'] == 0] # 设置正类
negative = irisData[irisData['class'] == 1] # 设置负类
# fig, ax = plt.subplots(figsize=(8, 6))
fig, figer1 = plt.subplots(figsize=(10, 5)) # 设置图像大小
figer1.scatter(positive['sepal_len'], positive['sepal_width'], s=30, c='b', marker='o', label='setosa') # 绘制setosa花的散点图
figer1.scatter(negative['sepal_len'], negative['sepal_width'], s=30, c='r', marker='x',
label='versicolor') # 绘制versicolor花的散点图
figer1.legend(loc=2) # 标题放在左上角
figer1.set_xlabel('sepal_len') # 设置x标签
figer1.set_ylabel('sepal_width') # 设置y标签
plt.show() # 显示初始图像
irisData.insert(2, 'Ones', 1) # 在第3列插入一列数据,值为1
print("----------打印初始数据的前五行------------ ")
print(irisData.head())
orig_data = irisData.as_matrix() # 构造一个矩阵
print(orig_data.dtype)
print("----------------初始打印矩阵-----------------")
print(orig_data[:5, :])
cols = orig_data.shape[1] # 得到矩阵的列数
orig_data = orig_data[:100, :] # 取矩阵的前100行数据
scaled_data1 = orig_data[:50, 2:cols] # 第一类数据矩阵,选择花瓣属性
scaled_data2 = orig_data[50:100, 2:cols] # 第二类数据矩阵
np.random.shuffle(scaled_data1) # 打乱第一类数据的顺序
np.random.shuffle(scaled_data2) # 打乱第二类数据的顺序
np.random.shuffle(orig_data)
# 从两个矩阵中分别取固定个数的数据作为测试集
# scaled_data = orig_data[4:100, 2:cols]
#scaled_data = np.vstack((scaled_data1[:25, :], scaled_data2[:25, :])) # 50%
#scaled_data = np.vstack((scaled_data1[:15, :], scaled_data2[:15, :])) # 30%
scaled_data = np.vstack((scaled_data1[:5, :], scaled_data2[:5, :])) # 10%
np.random.shuffle(scaled_data) # 打乱测试集数据的顺序
print("-------打印测试集-------")
print(scaled_data)
print("------测试集的属性-------")
print(scaled_data.shape)
# 从两个矩阵中分别取相同个数的数据作为训练集
# orig_data = orig_data[:4, 2:cols]
#orig_data = np.vstack((scaled_data1[25:50, :], scaled_data2[25:50, :])) # 50%
#orig_data = np.vstack((scaled_data1[15:50, :], scaled_data2[15:50, :])) # 70%
orig_data = np.vstack((scaled_data1[5:50, :], scaled_data2[5:50, :])) # 90%
np.random.shuffle(orig_data) # 打乱训练集数据的顺序
print("---------打印训练集--------")
print(orig_data)
X = orig_data[:100, 1:cols - 1] # 选择前三列
y = orig_data[:100, cols - 1:cols] # 选择最后一列结果
print("-------打印X的值-------")
print(X)
print("---------打印y的值----------")
print(y)
# 构造参数向量
theta = np.zeros([1, 3])
# 打印矩阵的维度
print("----------打印训练数据的信息----------")
print("参数值为:")
print(theta)
print("X的维度为:")
print(X.shape)
print("y的维度为")
print(y.shape)
print("参数的维度为")
print(theta.shape)
c = cost(X, y, theta) # 求初始损失函数的值
print("--------初始损失值为-------")
print(X.dtype)
print(c)
# 刷新数据,打乱数据的顺序
def shuffleData(data):
np.random.shuffle(data)
cols = data.shape[1]
X = data[:100, 0:cols - 1]
y = data[:100, cols - 1:]
return X, y
import time
# 定义梯度下降求解函数
def descent(data, theta, batchSize, threshold, alpha):
init_time = time.time() # 设置初始时间
i = 0 # 设置迭代次数
k = 0 # batch
X, y = shuffleData(data) # 打乱数据
grad = np.zeros(theta.shape) # 计算初始的梯度
costs = [cost(X, y, theta)] # 计算初始损失函数值
# 开始迭代
while True:
grad = gradient(X[k:k + batchSize], y[k:k + batchSize], theta) # 求解梯度值
k += batchSize # 取batch个数据
if k >= n: # 如果数据取完
k = 0
X, y = shuffleData(data) # 对数据进行重新洗牌
theta = theta - alpha * grad # 对参数进行更新
print(theta)
cost_new = cost(X, y, theta) # 计算新的损失值
print(cost_new)
costs.append(cost_new) # 将新的损失之追加到列表末尾
i += 1 # 更新循环变量
value = costs # cost为损失值
if abs(value[-1] - value[-2]) < threshold:
break
return theta, i - 1, costs, grad, time.time() - init_time
# 绘制图像
def Run(data, theta, batchSize, thresh, alpha):
theta, iter, costs, grad, dur = descent(data, theta, batchSize, thresh, alpha) # 开始执行梯度下降
name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
name += " data - learning rate: {} -".format(alpha)
# 选择梯度下降策略和停止方案
if batchSize == n:
strDescType = "Gradient"
elif batchSize == 1:
strDescType = "Stochastic"
else:
strDescType = "Mini-batch({})".format(batchSize)
name += strDescType + " descent - stop: "
strStop = "costs change < {}".format(thresh)
name += strStop
print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(name, theta, iter, costs[-1],
dur))
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(np.arange(len(costs)), costs, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title(name)
plt.show()
return theta
# 开始训练模型
n = 100 # 一次读入100个数据进行训练
print("打印矩阵")
print(orig_data)
theta = Run(orig_data, theta, n, thresh=0.000001, alpha=0.1) # 两次迭代损失函数变化非常小时停止(1e-6)
# 对结果进行测试
# 设定阈值 大于0.5则为1,小于0.5为0
def predict(X, theta):
return [1 if x >= 0.5 else 0 for x in model(X, theta)]
scaled_X = scaled_data[:, :3] # 设置测试集输入
y = scaled_data[:, 3] # 正确值
print("--------打印测试的数据---------")
print(scaled_X)
print("----------theta的值为-----------")
print(theta)
predictions = predict(scaled_X, theta)
print("-----------打印预测值-----------")
print(predictions)
print("-------------打印真实值-----------")
print(y)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
accuracy = (sum(map(int, correct)) / len(correct)) * 100
print('正确率 = {0}%'.format(accuracy))
# 设置分割曲线函数
def y1(x2, theta):
# y = theta[0] + theta[1]* x1 + theta[2] * x2
x1 = (-(theta[0, 0] + theta[0, 2] * x2)) / theta[0, 1]
return x1
x2 = np.linspace(0, 5, 1000)
x1 = y1(x2, theta)
fig, figer1 = plt.subplots(figsize=(10, 5)) # 设置图像大小
figer1.scatter(positive['sepal_len'], positive['sepal_width'], s=30, c='b', marker='o', label='setosa') # 绘制setosa花的散点图
figer1.scatter(negative['sepal_len'], negative['sepal_width'], s=30, c='r', marker='x',
label='versicolor') # 绘制versicolor花的散点图
figer1.legend(loc=2) # 标题放在左上角
figer1.set_xlabel('sepal_len') # 设置x标签
figer1.set_ylabel('sepal_width') # 设置y标签
plt.plot(x1, x2, 'r-', linewidth=1)
plt.show() # 显示结果图像
| TJPU-ML/Homework-for-the-fall-semester-of-2018 | iris classification/张家源/iris4.py | iris4.py | py | 9,017 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.exp",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 22... |
72757362749 | """
Roll adjusted and multiple prices for a given contract, after checking that we do not have positions
NOTE: this does not update the roll calendar .csv files stored elsewhere. Under DRY the sole source of production
roll info is the multiple prices series
"""
from dataclasses import dataclass
import numpy as np
from syscore.interactive import print_menu_of_values_and_get_response, get_and_convert
from syscore.objects import success, failure, status, named_object
from syscore.text import landing_strip, print_with_landing_strips_around
from sysdata.data_blob import dataBlob
from sysobjects.contracts import futuresContract
from sysobjects.production.roll_state import (
default_state,
roll_adj_state,
explain_roll_state_str,
allowable_roll_state_from_current_and_position,
RollState,
no_roll_state,
)
from sysproduction.reporting.report_configs import roll_report_config
from sysproduction.reporting.reporting_functions import run_report_with_data_blob
from sysproduction.data.positions import diagPositions, updatePositions
from sysproduction.data.contracts import dataContracts
from sysproduction.data.prices import diagPrices, get_valid_instrument_code_from_user
from sysproduction.reporting.data.rolls import (
rollingAdjustedAndMultiplePrices,
relative_volume_in_forward_contract_versus_price,
)
no_change_required = named_object("No roll required")
EXIT_CODE = "EXIT"
def interactive_update_roll_status():
with dataBlob(log_name="Interactive_Update-Roll-Status") as data:
function_to_call = get_rolling_master_function()
function_to_call(data)
def get_rolling_master_function():
MANUAL_INPUT = "Manually input instrument codes and manually decide when to roll"
MENU_OPTIONS = [
MANUAL_INPUT,
"Cycle through instrument codes automatically, but manually decide when to roll",
"Cycle through instrument codes automatically, auto decide when to roll, manually confirm rolls",
"Cycle through instrument codes automatically, auto decide when to roll, automatically roll",
]
function_list = [
update_roll_status_manual_cycle,
update_roll_status_auto_cycle_manual_decide,
update_roll_status_auto_cycle_manual_confirm,
update_roll_status_full_auto,
]
print("How do you want to do your rolls today?")
selection = print_menu_of_values_and_get_response(
MENU_OPTIONS, default_str=MANUAL_INPUT
)
selection_idx = MENU_OPTIONS.index(selection)
function_to_call = function_list[selection_idx]
return function_to_call
@dataclass
class RollDataWithStateReporting(object):
instrument_code: str
original_roll_status: RollState
position_priced_contract: int
allowable_roll_states_as_list_of_str: list
days_until_roll: int
relative_volume: float
@property
def original_roll_status_as_string(self):
return self.original_roll_status.name
def display_roll_query_banner(self):
print(landing_strip(80))
print("Current State: %s" % self.original_roll_status)
print(
"Current position in priced contract %d (if zero can Roll Adjusted prices)"
% self.position_priced_contract
)
print("")
print("These are your options:")
print("")
for state_number, state in enumerate(self.allowable_roll_states_as_list_of_str):
print("%s: %s" % (state, explain_roll_state_str(state)))
print("")
def update_roll_status_manual_cycle(data: dataBlob):
do_another = True
while do_another:
instrument_code = get_valid_instrument_code_from_user(
data=data, allow_exit=True, exit_code=EXIT_CODE
)
if instrument_code is EXIT_CODE:
# belt and braces
do_another = False
else:
manually_report_and_update_roll_state_for_code(data, instrument_code)
return success
def update_roll_status_auto_cycle_manual_decide(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
for instrument_code in instrument_list:
manually_report_and_update_roll_state_for_code(
data=data, instrument_code=instrument_code
)
return success
def update_roll_status_auto_cycle_manual_confirm(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
auto_parameters = get_auto_roll_parameters()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
for instrument_code in instrument_list:
roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
roll_state_required = auto_selected_roll_state_instrument(
data=data, roll_data=roll_data, auto_parameters=auto_parameters
)
if roll_state_required is no_change_required:
warn_not_rolling(instrument_code, auto_parameters)
else:
modify_roll_state(
data=data,
instrument_code=instrument_code,
original_roll_state=roll_data.original_roll_status,
roll_state_required=roll_state_required,
confirm_adjusted_price_change=True,
)
def update_roll_status_full_auto(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
auto_parameters = get_auto_roll_parameters()
for instrument_code in instrument_list:
roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
roll_state_required = auto_selected_roll_state_instrument(
data=data, roll_data=roll_data, auto_parameters=auto_parameters
)
if roll_state_required is no_change_required:
warn_not_rolling(instrument_code, auto_parameters)
else:
modify_roll_state(
data=data,
instrument_code=instrument_code,
original_roll_state=roll_data.original_roll_status,
roll_state_required=roll_state_required,
confirm_adjusted_price_change=False,
)
def get_days_ahead_to_consider_when_auto_cycling() -> int:
days_ahead = get_and_convert(
"How many days ahead should I look for expiries?",
type_expected=int,
allow_default=True,
default_value=10,
)
return days_ahead
def get_list_of_instruments_to_auto_cycle(data: dataBlob, days_ahead: int = 10) -> list:
diag_prices = diagPrices()
list_of_potential_instruments = (
diag_prices.get_list_of_instruments_in_multiple_prices()
)
instrument_list = [
instrument_code
for instrument_code in list_of_potential_instruments
if include_instrument_in_auto_cycle(
data=data, instrument_code=instrument_code, days_ahead=days_ahead
)
]
print_with_landing_strips_around(
"Identified following instruments that are near expiry %s"
% str(instrument_list)
)
return instrument_list
def include_instrument_in_auto_cycle(
data: dataBlob, instrument_code: str, days_ahead: int = 10
) -> bool:
days_until_expiry = days_until_earliest_expiry(data, instrument_code)
return days_until_expiry <= days_ahead
def days_until_earliest_expiry(data: dataBlob, instrument_code: str) -> int:
data_contracts = dataContracts(data)
carry_days = data_contracts.days_until_carry_expiry(instrument_code)
roll_days = data_contracts.days_until_roll(instrument_code)
price_days = data_contracts.days_until_price_expiry(instrument_code)
return min([carry_days, roll_days, price_days])
@dataclass
class autoRollParameters:
    """User-chosen parameters that control the automatic roll-state cycle."""

    # Minimum relative volume (forward vs priced contract) required to roll.
    min_volume: float
    # If True, prompt the user interactively whenever a position is held.
    manual_prompt_for_position: bool
    # Roll state to assume automatically for a held position
    # (unused when manual_prompt_for_position is True).
    state_when_position_held: RollState
def get_auto_roll_parameters() -> autoRollParameters:
    """Interactively gather the parameters used by the automatic roll cycle."""
    min_volume = get_and_convert(
        "Minimum relative volume before rolling",
        type_expected=float,
        allow_default=True,
        default_value=0.1,
    )
    manual_prompt_for_position_str = input(
        "Manually prompt for state if have position? (n / *anything for yes*)"
    )
    if manual_prompt_for_position_str == "n":
        manual_prompt_for_position = False
    else:
        manual_prompt_for_position = True
    if manual_prompt_for_position:
        # The user will be asked per instrument, so no automatic state is needed.
        state_when_position_held = no_change_required
    else:
        state_when_position_held = get_state_to_use_for_held_position()
    auto_parameters = autoRollParameters(
        min_volume=min_volume,
        manual_prompt_for_position=manual_prompt_for_position,
        state_when_position_held=state_when_position_held,
    )
    return auto_parameters
# Roll states a user may choose to auto-assume when a position is held;
# the first entry is offered as the menu default.
STATE_OPTIONS = [RollState.Passive, RollState.Force, RollState.Force_Outright]
STATE_OPTIONS_AS_STR = [str(state) for state in STATE_OPTIONS]
def get_state_to_use_for_held_position() -> RollState:
    """Ask which roll state to auto-assume for a held priced-contract position."""
    print(
        "Choose state to automatically assume if we have a position in priced contract AND roll state is currently NO ROLL"
    )
    chosen_str = print_menu_of_values_and_get_response(
        STATE_OPTIONS_AS_STR, default_str=STATE_OPTIONS_AS_STR[0]
    )
    chosen_index = STATE_OPTIONS_AS_STR.index(chosen_str)
    return STATE_OPTIONS[chosen_index]
def auto_selected_roll_state_instrument(
    data: dataBlob,
    roll_data: RollDataWithStateReporting,
    auto_parameters: autoRollParameters,
) -> RollState:
    """Decide the roll state for one instrument during the auto cycle.

    Decision order: (1) skip if relative volume is below the minimum,
    (2) roll adjusted prices immediately when no position is held,
    (3) otherwise prompt the user if configured to, else
    (4) auto-apply the configured state when the current state is NO ROLL.
    Returns no_change_required when nothing should change.
    """
    if roll_data.relative_volume < auto_parameters.min_volume:
        print_with_landing_strips_around(
            "For %s relative volume of %f is less than minimum of %s : NOT AUTO ROLLING"
            % (
                roll_data.instrument_code,
                roll_data.relative_volume,
                auto_parameters.min_volume,
            )
        )
        return no_change_required
    no_position_held = roll_data.position_priced_contract == 0
    if no_position_held:
        print_with_landing_strips_around(
            "No position held, auto rolling adjusted price for %s"
            % roll_data.instrument_code
        )
        return roll_adj_state
    if auto_parameters.manual_prompt_for_position:
        # Show the roll report first so the user decides with full information.
        run_roll_report(data, roll_data.instrument_code)
        roll_state_required = get_roll_state_required(roll_data)
        return roll_state_required
    original_roll_status = roll_data.original_roll_status
    if original_roll_status is no_roll_state:
        roll_state_required = auto_parameters.state_when_position_held
        print_with_landing_strips_around(
            "Automatically changing state from %s to %s for %s"
            % (original_roll_status, roll_state_required, roll_data.instrument_code)
        )
    else:
        # A non-default state was already chosen (by a user or earlier cycle).
        print_with_landing_strips_around(
            "Roll status already set to %s for %s: not changing"
            % (original_roll_status, roll_data.instrument_code)
        )
        return no_change_required
    return roll_state_required
def warn_not_rolling(instrument_code: str, auto_parameters: autoRollParameters):
    """Print a banner explaining why an instrument was not auto-rolled."""
    message = "\n NOT rolling %s as doesn't meet auto parameters %s\n" % (
        instrument_code,
        str(auto_parameters),
    )
    print_with_landing_strips_around(message)
def manually_report_and_update_roll_state_for_code(
    data: dataBlob, instrument_code: str
):
    """Show the roll report for an instrument, then prompt for a state change."""
    run_roll_report(data, instrument_code)
    manually_update_roll_state_for_code(data, instrument_code)
def manually_update_roll_state_for_code(data: dataBlob, instrument_code: str):
    """Prompt the user for a new roll state and apply it.

    Adjusted-price changes are confirmed interactively in this manual path.
    """
    # First get the roll info
    # This will also update to console
    data.log.setup(instrument_code=instrument_code)
    roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
    roll_state_required = get_roll_state_required(roll_data)
    modify_roll_state(
        data=data,
        instrument_code=instrument_code,
        original_roll_state=roll_data.original_roll_status,
        roll_state_required=roll_state_required,
        confirm_adjusted_price_change=True,
    )
    return success
def run_roll_report(data: dataBlob, instrument_code: str):
    """Run the roll report for one instrument, printing to the console.

    Raises on failure: changing roll status without seeing the report
    would be unsafe.
    """
    config = roll_report_config.new_config_with_modified_output("console")
    config.modify_kwargs(instrument_code=instrument_code)
    report_results = run_report_with_data_blob(config, data)
    if report_results is failure:
        raise Exception("Can't run roll report, so can't change status")
def get_roll_state_required(roll_data: RollDataWithStateReporting) -> RollState:
    """Interactively ask for the desired roll state, with confirmation.

    Loops until the user confirms a change ('y'), exits (<RETURN>), or
    keeps the current state; returns no_change_required in the latter cases.
    """
    invalid_input = True
    while invalid_input:
        roll_data.display_roll_query_banner()
        roll_state_required_as_str = print_menu_of_values_and_get_response(
            roll_data.allowable_roll_states_as_list_of_str
        )
        if roll_state_required_as_str != roll_data.original_roll_status_as_string:
            # check if changing
            print("")
            check = input(
                "Changing roll state for %s from %s to %s, are you sure y/n to try again/<RETURN> to exit: "
                % (
                    roll_data.instrument_code,
                    roll_data.original_roll_status_as_string,
                    roll_state_required_as_str,
                )
            )
            print("")
            if check == "y":
                # happy
                return RollState[roll_state_required_as_str]
            elif check == "":
                print("Okay, we're done")
                return no_change_required
            else:
                print("OK. Choose again.")
                # back to top of loop
                continue
        else:
            print("No change")
            return no_change_required
def setup_roll_data_with_state_reporting(
    data: dataBlob, instrument_code: str
) -> RollDataWithStateReporting:
    """Gather everything needed to display and change an instrument's roll state.

    Collects the current state, the position in the priced contract, the
    allowable next states, days until roll, and the forward/priced relative
    volume (NaN volume is coerced to 0.0 so comparisons are well defined).
    """
    diag_positions = diagPositions(data)
    diag_contracts = dataContracts(data)
    original_roll_status = diag_positions.get_roll_state(instrument_code)
    priced_contract_date = diag_contracts.get_priced_contract_id(instrument_code)
    contract = futuresContract(instrument_code, priced_contract_date)
    position_priced_contract = int(diag_positions.get_position_for_contract(contract))
    allowable_roll_states = allowable_roll_state_from_current_and_position(
        original_roll_status, position_priced_contract
    )
    days_until_roll = diag_contracts.days_until_roll(instrument_code)
    relative_volume = relative_volume_in_forward_contract_versus_price(
        data=data, instrument_code=instrument_code
    )
    if np.isnan(relative_volume):
        relative_volume = 0.0
    roll_data_with_state = RollDataWithStateReporting(
        instrument_code=instrument_code,
        original_roll_status=original_roll_status,
        position_priced_contract=position_priced_contract,
        allowable_roll_states_as_list_of_str=allowable_roll_states,
        days_until_roll=days_until_roll,
        relative_volume=relative_volume,
    )
    return roll_data_with_state
def modify_roll_state(
    data: dataBlob,
    instrument_code: str,
    original_roll_state: RollState,
    roll_state_required: RollState,
    confirm_adjusted_price_change: bool = True,
):
    """Apply a roll state change; no-op if nothing actually changes.

    If the new state is "roll adjusted prices", also triggers the
    adjusted/multiple price roll (optionally with user confirmation).
    """
    if roll_state_required is no_change_required:
        return
    if roll_state_required is original_roll_state:
        return
    update_positions = updatePositions(data)
    update_positions.set_roll_state(instrument_code, roll_state_required)
    if roll_state_required is roll_adj_state:
        state_change_to_roll_adjusted_prices(
            data=data,
            instrument_code=instrument_code,
            original_roll_state=original_roll_state,
            confirm_adjusted_price_change=confirm_adjusted_price_change,
        )
def state_change_to_roll_adjusted_prices(
    data: dataBlob,
    instrument_code: str,
    original_roll_state: RollState,
    confirm_adjusted_price_change: bool = True,
):
    """Roll the adjusted/multiple prices, then restore an appropriate state.

    On success the roll state returns to the default (no roll); on failure
    it is rolled back to `original_roll_state`.
    """
    # Going to roll adjusted prices
    update_positions = updatePositions(data)
    roll_result = roll_adjusted_and_multiple_prices(
        data=data,
        instrument_code=instrument_code,
        confirm_adjusted_price_change=confirm_adjusted_price_change,
    )
    if roll_result is success:
        # Return the state back to default (no roll) state
        data.log.msg(
            "Successful roll! Returning roll state of %s to %s"
            % (instrument_code, default_state)
        )
        update_positions.set_roll_state(instrument_code, default_state)
    else:
        data.log.msg(
            "Something has gone wrong with rolling adjusted of %s! Returning roll state to previous state of %s"
            % (instrument_code, original_roll_state)
        )
        update_positions.set_roll_state(instrument_code, original_roll_state)
def roll_adjusted_and_multiple_prices(
    data: dataBlob, instrument_code: str, confirm_adjusted_price_change: bool = True
) -> status:
    """
    Roll multiple and adjusted prices

    THE POSITION MUST BE ZERO IN THE PRICED CONTRACT! WE DON'T CHECK THIS HERE

    :param data: dataBlob
    :param instrument_code: str
    :param confirm_adjusted_price_change: ask the user before writing new prices
    :return: success / failure status object
    """
    print(landing_strip(80))
    print("")
    print("Rolling adjusted prices!")
    print("")
    try:
        rolling_adj_and_mult_object = rollingAdjustedAndMultiplePrices(
            data, instrument_code
        )
        # this will also do the roll calculations
        rolling_adj_and_mult_object.compare_old_and_new_prices()
    except Exception as e:
        print("Error %s when trying to calculate roll prices" % str(e))
        return failure
    if confirm_adjusted_price_change:
        confirm_roll = input(
            "Confirm roll adjusted prices for %s are you sure y/n:" % instrument_code
        )
        if confirm_roll != "y":
            print(
                "\nUSER DID NOT WANT TO ROLL: Setting roll status back to previous state"
            )
            return failure
    else:
        print_with_landing_strips_around("AUTO ROLLING - NO USER CONFIRMATION REQUIRED")
    try:
        # Persist the new adjusted/multiple prices; roll back on any error.
        rolling_adj_and_mult_object.write_new_rolled_data()
    except Exception as e:
        data.log.warn(
            "%s went wrong when rolling: Going to roll-back to original multiple/adjusted prices"
            % e
        )
        rolling_adj_and_mult_object.rollback()
        return failure
    return success
| ahalsall/pysystrade | sysproduction/interactive_update_roll_status.py | interactive_update_roll_status.py | py | 18,575 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "syscore.objects.named_object",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sysdata.data_blob.dataBlob",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "syscore.interactive.print_menu_of_values_and_get_response",
"line_number": 67,
"u... |
18760758159 | import time
import aiohttp
import discord
import importlib
import os
import sys
import requests
import asyncio
from io import BytesIO
from discord.ext import commands
from my_utils import permissions, default, dataIO
from my_utils.guildstate import state_instance
class admin(commands.Cog):
    """Owner-only maintenance commands: (un/re)loading extensions, rebooting,
    changing the bot's presence/profile, simple file management, and a
    public-API lookup helper."""

    def __init__(self, bot):
        self.bot = bot
        self.config = default.get("config.json")
        self._last_result = None

    @commands.command()
    @commands.check(permissions.is_owner)
    async def load(self, ctx, name: str):
        """ Loads an extension. """
        try:
            self.bot.load_extension(f"cogs.{name}")
        except Exception as e:
            return await ctx.send(default.traceback_maker(e))
        await ctx.send(f"Loaded extension **{name}.py**")

    @commands.command()
    @commands.check(permissions.is_owner)
    async def unload(self, ctx, name: str):
        """ Unloads an extension. """
        try:
            self.bot.unload_extension(f"cogs.{name}")
        except Exception as e:
            return await ctx.send(default.traceback_maker(e))
        await ctx.send(f"Unloaded extension **{name}.py**")

    @commands.command()
    @commands.check(permissions.is_owner)
    async def reload(self, ctx, name: str):
        """ Reloads an extension. """
        try:
            self.bot.reload_extension(f"cogs.{name}")
        except Exception as e:
            return await ctx.send(default.traceback_maker(e))
        await ctx.send(f"Reloaded extension **{name}.py**")

    @commands.command()
    @commands.check(permissions.is_owner)
    async def reloadall(self, ctx):
        """ Reloads all extensions. """
        error_collection = []
        for file in os.listdir("cogs"):
            if file.endswith(".py"):
                name = file[:-3]
                try:
                    self.bot.reload_extension(f"cogs.{name}")
                except Exception as e:
                    error_collection.append(
                        [file, default.traceback_maker(e, advance=False)]
                    )
        if error_collection:
            output = "\n".join([f"**{g[0]}** ```diff\n- {g[1]}```" for g in error_collection])
            return await ctx.send(
                f"Attempted to reload all extensions, was able to reload, "
                f"however the following failed: \n\n{output}"
            )
        await ctx.send("Successfully reloaded all extensions")

    @commands.command()
    @commands.check(permissions.is_owner)
    async def reloadutils(self, ctx, name: str):
        """ Reloads a utils module. """
        name_maker = f"utils_folder/{name}.py"
        try:
            module_name = importlib.import_module(f"utils_folder.{name}")
            importlib.reload(module_name)
        except ModuleNotFoundError:
            return await ctx.send(f"Couldn't find module named **{name_maker}**")
        except Exception as e:
            error = default.traceback_maker(e)
            return await ctx.send(f"Module **{name_maker}** returned error and was not reloaded...\n{error}")
        await ctx.send(f"Reloaded module **{name_maker}**")

    @commands.command()
    @commands.check(permissions.is_owner)
    async def reboot(self, ctx):
        """ Reboot the bot """
        await ctx.send('Rebooting now...')
        time.sleep(1)
        # Persist per-guild state before the process exits.
        dataIO.backup_states(state_instance)
        await self.bot.close()
        sys.exit()

    @commands.command()
    @commands.check(permissions.is_owner)
    async def dm(self, ctx, user_id: int, *, message: str):
        """ DM the user of your choice """
        user = self.bot.get_user(user_id)
        if not user:
            return await ctx.send(f"Could not find any UserID matching **{user_id}**")
        try:
            await user.send(message)
            await ctx.send(f"✉️ Sent a DM to **{user_id}**")
        except discord.Forbidden:
            await ctx.send("This user might be having DMs blocked or it's a bot account...")

    @commands.group()
    @commands.check(permissions.is_owner)
    async def change(self, ctx):
        # Presence/profile command group; show help when no subcommand given.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(str(ctx.command))

    @change.command(name="playing")
    @commands.check(permissions.is_owner)
    async def change_playing(self, ctx, *, playing: str):
        """ Change playing status. """
        if self.config.status_type == "idle":
            status_type = discord.Status.idle
        elif self.config.status_type == "dnd":
            status_type = discord.Status.dnd
        else:
            status_type = discord.Status.online
        # Discord activity type ids: 0 = playing, 2 = listening, 3 = watching.
        if self.config.playing_type == "listening":
            playing_type = 2
        elif self.config.playing_type == "watching":
            playing_type = 3
        else:
            playing_type = 0
        try:
            await self.bot.change_presence(
                activity=discord.Activity(type=playing_type, name=playing),
                status=status_type
            )
            dataIO.change_value("config.json", "playing", playing)
            await ctx.send(f"Successfully changed playing status to **{playing}**")
        except discord.InvalidArgument as err:
            await ctx.send(err)
        except Exception as e:
            await ctx.send(e)

    @change.command(name="username")
    @commands.check(permissions.is_owner)
    async def change_username(self, ctx, *, name: str):
        """ Change username. """
        try:
            await self.bot.user.edit(username=name)
            await ctx.send(f"Successfully changed username to **{name}**")
        except discord.HTTPException as err:
            await ctx.send(err)

    @change.command(name="nickname")
    @commands.check(permissions.is_owner)
    async def change_nickname(self, ctx, *, name: str = None):
        """ Change nickname. """
        try:
            await ctx.guild.me.edit(nick=name)
            if name:
                await ctx.send(f"Successfully changed nickname to **{name}**")
            else:
                await ctx.send("Successfully removed nickname")
        except Exception as err:
            await ctx.send(err)

    @change.command(name="avatar")
    @commands.check(permissions.is_owner)
    async def change_avatar(self, ctx, url: str = None):
        """ Change avatar. """
        if url is None and len(ctx.message.attachments) == 1:
            url = ctx.message.attachments[0].url
        else:
            url = url.strip('<>') if url else None
        try:
            # NOTE(review): requests.get blocks the event loop; an aiohttp
            # fetch would be the non-blocking alternative - confirm intent.
            bio = requests.get(url).content
            await self.bot.user.edit(avatar=bio)
            await ctx.send(f"Successfully changed the avatar. Currently using:\n{url}")
        except aiohttp.InvalidURL:
            await ctx.send("The URL is invalid...")
        except discord.InvalidArgument:
            await ctx.send("This URL does not contain a useable image")
        except discord.HTTPException as err:
            await ctx.send(err)
        except TypeError:
            await ctx.send("You need to either provide an image URL or upload one with the command")

    @change.command(name="def_prefix")
    @commands.check(permissions.is_owner)
    async def change_default_prefix(self, ctx, prefix):
        """Changes the default permanent prefix"""
        dataIO.change_value("config.json", "prefix", prefix)
        await ctx.send(f"Successfully changed default prefix to **{prefix}**")

    @commands.command(aliases=["api_for", "api"])
    @commands.check(permissions.is_owner)
    async def search_api(self, ctx, category=""):
        """ Search for some apis """
        # Check the special "categories" keyword first: previously this branch
        # was unreachable because any non-empty string matched `category != ""`.
        if category.lower() == "categories":
            your_api = requests.get("https://api.publicapis.org/categories").json()
        elif category != "":
            your_api = requests.get(
                f"https://api.publicapis.org/entries?category={category.lower()}&https=true"
            ).json()
        else:
            your_api = requests.get("https://api.publicapis.org/random?auth=null").json()
        if your_api['count'] == 0:
            return await ctx.send("No APIs found")
        apis = f"{your_api['entries'][0]['Category']} apis\n"

        def auth(index):
            # Use the parameter, not the enclosing loop variable `i` as before.
            if your_api['entries'][index]['Auth'] is not None:
                return your_api['entries'][index]['Auth']
            return "None"

        for i in range(your_api["count"]):
            apis += f"**{i+1}**. {your_api['entries'][i]['API']} - {your_api['entries'][i]['Description']} | Auth: {auth(i)} | Cors: {your_api['entries'][i]['Cors']} | Link: {your_api['entries'][i]['Link']}\n"
        if len(str(apis)) > 1999:
            # Trim to Discord's 2000-char limit, cutting back to a sentence end.
            apis = apis[:2000][::-1]
            arr = apis.index(".")
            apis = apis[arr:][::-1]
        return await ctx.send(apis)

    @commands.group(aliases=["file"])
    @commands.check(permissions.is_owner)
    async def fil(self, ctx):
        # File-management command group; show help when no subcommand given.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(str(ctx.command))

    @fil.group()
    @commands.check(permissions.is_owner)
    async def add(self, ctx, location = ""):
        # Save an attached file, optionally into `location` (created if missing).
        # NOTE(review): the "\" separator is Windows-specific; os.path.join
        # would be portable - confirm the deployment platform.
        if len(ctx.message.attachments) == 1 and location != "":
            try:
                await ctx.message.attachments[0].save(f"{location}\{ctx.message.attachments[0].filename}")
            except FileNotFoundError:
                await ctx.send("Directory not found. Creating directory...")
                os.makedirs(location)
                await ctx.message.attachments[0].save(f"{location}\{ctx.message.attachments[0].filename}")
        elif len(ctx.message.attachments) == 1 and location == "":
            await ctx.message.attachments[0].save(f"{ctx.message.attachments[0].filename}")
        else:
            return await ctx.send("Provide a file as an attachment")
        await ctx.message.delete(delay=1)
        return await ctx.send(f"The {ctx.message.attachments[0].filename} has been added")

    @fil.group()
    @commands.check(permissions.is_owner)
    async def remove(self, ctx, file_name_with_path):
        # Delete a file after an interactive y/n confirmation.
        await ctx.send("Are you sure you want to remove the file. Please remember to unload if the file is and existing cog.\n(y/n)")

        def mcheck(message):
            # Only accept the confirmation from the command's author.
            if message.author == ctx.author:
                return True
            return False

        try:
            answer = await self.bot.wait_for('message', timeout=20, check=mcheck)
        except asyncio.TimeoutError:
            return await ctx.send("You didn't respond in time")
        if answer.content == "y":
            pass
        else:
            return await ctx.send("As you wish, the file will not be removed")
        try:
            default.delete(file_name_with_path)
            await ctx.send(f"Removed {file_name_with_path}")
        except Exception as e:
            await ctx.send(e)
        await ctx.message.delete(delay=1)
def setup(bot):
    # discord.py extension entry point: register the admin cog with the bot.
    bot.add_cog(admin(bot))
{
"api_name": "discord.ext.commands.Cog",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "my_utils.default.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
75167070268 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import math
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length `window_size`, normalised to sum 1."""
    centre = window_size / 2
    weights = torch.Tensor(
        [exp(-((i - centre) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)]
    )
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window for SSIM.

    The 2-D kernel is the outer product of a 1-D Gaussian (sigma = 1.5) with
    itself, expanded (without copying) across `channel` depthwise-conv groups.
    """
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4; returning the tensor directly is equivalent.
    window = _2D_window.expand(channel, 1, window_size, window_size)
    return window
def SSIM1(img1, img2):
    """Mean SSIM between two image batches of shape (N, C, H, W).

    Uses an 11x11 Gaussian window with depthwise (grouped) convolutions and
    fixed stabilising constants C1 = 0.01**2, C2 = 0.03**2 (images in [0, 1]).
    NOTE(review): pad = int(window_size/11) evaluates to 1 rather than
    window_size // 2 = 5, so the SSIM map is smaller than the input and
    border statistics are partially cropped - confirm this is intended.
    """
    (_, channel, _, _) = img1.size()
    window_size = 11
    pad = int(window_size/11)
    window = create_window(window_size, channel).to(img1.device)
    # Local means.
    mu1 = F.conv2d(img1, window, padding = pad, groups = channel)
    mu2 = F.conv2d(img2, window, padding = pad, groups = channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2
    # Local variances and covariance via E[x*y] - E[x]E[y].
    sigma1_sq = F.conv2d(img1*img1, window, padding = pad, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = pad, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = pad, groups = channel) - mu1_mu2
    C1 = 0.01**2
    C2 = 0.03**2
    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def SSIM(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    """SSIM between two image batches of shape (N, C, H, W).

    :param window_size: Gaussian window side, clamped to the image size.
    :param window: optional precomputed window (see create_window).
    :param size_average: if True return a scalar mean, else a per-image mean.
    :param full: if True also return the mean contrast sensitivity.
    :param val_range: dynamic range L; inferred from img1's values when None.
    """
    # Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
    if val_range is None:
        if torch.max(img1) > 128:
            max_val = 255
        else:
            max_val = 1
        if torch.min(img1) < -0.5:
            min_val = -1
        else:
            min_val = 0
        L = max_val - min_val
    else:
        L = val_range
    padd = 0
    (_, channel, height, width) = img1.size()
    if window is None:
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)
    # Local means (valid convolution: padding 0, so the map is smaller).
    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2
    # Local variances and covariance via E[x*y] - E[x]E[y].
    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2
    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)  # contrast sensitivity
    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
    if size_average:
        ret = ssim_map.mean()
    else:
        ret = ssim_map.mean(1).mean(1).mean(1)
    if full:
        return ret, cs
    return ret
def PSNR(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images in the [0, 255] range.

    Values are rescaled to [0, 1] before the MSE; identical inputs give 100.
    """
    squared_error = (img1 / 255. - img2 / 255.) ** 2
    mse = np.mean(squared_error)
    if mse == 0:
        # Identical images: PSNR is infinite; cap it at 100 dB by convention.
        return 100
    PIXEL_MAX = 1
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def CIEDE2000(Lab_1, Lab_2):
    '''Calculates CIEDE2000 color distance between two CIE L*a*b* colors.

    Inputs are (L, a, b) triples; returns the scalar dE_00 with the standard
    parametric weights k_L = k_C = k_H = 1.
    '''
    C_25_7 = 6103515625 # 25**7
    L1, a1, b1 = Lab_1[0], Lab_1[1], Lab_1[2]
    L2, a2, b2 = Lab_2[0], Lab_2[1], Lab_2[2]
    # Chromas and the G correction applied to the a* axis.
    C1 = math.sqrt(a1 ** 2 + b1 ** 2)
    C2 = math.sqrt(a2 ** 2 + b2 ** 2)
    C_ave = (C1 + C2) / 2
    G = 0.5 * (1 - math.sqrt(C_ave ** 7 / (C_ave ** 7 + C_25_7)))
    # Primed quantities (a* rescaled by 1+G).
    L1_, L2_ = L1, L2
    a1_, a2_ = (1 + G) * a1, (1 + G) * a2
    b1_, b2_ = b1, b2
    C1_ = math.sqrt(a1_ ** 2 + b1_ ** 2)
    C2_ = math.sqrt(a2_ ** 2 + b2_ ** 2)
    # Hue angles in [0, 2*pi); zero for achromatic colours.
    if b1_ == 0 and a1_ == 0:
        h1_ = 0
    elif a1_ >= 0:
        h1_ = math.atan2(b1_, a1_)
    else:
        h1_ = math.atan2(b1_, a1_) + 2 * math.pi
    if b2_ == 0 and a2_ == 0:
        h2_ = 0
    elif a2_ >= 0:
        h2_ = math.atan2(b2_, a2_)
    else:
        h2_ = math.atan2(b2_, a2_) + 2 * math.pi
    # Differences in lightness, chroma, and (wrapped) hue.
    dL_ = L2_ - L1_
    dC_ = C2_ - C1_
    dh_ = h2_ - h1_
    if C1_ * C2_ == 0:
        dh_ = 0
    elif dh_ > math.pi:
        dh_ -= 2 * math.pi
    elif dh_ < -math.pi:
        dh_ += 2 * math.pi
    dH_ = 2 * math.sqrt(C1_ * C2_) * math.sin(dh_ / 2)
    # Averages, with the hue average wrapped into a consistent branch.
    L_ave = (L1_ + L2_) / 2
    C_ave = (C1_ + C2_) / 2
    _dh = abs(h1_ - h2_)
    _sh = h1_ + h2_
    C1C2 = C1_ * C2_
    if _dh <= math.pi and C1C2 != 0:
        h_ave = (h1_ + h2_) / 2
    elif _dh > math.pi and _sh < 2 * math.pi and C1C2 != 0:
        h_ave = (h1_ + h2_) / 2 + math.pi
    elif _dh > math.pi and _sh >= 2 * math.pi and C1C2 != 0:
        h_ave = (h1_ + h2_) / 2 - math.pi
    else:
        h_ave = h1_ + h2_
    # Weighting functions T, rotation term, and the S_L/S_C/S_H scalings.
    T = 1 - 0.17 * math.cos(h_ave - math.pi / 6) + 0.24 * math.cos(2 * h_ave) + 0.32 * math.cos(
        3 * h_ave + math.pi / 30) - 0.2 * math.cos(4 * h_ave - 63 * math.pi / 180)
    h_ave_deg = h_ave * 180 / math.pi
    if h_ave_deg < 0:
        h_ave_deg += 360
    elif h_ave_deg > 360:
        h_ave_deg -= 360
    dTheta = 30 * math.exp(-(((h_ave_deg - 275) / 25) ** 2))
    R_C = 2 * math.sqrt(C_ave ** 7 / (C_ave ** 7 + C_25_7))
    S_C = 1 + 0.045 * C_ave
    S_H = 1 + 0.015 * C_ave * T
    Lm50s = (L_ave - 50) ** 2
    S_L = 1 + 0.015 * Lm50s / math.sqrt(20 + Lm50s)
    R_T = -math.sin(dTheta * math.pi / 90) * R_C
    # Final combination with unit parametric weights.
    k_L, k_C, k_H = 1, 1, 1
    f_L = dL_ / k_L / S_L
    f_C = dC_ / k_C / S_C
    f_H = dH_ / k_H / S_H
    dE_00 = math.sqrt(f_L ** 2 + f_C ** 2 + f_H ** 2 + R_T * f_C * f_H)
    return dE_00
def rgb2xyz(rgb):
    """Convert an sRGB triple (0-255 per channel) to CIE XYZ (0-100 scale)."""
    def linearise(c):
        # Undo the sRGB gamma curve, then scale to 0-100.
        c = c / 255.
        if c > 0.04045:
            c = ((c + 0.055) / 1.055) ** 2.4
        else:
            c = c / 12.92
        return c * 100
    r, g, b = (linearise(c) for c in rgb)
    return [
        r * 0.4124 + g * 0.3576 + b * 0.1805,
        r * 0.2126 + g * 0.7152 + b * 0.0722,
        r * 0.0193 + g * 0.1192 + b * 0.9505,
    ]
def xyz2lab(xyz):
    """Convert CIE XYZ (0-100 scale, D65 reference white) to CIE L*a*b*.

    The input sequence is left untouched; the previous version normalised
    the caller's list in place, silently corrupting it.
    """
    def f(c):
        # Cube root above the CIE threshold, linear segment below it.
        if c > 0.008856:
            return c ** (1. / 3.)
        return (7.787 * c) + (16. / 116.)

    # Normalise by the D65 white point on local copies, not on `xyz` itself.
    x = f(xyz[0] / 95.047)
    y = f(xyz[1] / 100.00)
    z = f(xyz[2] / 108.883)
    lab = [None, None, None]
    lab[0] = (116. * y) - 16.
    lab[1] = 500. * (x - y)
    lab[2] = 200. * (y - z)
    return lab
{
"api_name": "torch.Tensor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.conv2... |
def Mark(
    x=None, text='', color='',
    bold=True, underline=False):
    """Print a highlighted description line and optionally an object x.

    Useful for debugging; supports ANSI colours plus bold and underline."""
    ANSI = {
        '': '',
        None: '',
        'purple': '\033[95m',
        'cyan': '\033[96m',
        'darkcyan': '\033[36m',
        'blue': '\033[94m',
        'green': '\033[92m',
        'yellow': '\033[93m',
        'red': '\033[91m',
        'bold': '\033[1m',
        'underline': '\033[4m',
        'end': '\033[0m'}
    prefix = ''
    suffix = ''
    if color != '' or bold or underline:
        # Only emit the reset sequence when some styling is active.
        suffix = '\033[0m'
    if bold:
        prefix += ANSI['bold']
    if underline:
        prefix += ANSI['underline']
    prefix += ANSI[color]
    print("*** " + prefix + text + suffix)
    if x is not None:
        print(x)
'''
## examples
Mark(2*10, 'xxx', bold=False, underline=True)
Mark(2*10, 'xxx', bold=False, underline=True, color='red')
Mark(x='', text='xxx', bold=True, underline=True, color='green')
'''
## These are the default functions to communicate with OS
# re-write them if needed
# (used by CustomMarkFcn and the file readers so they can be swapped out,
#  e.g. for a remote or virtual filesystem)
FileExists = os.path.exists
OpenFile = open
ListDir = os.listdir
def CustomMarkFcn(fn=None, logTime=True, color=''):
    """Return a Mark-like function which prints x with a description text
    and, if fn is given, also writes the same output to that file.

    NOTE(review): the open mode is decided once, at creation time. If fn
    did not exist then, every call reopens it with 'w' and truncates it,
    so only the last call's output survives - confirm this is intended.
    """
    fileExists = False
    if fn != None:
        fileExists = FileExists(fn)
    if fileExists:
        appendWrite = 'a' # append if already exists
    else:
        appendWrite = 'w' # make a new file if not
    # define the Marking Fcn here
    def F(x=None, text='', color=color, bold=True, underline=False):
        timeStr = str(datetime.datetime.now())[:19]
        if fn is not None:
            # Temporarily redirect stdout so Mark's prints land in the file.
            orig_stdout = sys.stdout
            f = OpenFile(fn, appendWrite)
            sys.stdout = f
            if logTime:
                Mark(text='This was run at this time:' + timeStr,
                     bold=False, underline=False, color='')
            Mark(x=x, text=text, color='', bold=False, underline=False)
            f.close()
            sys.stdout = orig_stdout
        if logTime:
            Mark(text='This was run at this time:' + timeStr)
        Mark(x=x, text=text, color=color, bold=bold, underline=underline)
    return F
'''
fn = 'log.txt'
CustomMark = CustomMarkFcn(fn=fn)
CustomMark(x=2, text='NO')
'''
# to print a function definition
# (inspect.getsourcelines returns (source_lines, starting_line_number))
PrintFcnContent = inspect.getsourcelines
## mapping a dictionary via map
#def MapDict(f, dic):
# return dict(map(lambda (k,v): (k, f(v)), dic.iteritems()))
def BitOr(x):
    """Fold the elements of x together with bitwise OR (SQL's BIT_OR)."""
    return functools.reduce(lambda acc, val: acc | val, x)
def Signif(n):
    """Build a function that rounds its argument to n significant digits.

    NaN and 0 pass through unchanged; the sign is preserved by rounding
    the absolute value and restoring the sign afterwards.
    """
    def RoundTo(x):
        if math.isnan(x):
            return x
        if x == 0:
            return 0
        magnitude = np.absolute(x)
        rounded = round(
            magnitude,
            -int(math.floor(math.log10(magnitude)) + (-n + 1)))
        return -rounded if x < 0 else rounded
    return RoundTo
### Reading / Writing Data
## read csv
def ReadCsv(
    fn,
    sep=',',
    nrows=None,
    typeDict={},
    header='infer',
    engine='c',
    error_bad_lines=False,
    printLog=False):
    """Read a delimited file into a DataFrame via the module-level OpenFile.

    NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 and removed
    in 2.0 (replaced by `on_bad_lines`) - confirm the pinned pandas version.
    NOTE(review): the mutable default `typeDict={}` is shared across calls.
    """
    with OpenFile(fn, 'r') as f:
        df = pd.read_csv(
            f, sep=sep, nrows=nrows, dtype=typeDict, header=header,
            engine=engine, error_bad_lines=error_bad_lines)
    if printLog:
        print(fn + ' was read.')
    return df
## write csv (or tsv)
def WriteCsv(
    fn,
    df,
    sep=',',
    append=False,
    index=False,
    printLog=False):
    """Write a DataFrame as a csv/tsv via the module-level OpenFile.

    When append is True the file is opened in 'a' mode and no header row
    is written, so shards can be streamed into one file.
    """
    wa = 'w'
    header = list(df.columns)
    if append:
        wa = 'a'
        header = False
    with OpenFile(fn, wa) as f:
        df.to_csv(f, sep=sep, index=index, mode=wa, header=header)
    if printLog:
        print(fn + ' was written.')
    return None
## reads multiple data files according to a pattern given.
## Filters them and then row binds them.
## the pattern is given in three lists: prefix, middle, suffix
def ReadMultipleDf(
    prefix,
    middle,
    suffix,
    ReadF=ReadCsv,
    DfFilterF=None):
    """Read files named prefix[i]+middle[i]+suffix[i], filter each, row-bind.

    Shorter pattern lists are recycled to the length of the longest one.
    Returns the concatenated DataFrame with a fresh index.
    """
    n = max([len(prefix), len(middle), len(suffix)])
    def FillList(x):
        # Recycle a short pattern list so all three lists have length n.
        if len(x) < n:
            x = x * n
        x = x[:n]
        return x
    prefix = FillList(prefix)
    suffix = FillList(suffix)
    middle = FillList(middle)
    nameDf = pd.DataFrame({'prefix': prefix, 'middle': middle, 'suffix': suffix})
    fileList = (nameDf['prefix'] + nameDf['middle'] + nameDf['suffix']).values
    dfList = []
    for f in fileList:
        df = ReadF(f)
        if DfFilterF is not None:
            df = DfFilterF(df)
        dfList.append(df)
    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
    return pd.concat(dfList, ignore_index=True)
## Read all files in a dir with same columns
# and concatenating them
def ReadDirData(
    path, ListDirF=ListDir, ReadF=ReadCsv,
    WriteF=WriteCsv, writeFn=None, DfFilterF=None):
    """Read every file in `path` (same columns), filter each partition, and
    either concatenate them (writeFn is None) or stream them to one file.

    Returns the concatenated DataFrame, or None when writing to writeFn
    (or when the directory is empty).
    """
    print(path)
    fileList = ListDirF(path)
    print(fileList)
    dfList = []
    for i in range(len(fileList)):
        f = path + fileList[i]
        print("*** opening: " + f)
        df = ReadF(f)
        print("data shape for this partition:")
        print(df.shape)
        if DfFilterF is not None:
            df = DfFilterF(df)
            print("data shape for this partition after filtering:")
            print(df.shape)
        ## we either row bind data or we write data if writeFn is not None
        if writeFn is None:
            dfList.append(df)
        else:
            WriteF(fn=writeFn, df=df, sep=',', append=(i > 0))
        print("First rows of data:")
        print(df.iloc[:5])
    # DataFrame.append was removed in pandas 2.0; concat all partitions once.
    outDf = pd.concat(dfList, ignore_index=True) if dfList else None
    return outDf
## Read all files in a dir with same columns
# and concatenating them
def ReadDirData_parallel(
    path, ListDirF=ListDir, ReadF=ReadCsv,
    WriteF=WriteCsv, writeFn=None,
    DfFilterF=None, returnDfDict=False,
    limitFileNum=None):
    """Read (up to limitFileNum) files in `path` partition by partition, then
    return the per-partition dict, the concatenated DataFrame, or None after
    streaming to writeFn.

    NOTE: despite the name, partitions are currently read sequentially.
    """
    print(path)
    fileList = ListDirF(path)
    print(fileList)
    if limitFileNum is not None:
        k = min(limitFileNum, len(fileList))
        fileList = fileList[:k]
    outDf = None
    dfDict = {}

    def LoadPartition(i):
        # Read partition i, optionally filter it, and stash it in dfDict.
        f = path + fileList[i]
        Mark(text="opening: partition " + str(i) + '; ' + f)
        df = ReadF(f)
        Mark(df.shape, text="data shape for partition " + str(i))
        if DfFilterF is not None:
            df = DfFilterF(df)
            Mark(
                df.shape,
                text="data shape for partition " + str(i) + " after filtering:")
        dfDict[i] = df

    for i in range(len(fileList)):
        LoadPartition(i)
    if returnDfDict:
        return dfDict
    ## we either row bind data or we write data if writeFn is not None
    if writeFn is None:
        if dfDict:
            outDf = pd.concat(dfDict.values())
    else:
        for i in range(len(fileList)):
            WriteF(fn=writeFn, df=dfDict[i], sep=',', append=(i > 0))
    # outDf is None when writing to a file; the preview previously crashed
    # here with AttributeError (None.iloc) in that case.
    if outDf is not None:
        Mark(outDf.iloc[:10], text="First rows of data:")
    return outDf
def Write_shardedData_parallel(
    df, fnPrefix, path, fnExten=".csv",
    partitionCol=None,
    shardNum=100, WriteF=WriteCsv,
    limitFileNum=None):
    """Write `df` into sharded csv files, bucketing rows by hashing
    `partitionCol` (or the row number when None) into `shardNum` shards.

    Sharding lets files be written and later read independently. The
    temporary bucketing columns are removed from `df` before returning.
    """
    usingDummyCol = partitionCol is None
    if usingDummyCol:
        partitionCol = "dummy_col"
        df["dummy_col"] = range(len(df))

    def Bucket(s):
        # hashlib.sha1 requires bytes on Python 3; str raised TypeError before.
        return int(hashlib.sha1(str(s).encode("utf-8")).hexdigest(), 16) % (shardNum)

    df["shard"] = df[partitionCol].map(Bucket)

    def Write(bucket):
        df0 = df[df["shard"] == bucket]
        fn = path + fnPrefix + "_" + str(bucket) + ".csv"
        WriteF(fn=fn, df=df0, sep=',', append=False)
        print(fn + " was written")

    buckets = list(set(df["shard"].values))
    if limitFileNum is not None:
        k = min(limitFileNum, len(buckets))
        buckets = buckets[:k]
    for bucket in buckets:
        Write(bucket)
    # Clean up the temporary columns. The original dummy-col delete was dead
    # code: partitionCol had already been reassigned, so the `is None` check
    # could never be true and the helper columns leaked into the caller's df.
    del df["shard"]
    if usingDummyCol:
        del df["dummy_col"]
    return None
"""
df = GenUsageDf_forTesting()
path = ""
Write_shardedData_parallel(
df=df, fnPrefix="test", path=path, fnExten=".csv",
partitionCol="user_id",
WriteF=WriteCsv)
"""
############### Part 1: Data frame and data wrangling functions
## generate a data frame manually for testing data frame functions
# and usage metrics
def GenUsageDf_forTesting():
  """Generates a small synthetic product-usage data frame for testing.

  Each row is one usage event with columns: country, user_id, expt
  ('base'/'test' arm), date, time, end_time, prod (feature name) and
  form_factor ('COMP'/'PHN'). Two columns are derived at the end:
  'duration' (event length in seconds) and 'value' (uniform random in
  [1, 5); NOTE: not seeded, so 'value' differs between calls).
  """
  df = pd.DataFrame(columns=[
      'country', 'user_id', 'expt', 'date', 'time',
      'end_time', 'prod', 'form_factor'])
  # hand-written event rows; users 0-2 and 7-16 are in 'base', 3-6 and 9-11
  # in 'test', spread over 2017-04-12 .. 2017-04-16
  df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:03:00',
                     '2017-04-12 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:04:01',
                     '2017-04-12 00:05:03', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:05:05',
                     '2017-04-12 00:06:04', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:06:05',
                     '2017-04-12 00:06:08', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '0', 'base', '2017-04-12', '2017-04-12 00:06:30',
                     '2017-04-12 00:06:45', 'exploreFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:07:00',
                     '2017-04-12 00:07:50', 'editingFeat', 'PHN']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:14:00',
                     '2017-04-12 00:14:10', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:16:00',
                     '2017-04-12 00:17:09', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:18:00',
                     '2017-04-12 00:18:30', 'browsingFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:20:00',
                     '2017-04-12 00:21:00', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:22:00',
                     '2017-04-12 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:03:00',
                     '2017-04-12 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:04:01',
                     '2017-04-12 00:05:03', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['JP', '1', 'base', '2017-04-12', '2017-04-12 00:05:05',
                     '2017-04-12 00:06:04', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:06:05',
                     '2017-04-12 00:06:08', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:06:30',
                     '2017-04-12 00:06:45', 'exploreFeat', 'COMP']
  df.loc[len(df)] = ['US', '2', 'base', '2017-04-12', '2017-04-12 00:07:00',
                     '2017-04-12 00:07:50', 'editingFeat', 'PHN']
  df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:14:00',
                     '2017-04-12 00:14:10', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:14:20',
                     '2017-04-12 00:18:59', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:19:00',
                     '2017-04-12 00:20:00', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '3', 'test', '2017-04-12', '2017-04-12 00:20:20',
                     '2017-04-12 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:03:10',
                     '2017-04-14 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:04:10',
                     '2017-04-14 00:05:03', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:05:15',
                     '2017-04-14 00:06:04', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:06:01',
                     '2017-04-14 00:06:08', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '4', 'test', '2017-04-14', '2017-04-14 00:06:35',
                     '2017-04-14 00:06:45', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:03:07',
                     '2017-04-14 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:04:04',
                     '2017-04-14 00:05:03', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:05:04',
                     '2017-04-14 00:06:04', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:06:03',
                     '2017-04-14 00:06:08', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['US', '5', 'test', '2017-04-14', '2017-04-14 00:06:28',
                     '2017-04-14 00:06:45', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:14:01',
                     '2017-04-14 00:14:10', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:14:19',
                     '2017-04-14 00:18:59', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:19:10',
                     '2017-04-14 00:20:00', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '6', 'test', '2017-04-14', '2017-04-14 00:20:11',
                     '2017-04-14 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:14:11',
                     '2017-04-15 00:14:10', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:14:22',
                     '2017-04-15 00:18:59', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:19:57',
                     '2017-04-15 00:20:00', 'locFeat', 'COMP']
  df.loc[len(df)] = ['JP', '7', 'base', '2017-04-15', '2017-04-15 00:21:56',
                     '2017-04-15 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:03:00',
                     '2017-04-12 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:04:01',
                     '2017-04-12 00:05:03', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:05:05',
                     '2017-04-12 00:06:04', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:06:05',
                     '2017-04-12 00:06:08', 'PresFeat', 'PHN']
  df.loc[len(df)] = ['FR', '8', 'base', '2017-04-12', '2017-04-12 00:06:30',
                     '2017-04-12 00:06:45', 'exploreFeat', 'COMP']
  df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:14:11',
                     '2017-04-15 00:14:10', 'photoFeat', 'COMP']
  df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:14:22',
                     '2017-04-15 00:18:59', 'locFeat', 'COMP']
  df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:19:57',
                     '2017-04-15 00:20:00', 'locFeat', 'COMP']
  df.loc[len(df)] = ['FR', '9', 'test', '2017-04-15', '2017-04-15 00:21:56',
                     '2017-04-15 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['NG', '10', 'test', '2017-04-16', '2017-04-15 00:21:56',
                     '2017-04-15 00:22:00', 'StorageFeat', 'PHN']
  df.loc[len(df)] = ['IR', '11', 'test', '2017-04-12', '2017-04-15 00:21:56',
                     '2017-04-15 00:22:00', 'browsingFeat', 'PHN']
  df.loc[len(df)] = ['IR', '12', 'base', '2017-04-16', '2017-04-15 00:21:56',
                     '2017-04-15 00:22:00', 'watchFeat', 'PHN']
  df.loc[len(df)] = ['IR', '13', 'base', '2017-04-12', '2017-04-12 00:03:00',
                     '2017-04-12 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['RU', '14', 'base', '2017-04-12', '2017-04-12 00:03:00',
                     '2017-04-12 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['RU', '15', 'base', '2017-04-13', '2017-04-13 00:03:00',
                     '2017-04-13 00:04:00', 'PresFeat', 'COMP']
  df.loc[len(df)] = ['RU', '16', 'base', '2017-04-14', '2017-04-14 00:03:00',
                     '2017-04-14 00:04:00', 'PresFeat', 'COMP']
  # prefix numeric ids so user ids are clearly strings ('id0', 'id1', ...)
  df['user_id'] = 'id' + df['user_id']
  def F(x):
    # parse the first 19 chars: "YYYY-mm-dd HH:MM:SS"
    return(datetime.datetime.strptime(x[:19], "%Y-%m-%d %H:%M:%S"))
  for col in ['time', 'end_time']:
    df[col] = df[col].map(F)
  # event duration in seconds
  df['duration'] = (df['end_time'] - df['time']) / np.timedelta64(1, 's')
  # random per-event value; NOT seeded, differs on every call
  df['value'] = np.random.uniform(low=1.0, high=5.0, size=df.shape[0])
  return df
def BuildCondInd(df, condDict):
  """Builds a boolean Series selecting rows of df matching condDict.

  condDict maps column name -> list of allowed values; a row is selected
  when every listed column's value is in its allowed list. A None or empty
  value list means "no constraint" for that column.
  """
  n = df.shape[0]
  ind = pd.Series([True] * n)
  # python 3 fix: dict.keys() is a view and the original cols[i]
  # subscripting raised TypeError; iterate the items directly
  for col, valueList in condDict.items():
    if valueList is not None and valueList != []:
      ind0 = (df[col].isin(valueList))
      # boolean multiplication acts as elementwise AND
      ind = ind * ind0
  return ind
'''
df = pd.DataFrame({
'a':[2, 1, 3, 2, 2, 2],
'b':['A', 'A', 'B', 'C', 'C', 'C'],
'c':['11','22','22','22', '22', '22']})
ind = BuildCondInd(df=df, condDict={'a':[1, 2], 'b':['A', 'B']})
df[ind]
ind = BuildCondInd(df=df, condDict={'a':[1, 2], 'b':None})
df[ind]
'''
## get the sub df immediately
def SubDf_withCond(df, condDict, resetIndex=True):
  """Returns the rows of df matching condDict (column -> allowed values).

  With an empty/None condDict the original frame is returned untouched.
  """
  if not condDict:
    return df
  df = df.reset_index(drop=True)
  mask = BuildCondInd(df=df, condDict=condDict)
  subDf = df[mask].copy()
  return subDf.reset_index(drop=True) if resetIndex else subDf
## subset df based on regex filters on string columns
# every column is given in a key and the value is a regex
def BuildRegexInd(df, regDict):
  """Builds a boolean Series for rows whose columns match given regexes.

  regDict maps column name -> list of regex patterns; a row qualifies for a
  column when ANY of its patterns matches (values are str()-converted
  first), and must qualify for every constrained column. None/[] means
  "no constraint".
  """
  n = df.shape[0]
  ind = pd.Series([True] * n)
  # python 3 fix: dict.keys() is not subscriptable; iterate items instead
  for col, valueList in regDict.items():
    if valueList is not None and valueList != []:
      ind0 = pd.Series([False] * n)
      for value in valueList:
        # boolean + acts as OR across the column's patterns
        ind0 = ind0 + df[col].map(str).str.contains(value)
      ind = ind * ind0
  return ind
'''
df = pd.DataFrame(
{'a':[24, 12, 63, 2, 3312, 2],
'b':['A', 'A', 'BBAA', 'CD', 'CE', 'CF'],
'c':['11','22','22','23', '22', '22']})
ind = BuildRegexInd(df=df, regDict={'a':['1', '2'], 'b':['A', 'B']})
Mark(df[ind])
ind = BuildRegexInd(df=df, regDict={'a':['1', '3'], 'b':None})
Mark(df[ind])
ind = BuildRegexInd(df=df, regDict={'b':['B', 'C'], 'b':['^(?:(?!CE).)*$']})
Mark(df[ind])
## column b does not include CE but it includes A or B.
ind = BuildRegexInd(df=df, regDict={'b':['^(?!.*CE).*B.*$', '^(?!.*CE).*A.*$']})
Mark(df[ind])
'''
## check for two strings regex
def Regex_includesBothStr(s1, s2):
  """Builds a regex matching strings containing both s1 and s2, any order."""
  # one lookahead per required substring, anchored over the whole string
  lookaheads = ''.join('(?=.*' + s + ')' for s in (s1, s2))
  return '^' + lookaheads + '.*$'
'''
reg = Regex_includesBothStr(' cat ', ' dog ')
print(reg)
print(pd.Series(['cat-dog', ' cat hates dog ', 'tiger']).str.contains(reg))
'''
## rehashing a column (col)
# the input is a dictionary of data frames with that column
# we make sure the rehashing is fixed across data frames
def RehashCol_dfDict(dfDict, col, newCol=None, omitCol=False):
  """Re-hashes column `col` with integer ids shared across all frames.

  Builds one value -> id lookup from the union of `col` values over every
  frame in dfDict, then left-joins it into each frame as `newCol`
  (default: col + '_hashed'). With omitCol the original column is dropped.
  """
  if newCol is None:
    newCol = col + '_hashed'
  # accumulate the distinct values seen across all frames
  values = []
  for df0 in dfDict.values():
    values = list(set(values + list(df0[col].values)))
  lookupDf = pd.DataFrame({col: values, 'tempCol': range(len(values))})
  newDfDict = {}
  for key, df0 in dfDict.items():
    merged = pd.merge(df0, lookupDf, on=[col], how='left')
    merged[newCol] = merged['tempCol']
    del merged['tempCol']
    if omitCol:
      del merged[col]
    newDfDict[key] = merged
  return newDfDict
# it converts a float or string date to datetime
def FloatOrStr_toDate(x, format="%Y%m%d"):
if (x == None) or (x == 'nan') or (x == np.nan):
return pd.NaT
if (type(x).__name__ == 'float') and math.isnan(x):
return pd.NaT
s = str(x)
if s == 'nan':
return pd.NaT
import re
s = re.sub('_', '', s)
s = re.sub('-', '', s)
s = re.sub(':', '', s)
s = s[:8]
return datetime.datetime.strptime(s, format)
## convert to datetime
def ConvertToDateTime(x, dateTimeFormat="%Y-%m-%d %H:%M:%S", strLen=19):
if (x == None) or (x == 'nan') or (x == np.nan):
return pd.NaT
if (type(x).__name__ == 'float') and math.isnan(x):
return pd.NaT
s = str(x)
if s == 'nan':
return pd.NaT
return datetime.datetime.strptime(x[:strLen], dateTimeFormat)
## also lets define a function generator version for easy mapping
def ConvertToDateTimeFcn(dateTimeFormat="%Y-%m-%d %H:%M:%S", strLen=19):
def F(x):
return ConvertToDateTime(x, dateTimeFormat=dateTimeFormat, strLen=strLen)
return F
## convert weekday returned by isoweekday() to string
def WeekDayToStr(x):
d = {1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat', 7:'Sun'}
if x in d.keys():
return d[x]
return 'nan'
'''
x = datetime.datetime(2017, 1, 1)
u = x.isoweekday()
WeekDayToStr(u)
'''
## assigns object types to string
# and assign "nan" to missing
def PrepareDf(df):
# colTypes = [str(df[col].dtype) for col in df.columns]
for col in df.columns:
if str(df[col].dtype) == "object":
df[col].fillna("nan", inplace=True)
df[col] = df[col].astype(str)
return df
# variable type
def Type(x):
return type(x).__name__
## short hashing
def ShortHash(s, length=8):
s = str(s).encode('utf-8')
hasher = hashlib.sha1(s)
return base64.urlsafe_b64encode(hasher.digest()[:length])
"""
ShortHash("asas")
"""
## integrates columns (integCols) out from a data frame.
# It uses integFcn for integration
# it only keeps valueCols for integration
def IntegOutDf(df, integFcn, integOutCols, valueCols=None):
  """Integrates (aggregates) integOutCols out of df using integFcn.

  valueCols are the columns to aggregate (default: every column outside
  integOutCols); the remaining columns act as group-by keys. When no group
  columns remain, everything is aggregated into a single row.
  The caller's frame is left unmodified (the original grew a 'tempCol').
  """
  cols = list(df.columns)
  if valueCols is None:
    valueCols = [c for c in cols if c not in integOutCols]
  gCols = [c for c in cols if c not in (integOutCols + valueCols)]
  if gCols:
    df = df[gCols + valueCols]
  else:
    # no grouping columns: group on a constant so aggregate() still works;
    # copy first so the temporary column never leaks into the caller's
    # frame, and keep only valueCols so integOutCols are truly dropped
    df = df[valueCols].copy()
    gCols = ['tempCol']
    df['tempCol'] = 1
  dfAgg = df.groupby(by=gCols).aggregate(integFcn).reset_index()
  if 'tempCol' in dfAgg.columns:
    del dfAgg['tempCol']
  return dfAgg
'''
size = 10
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'], size=size, replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
print(df)
IntegOutDf(df, integFcn=sum, integOutCols=['categ1'], valueCols=['col1', 'col2'])
'''
## aggregates df with different agg fcns for multiple columns
# gCols is not needed since it will be assume to be
# (all columns - the columns being aggregated)
def AggWithDict(df, aggDict, gCols=None):
  """Aggregates df with per-column functions given in aggDict.

  Groups by gCols; when gCols is None every column NOT being aggregated
  becomes a group key.
  """
  if gCols is None:
    gCols = [c for c in df.columns if c not in aggDict]
  return df.groupby(gCols).aggregate(aggDict).reset_index()
'''
size = 10
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'],
size=size,
replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
df = df.sort_values(['categ2', 'categ1', 'col1', 'col2'])
print(df)
aggDf0 = AggWithDict(df=df, aggDict={'col1':sum, 'col2':min})
aggDf1 = AggWithDict(df=df, aggDict={'col1':sum, 'col2':min}, gCols=['categ2'])
print(aggDf0)
print(aggDf1)
'''
## find rows which have repeated values on some cols
def FindRepRows(df, cols):
return pd.concat(g for _, g in df.groupby(cols) if len(g) > 1)
## slice df by sliceCol and with given values in sliceValues
def DfSliceDict(df, sliceCol, sliceValues=None):
if sliceValues == None:
sliceValues = list(set(df[sliceCol].values))
dfDict = {}
for i in range(len(sliceValues)):
v = sliceValues[i]
dfDict[v] = df[df[sliceCol] == v]
return dfDict
## merge dfDict
def MergeDfDict(dfDict, onCols, how='outer', naFill=None):
keys = dfDict.keys()
for i in range(len(keys)):
key = keys[i]
df0 = dfDict[key]
cols = list(df0.columns)
valueCols = list(filter(lambda x: x not in (onCols), cols))
df0 = df0[onCols + valueCols]
df0.columns = onCols + [(s + '_' + key) for s in valueCols]
if i == 0:
outDf = df0
else:
outDf = pd.merge(outDf, df0, how=how, on=onCols)
if naFill != None:
outDf = outDf.fillna(naFill)
return outDf
'''
def GenDf(size):
df = pd.DataFrame({
'categ1':np.random.choice(
a=['a', 'b', 'c', 'd', 'e'], size=size, replace=True),
'categ2':np.random.choice(a=['A', 'B'], size=size, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=size),
'col2':np.random.uniform(low=0.0, high=100.0, size=size)
})
df = df.sort_values(['categ2', 'categ1', 'col1', 'col2'])
return(df)
size = 5
dfDict = {'US':GenDf(size), 'IN':GenDf(size), 'GER':GenDf(size)}
MergeDfDict(dfDict=dfDict, onCols=['categ1', 'categ2'], how='outer', naFill=0)
'''
## split data based on values of a column: col
def SplitDfByCol(df, col):
#create unique list of device names
uniqueNames = df[col].unique()
#create a data frame dictionary to store your data frames
dfDict = {elem : pd.DataFrame for elem in uniqueNames}
for key in dfDict.keys():
dfDict[key] = df[df[col] == key]
return dfDict
## calculates value_counts() aka freq for combination of cols
def CombinFreqDf(
df, cols=None, countColName='cnt', propColName='prop (%)'):
if Type(df) == "Series":
df = pd.DataFrame(df)
if cols == None:
cols = list(df.columns)
if len(cols) < 2:
cols.append('dummy')
df['dummy'] = 'NA'
outDf = df[cols].groupby(cols).agg(len).reset_index()
outDf.columns = list(outDf.columns[:len(outDf.columns)-1]) + [countColName]
outDf[propColName] = 100.0 * outDf[countColName] / outDf[countColName].sum()
outDf = outDf.sort_values([countColName], ascending=[0])
if 'dummy' in cols:
del outDf['dummy']
outDf = outDf.reset_index(drop=True)
return outDf
'''
df0 = pd.DataFrame({
'app':['fb', 'fb', 'mailingFeat', 'mailingFeat'],
'party':['1P', '1P', '3P', '3P']})
CombinFreqDf(df=df0, cols=['app', 'party'])
'''
## maps a categorical variable with too many labels to less labels.
def Remap_lowFreqCategs(
    df,
    cols,
    newLabels="nan",
    otherLabelsToReMap=None,
    freqThresh=5,
    propThresh=0.1,
    labelsNumMax=None):
  """Collapses rare labels of categorical columns into a catch-all label.

  For each column in cols, labels whose count exceeds freqThresh AND whose
  share exceeds propThresh percent are kept; everything else is remapped to
  the corresponding entry of newLabels. Scalar thresholds/labels are
  broadcast to all columns. labelsNumMax caps how many frequent labels are
  kept per column (CombinFreqDf output is count-sorted, so the most common
  ones survive). Labels listed in otherLabelsToReMap are force-remapped
  even when frequent.

  Returns {"df": remapped copy of df, "F": reusable remapping function
  (mutates and returns the frame given to it), "freqLabelsDict":
  column -> kept labels}.
  """
  df2 = df.copy()
  k = len(cols)
  # broadcast scalar settings so each column has its own threshold/label
  if Type(freqThresh) == 'int':
    freqThresh = [freqThresh] * k
  if Type(propThresh) in ['int', 'float']:
    propThresh = [propThresh] * k
  if Type(newLabels) == 'str':
    newLabels = [newLabels] * k
  if (labelsNumMax is not None) and Type(labelsNumMax) == 'int':
    labelsNumMax = [labelsNumMax] * k
  def GetFreqLabels(i):
    # labels of cols[i] frequent enough to survive the remap
    col = cols[i]
    freqDf = CombinFreqDf(df[col])
    ind = (freqDf["cnt"] > freqThresh[i]) & (freqDf["prop (%)"] > propThresh[i])
    freqLabels = list(freqDf.loc[ind][col].values)
    if labelsNumMax is not None:
      maxNum = min(len(freqLabels), labelsNumMax[i])
      freqLabels = freqLabels[0:(maxNum)]
    if otherLabelsToReMap is not None:
      freqLabels = list(set(freqLabels) - set(otherLabelsToReMap))
    return freqLabels
  freqLabelsList = [GetFreqLabels(x) for x in range(k)]
  freqLabelsDict = dict(zip(cols, freqLabelsList))
  def F(df):
    # remap in place: any value not in the kept set becomes the new label
    for i in range(len(cols)):
      col = cols[i]
      newLabel = newLabels[i]
      ind = [x not in freqLabelsDict[col] for x in df[col]]
      if max(ind):
        df.loc[ind, col] = newLabel
    return df
  return {"df":F(df2), "F":F, "freqLabelsDict":freqLabelsDict}
## this function works on a data frame with two categorical columns
## one is the category column
## one is the label column
## for each category it creates a distribution for the labels
def CalcFreqTablePerCateg(df, categCol, valueCol):
  """For each category in categCol, counts occurrences of every label of
  valueCol, yielding one column per label.

  Missing values are filled with 'NA' first. Returns {'df': wide frame
  with columns [categCol] + labels, 'labels': the label list}.
  """
  df = df.fillna('NA')
  labels = list(set(df[valueCol]))
  def CountLabel(label):
    # per-category count of rows whose valueCol equals `label`
    counter = lambda x: sum(x == label) / (1.0)
    agg = df.groupby([categCol])[[valueCol]].agg(counter)
    return agg.reset_index()
  merged = CountLabel(labels[0])
  for label in labels[1:]:
    merged = pd.merge(merged, CountLabel(label), how='left', on=[categCol])
  merged.columns = [categCol] + labels
  return {'df': merged, 'labels': labels}
'''
size = 20
df0 = pd.DataFrame({
'categ':np.random.choice(a=['a', 'b', 'c'], size=size, replace=True),
'value':np.random.choice(a=['AA', 'BB', 'CC'], size=size, replace=True),
'col2':np.random.uniform(low=0.0, high=100.0, size=size),
'col3':np.random.uniform(low=0.0, high=100.0, size=size),
'col4':np.random.uniform(low=0.0, high=100.0, size=size)})
CalcFreqTablePerCateg(df=df0, categCol='categ', valueCol='value')['df']
'''
## merges a dict of tables
def MergeTablesDict(tabDict):
keys = tabDict.keys()
#print(keys)
n = len(keys)
for i in range(n):
key = keys[i]
tab = tabDict[key]
df = PropDfTab(tab)
df = df[['categ', 'freq', 'prop']]
df.columns = ['categ', 'freq_' + key, 'prop_' + key]
if i == 0:
outDf = df
else:
outDf = pd.merge(outDf, df, on=['categ'], how='outer')
outDf = outDf.reset_index(drop=True)
outDf = outDf.fillna(value=0)
return outDf
## creating a single string column using multiple columns (cols)
# and adding that to the data frame
def Concat_stringColsDf(df, cols, colName=None, sepStr='-'):
x = ''
if colName == None:
colName = sepStr.join(cols)
for i in range(len(cols)):
col = cols[i]
x = (x + df[col].map(str))
if (i < len(cols)-1):
x = x +'-'
df[colName] = x
return df
'''
df = pd.DataFrame({'a':range(3), 'b':['rr', 'gg', 'gg'], 'c':range(3)})
Concat_stringColsDf(df=df, cols=['a', 'b', 'c'], colName=None, sepStr='-')
'''
## flatten a column (listCol) of df with multiple values
def Flatten_RepField(df, listCol, sep=None):
if sep != None:
df = df.assign(**{listCol: df[listCol].str.split(',')})
outDf = pd.DataFrame({
col: np.repeat(df[col].values, df[listCol].str.len())
for col in df.columns.difference([listCol])
}).assign(
**{listCol: np.concatenate(df[listCol].values)})[df.columns.tolist()]
return outDf
'''
df = pd.DataFrame({'var1': ['a,b,c', 'd,e,f'], 'var2': [1, 2], 'var3':[5, 6]})
print(df)
Flatten_RepField(df, listCol='var1', sep=',')
'''
### tables p-value
## for a given table of frequencies and for each category calculates
## the total count of other categs (complement)
def TabCategCompl(tab, categCol, freqCol, complementCol=None):
  """For each category, adds the total freq of all OTHER categories
  (the complement) as a new column on tab; returns tab."""
  total = tab[freqCol].sum()
  complement = []
  for categ in tab[categCol].values:
    own = tab.loc[tab[categCol] == categ, freqCol].values[0]
    complement.append(total - own)
  if complementCol is None:
    complementCol = freqCol + '_compl'
  tab[complementCol] = complement
  return tab
## does above for multiple columns
def TabCategComplMulti(tab, categCol, freqCols):
complementCols = []
for i in range(len(freqCols)):
freqCol = freqCols[i]
tab = TabCategCompl(tab=tab, categCol=categCol, freqCol=freqCol,
complementCol=None)
complementCol = freqCol + '_compl'
complementCols.append(complementCol)
cols = freqCols + complementCols
tab = tab[[categCol] + cols]
return tab
## adds a p-value per categ for comparing two frequencies
def TabComplPvalue(tab, categCol, freqCols):
tab = TabCategComplMulti(tab, categCol, freqCols)
n = tab.shape[0]
pvalueList = []
for i in range(n):
r = tab.iloc[i]
d = pd.DataFrame({'col1': [r[1], r[2]], 'col2': [r[3],r[4]]})
pvalue = scipy.stats.fisher_exact(table=d, alternative='two-sided')[1]
pvalueList.append(Signif(3)(pvalue))
tab['p-value'] = pvalueList
return tab
#### Useful functions for mapping a string column to another string column
## using a pattern string
## function for classification mapping
## it uses patterns to map to categories (general purpose)
def LabelByPattern(
    x, patternDf, patternCol='pattern', categCol='category',
    exactMatch=False):
  """Labels each element of Series x with categories whose pattern matches.

  patternDf maps patterns (patternCol) to category labels (categCol).
  Matching is substring-regex by default, or whole-value equality with
  exactMatch. Elements matching several patterns get the categories
  concatenated (each category at most once). Returns a frame with columns
  ['signal', categCol].
  """
  import re
  # drop duplicates AND reindex: patterns[i] below is label-based, so index
  # gaps left by drop_duplicates would raise KeyError
  patternDf = patternDf.drop_duplicates(keep='first').reset_index(drop=True)
  patterns = patternDf[patternCol]
  categs = patternDf[categCol]
  y = [''] * len(x)
  outDf = pd.DataFrame({'x': x, 'y': y})
  for i in range(len(patterns)):
    pattern = patterns[i]
    categ = categs[i]
    if exactMatch:
      # bug fix: the original assigned to a misspelled 'hascateg' local
      # (and compared x.str to the pattern), so exactMatch was a no-op
      hasCateg = (x == pattern)
    else:
      hasCateg = x.str.contains(pattern)
    ind = np.where(hasCateg > 0)[0].tolist()
    for j in ind:
      # don't append the same category twice for one element
      if not bool(re.search(categ, y[j])):
        y[j] = y[j] + categ
  outDf['y'] = y
  outDf.columns = ['signal', categCol]
  return outDf
## label a data frame based on patternDf
# which includes pattern column and category column
def LabelByPatternDf(
df, signalCol, patternDf, patternCol, categCol,
newColName='mapped_category'):
patternDf = patternDf[[patternCol, categCol]]
patternDf = patternDf.drop_duplicates(keep='first')
patternDf = patternDf.reset_index(drop=True)
x = df[signalCol]
df2 = LabelByPattern(x=x, patternDf=patternDf, patternCol=patternCol,
categCol=categCol)
df[newColName] = df2[categCol]
return df
######################### Graphical/Plotting Functions ######################
## bar charts for multiple columns (yCols), with different colors
## x axis labels come from the xCol
def BarPlotMultiple(df, xCol, yCols, rotation=45, pltTitle=''):
  """Grouped bar chart: one colored bar series per column in yCols,
  x-axis labels taken from xCol.

  Draws on the current matplotlib axes; returns nothing.
  """
  x = range(len(df[xCol]))
  colorList = ['r', 'm', 'g', 'y', 'c']
  # stretch positions so the per-series offsets below don't overlap
  x = 8*np.array(x)
  for i in range(len(yCols)):
    col = yCols[i]
    x1 = x + 1*i
    # bug fix: cycle the palette so more than len(colorList) series no
    # longer raises IndexError
    plt.bar(x1, df[col], color=colorList[i % len(colorList)], alpha=0.6,
            label=col)
  locs, labels = plt.xticks()
  plt.xticks(x1, df[xCol], rotation=rotation)
  plt.setp(labels, rotation=rotation, fontsize=10)
  plt.title(pltTitle + ': ' + xCol)
  plt.legend()
import matplotlib.scale as mscale
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.ticker as ticker
import numpy as np
class SquareRootScale(mscale.ScaleBase):
  """
  ScaleBase class for generating square root scale.
  """
  # name under which the scale is registered with matplotlib
  name = 'squareroot'

  def __init__(self, axis, **kwargs):
    # NOTE(review): newer matplotlib ScaleBase.__init__ takes the axis
    # argument — confirm against the matplotlib version in use
    mscale.ScaleBase.__init__(self)

  def set_default_locators_and_formatters(self, axis):
    # standard major ticks, no minor ticks
    axis.set_major_locator(ticker.AutoLocator())
    axis.set_major_formatter(ticker.ScalarFormatter())
    axis.set_minor_locator(ticker.NullLocator())
    axis.set_minor_formatter(ticker.NullFormatter())

  def limit_range_for_scale(self, vmin, vmax, minpos):
    # sqrt is undefined for negatives: clamp the lower bound at 0
    return max(0., vmin), vmax

  class SquareRootTransform(mtransforms.Transform):
    input_dims = 1
    output_dims = 1
    is_separable = True

    def transform_non_affine(self, a):
      # forward mapping: data -> sqrt(data)
      return np.array(a)**0.5

    def inverted(self):
      return SquareRootScale.InvertedSquareRootTransform()

  class InvertedSquareRootTransform(mtransforms.Transform):
    input_dims = 1
    output_dims = 1
    is_separable = True

    # NOTE(review): this overrides transform() rather than
    # transform_non_affine() like its sibling — confirm this is intended
    # for the matplotlib Transform API in use
    def transform(self, a):
      return np.array(a)**2

    def inverted(self):
      return SquareRootScale.SquareRootTransform()

  def get_transform(self):
    return self.SquareRootTransform()
mscale.register_scale(SquareRootScale)
## compares the freq of usages given in labelCol
# and creates a joint distribution of labelCol given in x-axis
# and compareCol given with colors
# it either shows the joint probability (prop (%)) on y-axis
# or it will show the freq divided by denomConstant if prop == False
# labelCol: categorical var denoted in x-axis
# compareCol: categorical var denoted by colors
# countDistinctCols: what should be counted once, e.g. unit_id will count
# each item with given unit_id once
# prop = True, will calculates proportions, otherwise we divide counts by
# denomConstant,
# and if denomCountCols is given, we use it to count number of items
# and divide by (denomConstant * itemCount)
def PltCompare_bivarCategFreq(
    df, labelCol, compareCol=None, countDistinctCols=None,
    rotation=90, pltTitle='', compareOrder=None, limitNum=None,
    prop=True, denomConstant=1.0, denomCountCols=None,
    newColName="value", yScale=None):
  """Bar-plots the (joint) frequency of labelCol, optionally colored by
  compareCol.

  With prop=True the y-axis is the percentage from CombinFreqDf; otherwise
  counts are divided by denomConstant (times the distinct-row count of
  denomCountCols when given) into newColName. countDistinctCols dedups
  rows first so each combination counts once. Returns the frequency frame.
  NOTE(review): compareOrder is stored in hue_order but never passed to
  sns.barplot, so it currently has no visible effect — confirm intent.
  """
  if countDistinctCols is not None:
    # count each distinct combination only once
    keepCols = [labelCol] + countDistinctCols
    if compareCol is not None:
      keepCols = keepCols + [compareCol]
    if denomCountCols is not None:
      keepCols = keepCols + denomCountCols
    df = df[keepCols].drop_duplicates().reset_index()
  if compareCol is None:
    combinDf = CombinFreqDf(df[labelCol])
  else:
    combinDf = CombinFreqDf(df[[labelCol, compareCol]])
    hue = compareCol
  if limitNum is not None:
    # keep only the top limitNum combinations (CombinFreqDf is count-sorted)
    combinDf = combinDf[:limitNum]
  if compareOrder is not None:
    hue_order = compareOrder
  respCol = "prop (%)"
  #Mark(denomConstant, "denomConstant")
  if denomCountCols is not None:
    # scale the denominator by the number of distinct items
    itemCount = len(df[denomCountCols].drop_duplicates().reset_index())
    denomConstant = 1.0 * denomConstant * itemCount
    #Mark(denomConstant, "denomConstant")
  if prop is False:
    combinDf[newColName] = combinDf["cnt"] / denomConstant
    respCol = newColName
  if compareCol is None:
    sns.barplot(data=combinDf, x=labelCol, y=respCol)
  else:
    sns.barplot(data=combinDf, x=labelCol, hue=hue, y=respCol)
  locs, labels = plt.xticks()
  out = plt.setp(labels, rotation=rotation, fontsize=10)
  plt.legend(loc='upper right')
  if yScale is not None:
    plt.yscale(yScale)
  return combinDf
"""
df = pd.DataFrame({
"label":["cat", "dog", "cat", "dog", "dog", "cat", "cat", "dog"],
"gender":["M", "F", "M", "F", "F", "F", "F", "M"]})
PltCompare_bivarCategFreq(
df=df, labelCol="label", compareCol="gender")
PltCompare_bivarCategFreq(
df=df, labelCol="label", compareCol="gender",
prop=False, denomConstant=1.0, newColName="cnt per day")
"""
## make a boxplot for multiple columns Side by Side (Sbs, include mean with a star
def BoxPlt_dfColsSbS(
    df, cols=None, pltTitle='', xlab='', ylab='value',
    boxColors=['darkkhaki', 'royalblue', 'r', 'g', 'y', 'o', 'b'],
    ylim=None):
  """Side-by-side boxplots of the given df columns, with filled colored
  boxes and each column's mean marked by a white star.

  NOTE(review): boxColors has 7 entries, so more than 7 columns would
  IndexError; 'o' is not a standard matplotlib color name — confirm.
  """
  from matplotlib.patches import Polygon
  data = []
  if cols is None:
    cols = df.columns
  for i in range(len(cols)):
    col = cols[i]
    data.append(df[col])
  fig, ax1 = plt.subplots(figsize=(10, 6))
  # NOTE(review): fig.canvas.set_window_title is deprecated in newer
  # matplotlib (use fig.canvas.manager.set_window_title) — confirm version
  fig.canvas.set_window_title('')
  plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
  bp = plt.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
  plt.setp(bp['boxes'], color='black')
  plt.setp(bp['whiskers'], color='black')
  plt.setp(bp['fliers'], color='red', marker='+')
  # Add a horizontal grid to the plot, but make it very light in color
  # so we can use it for reading data values but not be distracting
  ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                 alpha=0.5)
  # Hide these grid behind plot objects
  ax1.set_axisbelow(True)
  ax1.set_title(pltTitle, fontsize=20)
  ax1.set_xlabel(xlab)
  ax1.set_ylabel(ylab)
  # Now fill the boxes with desired colors
  numBoxes = len(data)
  medians = list(range(numBoxes))
  for i in range(numBoxes):
    box = bp['boxes'][i]
    boxX = []
    boxY = []
    # a box path has 5 vertices; collect them to build the fill polygon
    for j in range(5):
      boxX.append(box.get_xdata()[j])
      boxY.append(box.get_ydata()[j])
    boxCoords = list(zip(boxX, boxY))
    boxPolygon = Polygon(boxCoords, facecolor=boxColors[i], label=cols[i])
    ax1.add_patch(boxPolygon)
    # Now draw the median lines back over what we just filled in
    med = bp['medians'][i]
    medianX = []
    medianY = []
    for j in range(2):
      medianX.append(med.get_xdata()[j])
      medianY.append(med.get_ydata()[j])
    plt.plot(medianX, medianY, 'k')
    medians[i] = medianY[0]
    # Finally, overplot the sample averages, with horizontal alignment
    # in the center of each box
    plt.plot([np.average(med.get_xdata())], [np.average(data[i])],
             color='w', marker='*', markeredgecolor='k')
  if ylim is not None:
    ax1.set_ylim(ylim)
  plt.legend()
def CustomSortDf(df, col, orderedValues):
  """Sorts df so that col's values follow orderedValues first.

  Values of col not listed in orderedValues are appended after them (in
  arbitrary set order). Returns a new frame with a reset index.
  """
  # removed an unused `values` local from the original; values not covered
  # by orderedValues are appended at the end
  remainingValues = list(set(df[col].values) - set(orderedValues))
  orderedValues = orderedValues + remainingValues
  orderDf = pd.DataFrame(
      {col: orderedValues, "dummy_order": range(len(orderedValues))})
  out = pd.merge(df, orderDf, how="left", on=[col])
  out = out.sort_values(["dummy_order"]).reset_index(drop=True)
  del out["dummy_order"]
  return out
"""
n = 10
df = pd.DataFrame({
'categ':np.random.choice(a=['a', 'b', 'c', 'd'], size=n, replace=True),
'col1':np.random.uniform(low=0.0, high=100.0, size=n),
'col2':np.random.uniform(low=0.0, high=100.0, size=n),
'col3':np.random.uniform(low=0.0, high=100.0, size=n),
'col4':np.random.uniform(low=0.0, high=100.0, size=n)})
col = "categ"
orderedValues = ["c", "a", "b"]
CustomSortDf(df=df, col=col, orderedValues=orderedValues)
"""
## it plots all columns wrt index
# it uses colors to compare them side by side.
def PltCols_wrtIndex(
    df, cols=None, categCol=None, pltTitle='', ymin=None,
    ymax=None, yLabel='', xLabel='', colorList=None,
    orderedValues=None, alphaList=None, sciNotation=False,
    ExtraFcn=None, orient='v',
    sizeAlpha=0.75, legendColAlpha=2):
  """Bar-plots all given columns against the frame's index, side by side.

  Each column in cols becomes one colored bar series; categories come from
  categCol (or the existing index). orderedValues controls category order
  (via CustomSortDf), orient picks vertical ('v') or horizontal ('h')
  bars, sizeAlpha scales figure/font sizes, and ExtraFcn (when given) is
  called with the axes for final tweaks. Returns {'fig', 'ax'}.
  """
  df2 = df.copy()
  if cols is None:
    cols = list(df2.columns)
  if categCol is not None:
    # categories drive the x (or y) axis labels
    df2.index = df2[categCol]
    if (categCol in cols):
      # don't plot the category column as a bar series
      cols = list(set(cols) - set([categCol]))
    # cols = cols.remove(categCol)
    # print(cols)
    # Mark(categs)
    if orderedValues is not None:
      df2 = CustomSortDf(df=df2, col=categCol, orderedValues=orderedValues)
      df2.index = df2[categCol]
  categs = df2.index
  num = len(categs)
  x = range(num)
  if colorList is None:
    colorList = [
        'r', 'g', 'm', 'y', 'c', 'darkkhaki', 'royalblue',
        'darkred', 'crimson', 'darkcyan', 'gold', 'lime', 'black',
        'navy', 'deepskyblue', 'k']
  if alphaList is None:
    alphaList = [0.7] * len(cols)
  # spread category positions so the per-series offset (2*i) fits
  stretch = 4 * len(cols)
  x = stretch * np.array(x)
  if orient == 'v':
    fig, ax = plt.subplots(figsize=(15*sizeAlpha, 10*sizeAlpha),
                           dpi=1200, facecolor='w', edgecolor='k')
    for i in range(len(cols)):
      col = df2[cols[i]]
      plt.bar(x + 2*i, col.values, alpha=alphaList[i], label=cols[i],
              color=colorList[i], width=2, edgecolor='black',
              linewidth=2.0*sizeAlpha)
    plt.title(pltTitle, fontsize=20, fontweight='bold')
    if sciNotation:
      plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    #labels = [item.get_text() for item in ax.get_xticklabels()]
    labels = categs
    ax.set_xticklabels(labels)
    locs, labels = plt.xticks(x, categs)
    plt.setp(labels, rotation=15, fontsize=17*sizeAlpha, fontweight='bold')
    locs2, labels2 = plt.yticks()
    plt.setp(labels2, rotation=0, fontsize=17*sizeAlpha, fontweight='bold')
    ncol = len(cols) / legendColAlpha
    plt.legend(
        loc='best',
        fontsize=17,
        prop={'weight': 'semibold',
              'size': 17 * sizeAlpha},
        ncol=ncol)
    axes = plt.gca()
    axes.set_ylim([ymin, ymax])
    ax.set_xlabel(xLabel)
    ax.set_ylabel(yLabel)
    if ExtraFcn is not None:
      ExtraFcn(ax)
  if orient == 'h':
    fig, ax = plt.subplots(figsize=(10*sizeAlpha, 15*sizeAlpha), dpi=1200,
                           facecolor='black', edgecolor='black')
    for i in range(len(cols)):
      col = df2[cols[i]]
      plt.barh(
          x + 2 * (i - 1),
          col.values,
          alpha=alphaList[i],
          label=cols[i],
          color=colorList[i],
          height=2,
          edgecolor='black',
          linewidth=2.0 * sizeAlpha)
    plt.title(pltTitle, fontsize=20*sizeAlpha, fontweight='bold')
    if sciNotation:
      plt.ticklabel_format(
          style='sci', axis='x', scilimits=(0, 0), prop={'weight': 'bold'})
    labels = categs
    ax.set_yticklabels(labels)
    locs, labels = plt.yticks(x, categs)
    plt.setp(labels, rotation=0, fontsize=17*sizeAlpha, fontweight='bold')
    locs2, labels2 = plt.xticks()
    plt.setp(labels2, rotation=20, fontsize=17*sizeAlpha, fontweight='bold')
    ncol = len(cols) / legendColAlpha
    plt.legend(
        loc='best',
        ncol=ncol,
        prop={'weight': 'semibold',
              'size': 17 * sizeAlpha})
    axes = plt.gca()
    axes.set_xlim([ymin, ymax])
    ax.set_xlabel(yLabel)
    ax.set_ylabel(xLabel)
    if ExtraFcn != None:
      ExtraFcn(ax)
    # horizontal bars list categories top-down
    plt.gca().invert_yaxis()
  return {'fig': fig, 'ax': ax}
'''
n = 3
df = pd.DataFrame({
'categ':np.random.choice(
a=['a', 'b', 'c', 'd', 'e', 'f'],
size=n,
replace=False),
'col1':np.random.uniform(low=0.0, high=100.0, size=n),
'col2':np.random.uniform(low=0.0, high=100.0, size=n),
'col3':np.random.uniform(low=0.0, high=100.0, size=n),
'col4':np.random.uniform(low=0.0, high=100.0, size=n)})
orderedValues = ["c", "a", "b", "d", "f", "e"]
PltCols_wrtIndex(
df=df,
cols=['col1', 'col2', 'col3', 'col4'],
categCol='categ',
orderedValues=orderedValues,
orient='v',
sciNotation=True)
'''
## this function creates a plot with each bar representing the distribution
# for a category (given in categCol)
# each distribution is defined on a set of labels
# the distributions are given in each column
def Plt_stackedDist_perCateg(
    df, categCol, cols=None, labels=None,
    sortCols=None, figsize=(10, 5), mainText=''):
  """Stacked bar plot: one bar per row (category in categCol), with each
  column in cols forming one layer of the stack.

  Args:
    df: input DataFrame; each row is one bar, identified by df[categCol].
    categCol: column providing the per-bar tick labels.
    cols: columns forming the stack layers; defaults to all but the first.
    labels: legend labels for the layers; defaults to cols.
    sortCols: columns used to sort rows (descending); defaults to cols.
    figsize: matplotlib figure size.
    mainText: plot title.

  Fixes: DataFrame.sort was removed in pandas 0.20 -> sort_values;
  removed leftover debug prints (df.loc[0] could KeyError after sorting).
  """
  import colorsys
  if cols is None:
    cols = list(df.columns[1:len(df.columns)])
  if labels is None:
    labels = cols
  if sortCols is None:
    sortCols = cols
  df = df.sort_values(sortCols, ascending=False)
  m = len(cols)
  # evenly-spaced hues give one distinct color per stacked layer
  HSV_tuples = [(i * 1.0 / m, 0.5, 0.5) for i in range(m)]
  RGB_tuples = [colorsys.hsv_to_rgb(*hsv) for hsv in HSV_tuples]
  n = df.shape[0]
  bottoms = pd.Series(n * [0])
  bar_locations = np.arange(n)
  fig, ax = plt.subplots(figsize=figsize)
  for i in range(len(cols)):
    y = df[cols[i]].values
    ax.bar(bar_locations, y, bottom=bottoms, color=RGB_tuples[i],
           label=labels[i])
    bottoms = bottoms + y  # next layer starts on top of this one
  plt.legend(loc="best")
  plt.xticks(np.arange(n) + 0.5, df[categCol].values, rotation='vertical')
  plt.title(mainText)
  fig.show()
'''
df0 = pd.DataFrame({'country':['JP', 'US', 'FR'],
'col1':np.random.uniform(low=0.0, high=100.0, size=3),
'col2':np.random.uniform(low=0.0, high=100.0, size=3),
'col3':np.random.uniform(low=0.0, high=100.0, size=3),
'col4':np.random.uniform(low=0.0, high=100.0, size=3)})
Plt_stackedDist_perCateg(
df=df0, categCol='country', cols=['col1', 'col2', 'col3', 'col4'], labels=None,
sortCols=None, figsize=(10, 5), mainText='')
'''
## compares the values (valueCol) for the index (pivotIndCol)
# for various classes given in compareCol
# first it pivots the data and then plots side by side
def PivotPlotWrt(
    df, pivotIndCol, compareCol, valueCol,
    cols=None, pltTitle='', sizeAlpha=0.75):
  """Compare valueCol across the classes in compareCol, per pivotIndCol value.

  Pivots df so each class of compareCol becomes a column, then plots the
  classes side by side (horizontal bars) via PltCols_wrtIndex.

  Returns:
    dict with 'df' (the pivoted frame) and 'plt' (the plot result).
  """
  wideDf = df.pivot(
      index=pivotIndCol, columns=compareCol, values=valueCol).fillna(0)
  wideDf[pivotIndCol] = wideDf.index
  if cols is None:
    cols = list(set(df[compareCol].values))
  plot = PltCols_wrtIndex(
      df=wideDf,
      cols=cols,
      categCol=pivotIndCol,
      orient='h',
      pltTitle=pltTitle,
      sizeAlpha=sizeAlpha)
  return {'df': wideDf, 'plt': plot}
## creating quantiles for a continuous variable and removing repetitions
def Qbins(x, num=10):
  """Quantile-based bin edges for x, deduplicated, capped with -inf/+inf.

  Uses the 0, 1/num, ..., (num-1)/num quantiles of x as interior edges;
  repeated quantiles are collapsed, so fewer than num+2 edges may return.
  """
  probs = 100 * np.linspace(start=0, stop=(1 - 1.0 / num), num=num)
  edges = set(np.percentile(a=x, q=list(probs)))
  edges.update([float("-inf"), float("inf")])
  return sorted(edges)
# cuts uniformly
def Ubins(x, num=10):
  """Uniform-width bin edges over [min(x), max(x)], rounded to 2 significant
  digits, deduplicated, and capped with -inf/+inf."""
  grid = np.linspace(start=min(x), stop=max(x), num=num)
  edges = sorted(set(Signif(2)(v) for v in grid))
  return [float("-inf")] + edges + [float("inf")]
## cuts using quantiles
def CutQ(x, num=10):
  """Discretize x into quantile-based intervals (edges from Qbins)."""
  return pd.cut(x, bins=Qbins(x, num))
## make a bar plot
def BarPlot(y, yLabels, ylim=None, pltTitle='', figSize=[5, 5]):
  """Simple red vertical bar plot of y, with yLabels as vertical tick text."""
  barCount = len(y)
  baseline = pd.Series(barCount * [0])
  positions = np.arange(barCount)
  fig, ax = plt.subplots()
  fig.set_size_inches(figSize[0], figSize[1])
  ax.bar(positions, y, bottom=baseline, color='r')
  plt.legend(loc="best")
  # ticks sit at bar centers (bars are left-aligned at integer positions)
  plt.xticks(np.arange(barCount) + 0.5, yLabels, rotation='vertical')
  plt.gca().set_ylim(ylim)
  plt.title(pltTitle)
  fig.show()
## creates a cut column with NA being an explicit category
def ExplicitNa_cutCol(df, col, cuts, newCol=None):
  """Bin df[col] with pd.cut, keeping missing values as an explicit "NA"
  category; the binned column is added to df in place and df is returned."""
  if newCol is None:
    newCol = col + '_range'
  binned = pd.cut(df[col], cuts)
  df[newCol] = binned.cat.add_categories(["NA"]).fillna("NA")
  return df
'''
z = np.random.normal(loc=50.0, scale=20.0, size=10)
z = np.insert(z, 0, float('nan'))
df0 = pd.DataFrame({'z':z})
ExplicitNa_cutCol(
df=df0,
col='z',
cuts=[-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')], newCol=None)
'''
## order df based on a cut column
def OrderDf_cutCol(df, cutCol, orderCol='order'):
  """Sort df by the numeric lower bound encoded in an interval-string column.

  cutCol holds strings like "(10, 20]" (pd.cut labels); rows whose value does
  not parse (e.g. "NA") get -inf and therefore sort first. The parsed bound
  is stored in a helper column named orderCol, which is left on the frame.

  Bug fix: the orderCol parameter was previously ignored and the helper
  column was always named 'order' (the default, so default callers are
  unaffected).

  Returns:
    df sorted by the interval lower bound (helper column added in place).
  """
  def LowerBound(s):
    # "(a, b]" -> float(a); unparsable labels sort before everything else
    m = re.search(r'.*?\((.*),.*', s)
    if m is None:
      return float('-inf')
    return float(m.group(1))
  df[orderCol] = df[cutCol].map(LowerBound)
  df = df.sort_values(orderCol)
  return df
'''
z = np.random.normal(loc=50.0, scale=20.0, size=10)
z = np.insert(z, 0, float('nan'))
df0 = pd.DataFrame({'z':z})
u = pd.cut(z, [-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')])
df0['col'] = u
df0['col'] = df0['col'].cat.add_categories(["NA"])
df0['col'] = df0['col'].fillna("NA")
OrderDf_cutCol(df=df0, cutCol="col")
'''
## for a variable generated with pd.cut it make a barplot
# it orders the labels based on the their values (rather than freq)
def FreqPlot_cutCol(u, pltTitle='', figSize=[5, 5]):
  """Bar plot of a pd.cut-generated variable with bars ordered by the
  interval bounds (not by frequency); returns the ordered freq table."""
  freqDf = pd.DataFrame(u.value_counts())
  freqDf['label'] = list(freqDf.index)
  freqDf = OrderDf_cutCol(df=freqDf, cutCol='label')
  freqDf.columns = ['value', 'label', 'order']
  freqDf = freqDf.sort_values('order')
  BarPlot(
      y=freqDf['value'], yLabels=freqDf['label'], pltTitle=pltTitle,
      figSize=figSize)
  return freqDf
'''
z = np.random.normal(loc=50.0, scale=20.0, size=1000)
u = pd.cut(z, [-20, 0, 20, 40, 60, 80, 100, 120, 140, float('inf')])
FreqPlot_cutCol(u)
'''
def PropDfTab(tab, ylim=None, categCol='categ', pltIt=False, pltTitle=''):
  """Turn a frequency table into a df with 'freq', percentage 'prop'
  (5 significant digits) and a category column; optionally bar-plot it."""
  outDf = pd.DataFrame(tab)
  outDf.columns = ['freq']
  percents = [Signif(5)(p) for p in (100.0 * outDf.values) / sum(outDf.values)]
  outDf['prop'] = percents
  outDf[categCol] = outDf.index
  if pltIt:
    BarPlot(y=percents, yLabels=list(outDf.index), ylim=ylim,
            pltTitle=pltTitle)
  return outDf
## cut continuous var
def CutConti(x, num=10, method='quantile'):
  """Discretize continuous x using 'quantile' (Qbins) or 'uniform' (Ubins)
  bin edges. Any other method leaves the edges undefined and raises."""
  if method == 'quantile':
    edges = Qbins(x, num=num)
  elif method == 'uniform':
    edges = Ubins(x, num=num)
  return pd.cut(x, bins=edges)
## gets a continuous var x, partitions the real line based on quantiles
# or bins of x
# then generates a function which assigns levels to any new value/values
def CutContiFcn(
    x,
    num=10,
    method='quantile',
    intervalColName='interval',
    levelColName='level',
    levelsType='int',
    levelPrefix='Lev',
    rawValueColName='raw'):
  """Learn interval bins from x; return a function mapping new values to a
  df with raw value, interval, and level columns.

  Args:
    x: list of numbers used to fit the bin edges.
    num: number of bins.
    method: 'quantile' (Qbins) or 'uniform' (Ubins); otherwise ValueError.
    intervalColName / levelColName / rawValueColName: output column names.
    levelsType: 'str' -> levels are levelPrefix + index; otherwise integer
      levels 0..k-1.
    levelPrefix: prefix used when levelsType == 'str'.

  Bug fix: the original had a dead `if ():` branch, leaving `levels`
  undefined (NameError) and ignoring levelsType; debug prints removed.
  """
  if method == 'quantile':
    b = Qbins(x, num=num)
  elif method == 'uniform':
    b = Ubins(x, num=num)
  else:
    raise ValueError("method must be 'quantile' or 'uniform': " + str(method))
  # append the finite edges so that every interval shows up at least once
  intervals = sorted(set(pd.cut(x + b[1:], bins=b)))
  if levelsType == 'str':
    levels = [levelPrefix + str(i) for i in range(len(intervals))]
  else:
    levels = list(range(len(intervals)))
  levDf = pd.DataFrame({intervalColName: intervals, levelColName: levels})
  def F(u):
    # assign each new value to its fitted interval, then join the level
    binned = pd.cut(u, bins=b)
    df0 = pd.DataFrame({rawValueColName: u, intervalColName: binned})
    return pd.merge(df0, levDf, on=[intervalColName], how='left')
  return F
'''
x = [1, 3, 4, 5, 66, 77, 88]
F = CutContiFcn(
x, num=10, method='quantile', intervalColName='interval',
levelColName='level', levelPrefix='Lev', rawValueColName='raw')
F(x)
F(x + [5, 1, 3, 100, -1 , 90, 2.2])
'''
## cuts continuous data and creates a bar plot
def CutBarPlot(x, num=10, method='quantile', pltTitle='', figSize=[5, 5]):
  """Discretize x (see CutConti) and bar-plot the percentage of values
  falling into each bin, ordered by bin."""
  binned = CutConti(x, num=num, method=method)
  freqDf = pd.DataFrame(binned.value_counts())
  freqDf = (100.0 * (freqDf / freqDf.sum())).sort_index()
  BarPlot(y=freqDf.values, yLabels=freqDf.index, ylim=None,
          pltTitle=pltTitle, figSize=figSize)
## returns a functions which calculates quantiles according to q (which )
def QuantileFcn(q):
  """Return a function computing the q-th percentile(s) of its argument
  (q may be a scalar or a list of percentiles in [0, 100])."""
  def Percentiles(x):
    return np.percentile(a=x, q=q)
  return Percentiles
def Plt_quantilesPerSlice(
    df, sliceCol, respCol, gridNum=100.0, pltTitle=''):
  """Overlay per-slice quantile curves of respCol (one line per value of
  sliceCol); returns the quantiles as a DataFrame (one column per slice)."""
  # percentile grid 1/gridNum .. (1 - 1/gridNum), scaled to [0, 100]
  step = 1.0 / float(gridNum)
  grid = list(gridNum * np.linspace(start=step, stop=(1 - step),
                                    num=int(gridNum)))
  quantilesPerSlice = {}
  for sliceValue in list(set(df[sliceCol].values)):
    respValues = df[df[sliceCol] == sliceValue][respCol].values
    q = QuantileFcn(grid)(respValues)
    quantilesPerSlice[sliceValue] = q
    plt.plot(grid, q, label=sliceValue)
  plt.legend()
  plt.title(pltTitle)
  return pd.DataFrame(quantilesPerSlice)
'''
df = pd.DataFrame({
'value':[1, 1, 1, 2, 3, 4, 2, 2, 2],
'categ':['a', 'a', 'b', 'b', 'b', 'a', 'a', 'b', 'a']})
Plt_quantilesPerSlice(df=df, sliceCol='categ', respCol='value', pltTitle='')
'''
## takes a vector of labels, eg pandas series
# it returns a freq table with props in dataframe format
def GenFreqTable(x, rounding=None):
  """Frequency table for a vector of labels (e.g. a pandas Series).

  Returns a df with columns ['label', 'freq', 'prop'] ordered by descending
  frequency; 'prop' is the percentage, optionally rounded via Signif.
  """
  counts = x.value_counts()
  props = 100.0 * counts / counts.sum()
  propValues = list(props)
  if rounding is not None:
    propValues = [Signif(rounding)(p) for p in propValues]
  outDf = pd.DataFrame(
      {'label': counts.keys(), 'freq': list(counts), 'prop': propValues})
  return outDf[['label', 'freq', 'prop']]
'''
x = pd.Series(['a', 'a', 'b', 'b', 'c'])
print(GenFreqTable(x))
'''
## builds a categ distbn for each combination after groupby indCols
def CategDistbnDf(df, indCols, categCol, rounding=None):
  """Build a categorical distribution of categCol per indCols combination.

  Groups df by indCols and aggregates categCol into three tuple-valued
  columns: the labels (ordered by descending frequency), their counts, and
  their percentage props.

  Args:
    df: input frame.
    indCols: grouping columns.
    categCol: categorical column whose distribution is computed.
    rounding: significant digits for the props (None = unrounded).

  Bug fix: `rounding` was previously ignored (props were always rounded to
  4 significant digits); it is now passed through to GenFreqTable.

  Returns:
    one row per indCols combination, with indCols and categCol brought to
    the front of the column order.
  """
  def LabelTuple(x):
    return tuple(GenFreqTable(x)['label'].values)
  def FreqTuple(x):
    return tuple(GenFreqTable(x)['freq'].values)
  def PropTuple(x):
    return tuple(GenFreqTable(x, rounding=rounding)['prop'].values)
  df = df[indCols + [categCol]].copy()
  # duplicate the categ column so each aggregator writes its own output col
  df[categCol + '_freq'] = df[categCol]
  df[categCol + '_prop'] = df[categCol]
  g = df.groupby(indCols)
  outDf = g.aggregate({
      categCol: LabelTuple,
      categCol + '_freq': FreqTuple,
      categCol + '_prop': PropTuple})
  outDf = outDf.reset_index()
  return outDf[BringElemsToFront(outDf.columns, indCols + [categCol])]
'''
df = pd.DataFrame({
'user_id':[1, 1, 1, 2, 2, 2, 2, 2, 1, 1],
'interface':['A', 'A', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'B'],
'categ':['a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'b', 'b']})
dnDf = CategDistbnDf(
df=df, indCols=['user_id', 'interface'], categCol='categ', rounding=None)
'''
## compare label distribution across slices
def LabelDistbn_acrossSlice(
    df,
    sliceCol,
    labelCol,
    slices=None,
    orderedValues=None,
    pltIt=True,
    pltTitle='',
    orderIntervals=False,
    sortBySlice=False,
    limitLabelNum=20):
  """Compare the distribution of labelCol across the slices in sliceCol.

  Computes per-slice label counts/props (via CombinFreqDf), merges them into
  one wide frame (one '<slice>_cnt' / '<slice>_prop (%)' column pair per
  slice) and optionally plots the props side by side.

  Args:
    df: input frame.
    sliceCol: slicing column; each distinct value becomes one bar series.
    labelCol: label column whose distribution is compared.
    slices: slice values to include; defaults to all distinct values.
    orderedValues: passed to PltCols_wrtIndex for custom label order.
    pltIt: whether to produce the comparison plot.
    pltTitle: plot title.
    orderIntervals: order rows by interval bounds (labels from pd.cut).
    sortBySlice: NOTE(review): the sort_values result is discarded here,
      so this flag currently has no effect — likely a bug.
    limitLabelNum: keep only the top-N labels by total count.

  Returns:
    dict with 'outDf' (long per-slice freq df), 'horizDf' (wide df), 'p'
    (plot result or None).
  """
  def F(group):
    return CombinFreqDf(group[[sliceCol, labelCol]])
  g = df.groupby([sliceCol], as_index=False)
  outDf = g.apply(F).reset_index(drop=True)
  if slices is None:
    slices = list(set(df[sliceCol].values))
  horizDf = None
  for s in slices:
    s = str(s)
    # compare slices on stringified values so mixed types still match
    subDf = outDf[outDf[sliceCol].map(str) == s][[labelCol, 'cnt', 'prop (%)']]
    subDf.columns = [labelCol, s + '_cnt', s + '_prop (%)']
    #Mark(subDf[:2])
    if horizDf is None:
      horizDf = subDf
      horizDf['total_cnt'] = subDf[s + '_cnt']
    else:
      horizDf = pd.merge(horizDf, subDf, on=labelCol, how='outer')
      horizDf['total_cnt'] = horizDf['total_cnt'] + horizDf[s + '_cnt']
    #Mark(subDf, 'subDf')
    #Mark(horizDf, 'horizDf')
  # NOTE(review): debug print left in; consider removing in production
  print(horizDf)
  horizDf = horizDf.sort_values(['total_cnt'], ascending=[0])
  if orderIntervals:
    horizDf = OrderDf_cutCol(df=horizDf, cutCol=labelCol, orderCol='order')
  if sortBySlice:
    horizDf.sort_values([sliceCol])
  if limitLabelNum is not None:
    horizDf = horizDf[:limitLabelNum]
  p = None
  if pltIt:
    p = PltCols_wrtIndex(
        df=horizDf,
        cols=[str(x) + '_prop (%)' for x in slices],
        categCol=labelCol,
        orderedValues=orderedValues,
        orient='h',
        pltTitle=pltTitle)
  return {'outDf':outDf, 'horizDf':horizDf, 'p':p}
'''
df = GenUsageDf_forTesting()
Mark(df[:2])
res = LabelDistbn_acrossSlice(
df=df, sliceCol='expt', labelCol='prod', pltIt=True)
res['p']
res = LabelDistbn_acrossSlice(
df=df,
sliceCol='expt',
labelCol='prod',
orderedValues=[],
pltIt=True)
'''
# make a single label distbn
def LabelDistbn(
    df,
    labelCol,
    orderIntervals=False,
    pltTitle="",
    CustomOrder=None,
    figSize=[10, 8]):
  """Bar-plot and return the marginal distribution (%) of labelCol.

  orderIntervals orders bars by interval bounds (pd.cut-style labels);
  CustomOrder, when given, receives and may reorder the distribution df.
  """
  distbnDf = CombinFreqDf(df[[labelCol]])
  del distbnDf['cnt']
  distbnDf['prop (%)'] = distbnDf['prop (%)'].map(Signif(3))
  if orderIntervals:
    distbnDf = OrderDf_cutCol(df=distbnDf, cutCol=labelCol, orderCol='order')
    del distbnDf['order']
  if CustomOrder is not None:
    distbnDf = CustomOrder(distbnDf)
  fig, ax = plt.subplots()
  fig.set_size_inches(figSize[0], figSize[1])
  barCount = len(distbnDf)
  plt.bar(range(barCount), distbnDf['prop (%)'])
  plt.xticks(np.array(range(barCount)) + 0.5, distbnDf[labelCol], rotation=90)
  plt.grid(False)
  plt.grid(axis='y', linewidth=1, color='red', alpha=0.5)
  if pltTitle == "":
    pltTitle = labelCol + " distbn"
  plt.title(pltTitle, fontsize=20, fontweight='bold')
  return distbnDf
##
def LabelDistbn_perSlice(
    df,
    sliceCol,
    labelCol,
    pltIt=True,
    pltTitle='',
    orderIntervals=False,
    sortBySlice=False,
    labels=None,
    sizeAlpha=0.75):
  """For each slice in sliceCol, show the label mix of labelCol as
  percentages of that slice's total.

  Builds a wide frame with one '<label>_cnt' / '<label>_prop (%)' column
  pair per label, adds per-label share columns '<label>_%' (share of the
  slice's total_cnt), and optionally plots them side by side.

  Args:
    df: input frame.
    sliceCol: slicing column; one bar group per slice.
    labelCol: label column whose mix is computed per slice.
    pltIt: whether to plot.
    pltTitle: plot title.
    orderIntervals: order slices by interval bounds (pd.cut-style values).
    sortBySlice: sort rows by sliceCol instead of by total count.
    labels: labels to include; defaults to all distinct values.
    sizeAlpha: size scale passed to PltCols_wrtIndex.

  Returns:
    dict with 'outDf' (long freq df), 'horizDf' (wide df, truncated to the
    top 20 slices), 'p' (plot result or None).
  """
  def F(group):
    return CombinFreqDf(group[[sliceCol, labelCol]])
  g = df.groupby([labelCol], as_index=False)
  outDf = g.apply(F).reset_index(drop=True)
  if labels is None:
    labels = list(set(df[labelCol].values))
  horizDf = None
  for l in labels:
    l = str(l)
    # match labels on stringified values so mixed types still compare
    subDf = outDf[outDf[labelCol].map(str) == l][[sliceCol, 'cnt', 'prop (%)']]
    subDf.columns = [sliceCol, l + '_cnt', l + '_prop (%)']
    if horizDf is None:
      horizDf = subDf
      horizDf['total_cnt'] = subDf[l + '_cnt']
    else:
      horizDf = pd.merge(horizDf, subDf, on=sliceCol, how='outer')
      # slices missing this label get zero counts after the outer merge
      horizDf = horizDf.fillna(0)
      horizDf['total_cnt'] = horizDf['total_cnt'] + horizDf[l + '_cnt']
  horizDf = horizDf.sort_values(['total_cnt'], ascending=[0])
  if orderIntervals:
    horizDf = OrderDf_cutCol(df=horizDf, cutCol=sliceCol, orderCol='order')
  if sortBySlice:
    horizDf = horizDf.sort_values([sliceCol])
  # NOTE(review): hard-coded cap of 20 slices
  horizDf = horizDf[:20]
  p = None
  for l in labels:
    horizDf[l + '_%'] = 100 * (horizDf[l + '_cnt'] / horizDf['total_cnt'])
  if pltIt:
    p = PltCols_wrtIndex(
        df=horizDf,
        cols=[str(x) + '_%' for x in labels],
        categCol=sliceCol,
        orient='h',
        pltTitle=pltTitle,
        sizeAlpha=sizeAlpha)
  return {'outDf':outDf, 'horizDf':horizDf, 'p':p}
## interpolate missing categ values wrt certain columns
# condDict will determine which subset of data should be used to make predictions
# replacedValues are the values which need replacement/interpolation
# dropUnassigned is to determine if we should keep rows which remain unassigned around
def InterpCategColWrt(
    df, yCol, xCols, condDict={}, replacedValues=None, dropUnassigned=True):
  """Interpolate missing/placeholder values of a categorical column using
  the most frequent value observed for the same xCols combination.

  Args:
    df: input frame (modified in place for the replaced rows).
    yCol: categorical column to fill in.
    xCols: columns defining the groups used to predict yCol.
    condDict: restricts which rows are used to FIT the predictions
      (via BuildCondInd); replacement itself applies to all of df.
      NOTE(review): mutable default argument — shared across calls.
    replacedValues: the yCol values that count as "missing" and get
      replaced. NOTE(review): passing None crashes at the isin() call
      below — a non-None list appears to be required despite the default.
    dropUnassigned: drop rows whose yCol is still NaN after interpolation.

  Returns:
    df with the replaced values filled in (and possibly rows dropped).
  """
  df2 = df.copy()
  if len(condDict) > 0:
    ind = BuildCondInd(df=df2, condDict=condDict)
    df2 = df2[ind].copy()
  if replacedValues is not None:
    # fit only on rows whose yCol is a real (non-placeholder) value
    df2 = df2[~df2[yCol].isin(replacedValues)].copy()
  predDf = CategDistbnDf(df=df2, indCols=xCols, categCol=yCol)
  # labels are ordered by descending frequency -> x[0] is the modal value
  predDf[yCol + '_pred'] = predDf[yCol].map(lambda x: x[0])
  ind = df[yCol].isin(replacedValues)
  df3 = df[ind].copy()
  df4 = pd.merge(df3, predDf[xCols + [yCol + '_pred']], on=xCols, how='left')
  # positional assignment: relies on merge preserving df3's row order
  df.loc[ind, yCol] = df4[yCol + '_pred'].values
  if dropUnassigned:
    df = df.dropna(subset=[yCol])
  return df
'''
df = pd.DataFrame({
'user_id':[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3],
'os':['and', 'and', 'and', 'randSurface', 'randSurface', 'randSurface', 'randSurface', 'and', 'and', 'and', 'randSurface', 'randSurface', 'randSurface', 'randSurface', 'randSurface'],
'y':[None, 'c', 'b', 'b', 'b', 'b', 'a', 'a', 'a', 'b', 'b', 'nan', 'b', 'b', None],
'country': ['us', 'us', 'us', 'us', 'us', 'jp', 'us', 'jp', 'us', 'us', 'us', 'us', 'jp', 'us', 'us']})
print(df)
InterpCategColWrt(df=df,
yCol='y',
xCols=['user_id', 'os'],
condDict={'country':['us']},
replacedValues=['nan', None],
dropUnassigned=False)
'''
## for a df with sliceCols, it groups by sliceCols
# and for each categCols combination,
# it adds a total count column for the valueCol
# for example sliceCols: country, categCols=[sequence, event_1st, event_2nd]
# and valueCol=sequence_count
# we can figure out the total frequency of each sequence in each country
# as well as the frequency of the first event for the same country (sliceCols)
# we also agg a grand total for the valueCols for each combination of sliceCols
def AddTotalsDf(
    df, categCols, valueCols, sliceCols=[], aggFnDict=sum,
    integOutOther=False):
  """Within each sliceCols slice, add aggregate columns for every categCol
  and a grand per-slice total for each valueCol.

  For every categCol c and valueCol v this adds a column 'c_v_agg' holding
  the aggregate of v over all rows in the slice sharing the same value of c,
  plus a 'v_slice_total' column aggregating v over the whole slice.

  Args:
    df: input frame. NOTE(review): when sliceCols is empty, a temp column
      is added to df in place and removed only from the returned frame.
    categCols: categorical columns to aggregate along, one at a time.
    valueCols: numeric columns being aggregated.
    sliceCols: slice columns; empty means one global slice.
      NOTE(review): mutable default argument — shared across calls.
    aggFnDict: aggregation function or dict of per-column functions, as
      accepted by DataFrame.agg.
    integOutOther: pre-aggregate df down to sliceCols + categCols first,
      dropping all other columns and collapsing repeated combinations.

  Returns:
    df with the extra aggregate columns, columns reordered as
    sliceCols + sorted(categCols) + valueCols + the rest.
  """
  ## if there are no sliceCols, we generate a tempCol to be sliceCol
  ## then we delete it at the end
  l = len(sliceCols)
  if l == 0:
    sliceCols = ['tempCol']
    df['tempCol'] = 1
  ## integrates out wrt sliceCols + categCols first.
  ## so other columns will be dropped
  ## and the valueCols will be integrated out across,
  ## when there are repeated sliceCols + categCol even if there is no extra col
  if integOutOther:
    df = df[sliceCols + categCols + valueCols]
    g = df.groupby(sliceCols + categCols)
    df = g.agg(aggFnDict)
    df = df.reset_index()
  df0 = df[sliceCols + categCols + valueCols]
  outDf = df.copy()
  for categCol in categCols:
    # per-(slice, categCol value) aggregates, merged back onto every row
    g = df0.groupby(sliceCols + [categCol])
    aggDf = g.agg(aggFnDict)
    aggDf= aggDf.reset_index()
    aggDf.columns = (sliceCols +
                     [categCol] +
                     [categCol + '_' + x + '_agg' for x in valueCols])
    outDf = pd.merge(outDf, aggDf, on=sliceCols + [categCol], how='left')
  # add slice (sliceCols slice) totals: same as above but we drop the categCol
  df0 = df[sliceCols + valueCols]
  g = df0.groupby(sliceCols)
  aggDf = g.agg(aggFnDict)
  aggDf= aggDf.reset_index()
  aggDf.columns = sliceCols + [x + '_slice_total' for x in valueCols]
  outDf = pd.merge(outDf, aggDf, on=sliceCols, how='left')
  # reorder the columns
  cols = (sliceCols +
          sorted(categCols) +
          valueCols +
          list(sorted(set(outDf) - set(sliceCols + categCols + valueCols))))
  outDf = outDf[cols]
  ## remove extra column if it was created
  if l == 0:
    del outDf['tempCol']
  return outDf
'''
df = pd.DataFrame({
'country':['JP', 'JP', 'JP', 'BR', 'BR', 'BR', 'JP', 'JP', 'JP', 'BR'],
'seq':['a>b', 'a>b', 'b>a', 'b>a', 'a>b', 'a>b', 'a>c', 'a>c', 'b>c', 'c>b'],
'1st':['a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'b', 'c'],
'2nd':['b', 'b', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'b'],
'count':[10, 11, 1, 20, 2, 2, 2, 200, 1, 1],
'utility':[-10, -11, 1, 20, 2, 2, 2, -200, 1, 1],})
sliceCols = ['country']
categCols = ['seq', '1st', '2nd']
valueCols = ['count', 'utility']
aggFnDict = {'count':sum, 'utility':np.mean}
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=sum)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=aggFnDict)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=sliceCols, aggFnDict=aggFnDict, integOutOther=True)
AddTotalsDf(
df=df, categCols=categCols, valueCols=valueCols,
sliceCols=[], aggFnDict=aggFnDict, integOutOther=True)
'''
## for a data frame with a countCol, we do bootstrap
def BsWithCounts(df, countCol=None):
  """Bootstrap-resample the rows of df.

  Without countCol, sample len(df) rows with replacement. With countCol,
  each row is expanded to countCol copies, the expanded index is resampled
  with replacement, and the result is collapsed back to unique rows with
  updated counts.

  Bug fixes: the resample counts were read via `bsIndDf[0]`, which only
  works on old pandas where the value_counts DataFrame column is literally
  named 0 (modern pandas names it after the series / 'count'); also the
  count assignment wrote onto an iloc slice (SettingWithCopy).

  Returns:
    a resampled df (index reset when countCol is used).
  """
  if countCol is None:
    n = df.shape[0]
    ind = np.random.choice(a=n, size=n, replace=True, p=None)
    return df.iloc[ind]
  df = df.reset_index(drop=True)
  counts = df[countCol].values
  # expand each row index to `count` copies
  longInd = []
  for rowInd, c in zip(range(len(df)), counts):
    longInd.extend([rowInd] * c)
  bsLongInd = np.random.choice(
      a=longInd, size=len(longInd), replace=True, p=None)
  # collapse the resampled expanded indices back to (row, new count)
  bsTab = pd.Series(bsLongInd).value_counts()
  df2 = df.iloc[list(bsTab.index)].copy()
  df2[countCol] = bsTab.values
  df2 = df2.reset_index(drop=True)
  return df2
'''
df = pd.DataFrame({
'a':['cats', 'horses', 'dogs', 'wolves'],
'count':[2, 10, 4, 1]})
Mark(df, 'original df')
countCol = 'count'
Mark(BsWithCounts(df, countCol), ' using counts')
Mark(BsWithCounts(df, countCol=None), ' not using counts')
'''
## get a sublist with unique elements
# while preserving order
def UniqueList(l):
  """Deduplicate l, keeping the first occurrence of each element in order."""
  # dict preserves insertion order (Python 3.7+), so this keeps first-seen order
  return list(dict.fromkeys(l))
'''
x = [1, 2, 1, 1, 2, 3] + range(100000) + [1, 2, 1, 1, 2, 3]
tic = time.clock()
UniqueList(x)
toc = time.clock()
Mark((toc-tic)*100)
tic = time.clock()
set(x)
toc = time.clock()
Mark((toc-tic)*100)
'''
## bring certain elements (x) of a list (l) to front
# without re-ordering others
def BringElemsToFront(l, subList):
  """Move all occurrences of subList's elements to the front of l (in
  subList order), preserving the relative order of the remaining elements."""
  front = [x for wanted in subList for x in l if x == wanted]
  rest = [x for x in l if x not in subList]
  return front + rest
'''
BringElemsToFront(l=[1, 2, 3, 1], subList=[1])
BringElemsToFront(l=[1, 2, 3, 1, 4, 5,], subList=[1, 4, 5])
'''
## get a fcn which returns a color grid of size n
def GetColorGridFcn(n):
  """Return a function mapping each index in 0..n-1 to a distinct RGB color
  drawn from the 'hsv' colormap."""
  normalizer = matplotlib.colors.Normalize(vmin=0, vmax=n - 1)
  mappable = matplotlib.cm.ScalarMappable(norm=normalizer, cmap='hsv')
  def IndexToColor(index):
    return mappable.to_rgba(index)
  return IndexToColor
'''
def main():
n = 5
fig=plt.figure()
ax=fig.add_subplot(111)
plt.axis('scaled')
ax.set_xlim([ 0, n])
ax.set_ylim([-0.5, 0.5])
cmap = GetColorGridFcn(n)
for i in range(n):
col = cmap(i)
rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col)
ax.add_artist(rect)
ax.set_yticks([])
plt.show()
if __name__=='__main__':
main()
'''
## takes a dictionary of lists to one string
def DictOfLists_toString(
    d,
    dictElemSepr='__',
    listElemSepr='_',
    keyValueSepr=':',
    noneString=''):
  """Serialize a dict of lists into one deterministic string.

  Keys are emitted in sorted order as 'key<keyValueSepr>v1<listElemSepr>v2',
  joined by dictElemSepr; entries whose value is None are skipped.

  Bug fix: Python 3 — `map()` returns an iterator, so the original
  `keys = map(str, keys); keys.sort()` raised AttributeError; replaced
  with sorted().

  Args:
    d: dict mapping keys to lists (or None). None/{} returns noneString.
      NOTE(review): keys are assumed to already be strings — the lookup
      d[key] below uses the stringified key.
  """
  if d is None or d == {}:
    return noneString
  parts = []
  for key in sorted(map(str, d.keys())):
    if d[key] is not None:
      value = listElemSepr.join(str(v) for v in d[key])
      parts.append(key + keyValueSepr + value)
  return dictElemSepr.join(parts)
'''
d = {'z':[2], 'd':[1], 'e':[2]}
DictOfLists_toString(d)
d = {'z':[2], 'd':[1], 'e':None}
DictOfLists_toString(d)
condDict = {'form_factor':['PHN']}
condDict = {'form_factor':None}
condDict = {'form_factor':['PHN'], 'country':['JP']}
condDict = None
condStr = DictOfLists_toString(condDict, dictElemSepr='__', listElemSepr='_')
'''
## plotting confidence intervals given in each row
# will label the rows in labelCol is given
def PlotCI(df, colUpper, colLower, y=None, col=None, ciHeight=0.5,
           color='grey', labelCol=None, pltLabel=''):
  """Plot one horizontal confidence-interval bar per row of df.

  Each row's bar spans [df[colLower], df[colUpper]]; when col is given, a
  short vertical tick marks that central value inside each bar.

  Args:
    df: frame with one CI per row.
    colUpper, colLower: columns holding the interval bounds.
    y: vertical positions of the bars; defaults to 0..len(df)-1.
    col: optional column of central values (e.g. the median) to mark.
    ciHeight: bar thickness.
    color: bar (and central-tick) color.
    labelCol: optional column used for y tick labels.
    pltLabel: legend label for this CI series.
  """
  if y is None:
    y = range(len(df))
  minCiWidth = (df[colUpper] - df[colLower]).min()
  if col is not None:
    ## following was troubling in log scale,
    # the width of the lines were changing in visualization (not desired)
    '''
    p = plt.barh(
        bottom=y,
        width=np.array([minCiWidth]*len(y)),
        left=df[col],
        height = ciHeight,
        color='green',
        alpha=1,
        label=None)
    '''
    # draw the central value as a vertical tick: a solid colored line with
    # a dashed black line on top for contrast on any background
    for i in range(len(y)):
      plt.plot(
          [df[col].values[i],
           df[col].values[i]],
          [y[i], y[i] + ciHeight],
          color=color,
          linestyle='-',
          alpha=0.7,
          linewidth=4)
      plt.plot(
          [df[col].values[i],
           df[col].values[i]],
          [y[i], y[i] + ciHeight],
          color="black",
          linestyle='-',
          alpha=0.5,
          linewidth=2,
          dashes=[6, 2])
  # matplotlib 3 renamed barh's 'bottom' kwarg to 'y'; branch on version
  if int(matplotlib.__version__[0]) < 3:
    p = plt.barh(
        bottom=y,
        width=(df[colUpper]-df[colLower]).values,
        left=df[colLower],
        color=color,
        edgecolor='black',
        height=ciHeight,
        alpha=0.6,
        label=pltLabel)
  else:
    p = plt.barh(
        y=y,
        width=(df[colUpper]-df[colLower]).values,
        left=df[colLower],
        align="edge",
        color=color,
        edgecolor='black',
        height=ciHeight,
        alpha=0.6,
        label=pltLabel)
  if labelCol is not None:
    plt.yticks(y, df[labelCol].values, rotation='vertical');
'''
df0 = pd.DataFrame({'med':[1, 2, 3, 10], 'upper':[2, 5, 6, 12],
'lower':[-1, -2, -3, 4], 'categ':['a', 'b', 'c', 'd']})
PlotCI(df=df0, colUpper='upper', colLower='lower', y=None, col='med',
ciHeight=0.5, color='grey', labelCol='categ', pltLabel='')
'''
## compares the CI's for available labels in labeCol
# we do that for each slice with different color to compare
def PlotCIWrt(
    df, colUpper, colLower, sliceCols, labelCol, col=None,
    ciHeight=0.5, rotation = 0, addVerLines=[], logScale=False,
    lowerLim=None, pltTitle='', figSize=[5, 20]):
  """Compare CIs across labels, drawing each sliceCols combination in its
  own color via PlotCI.

  For every label in labelCol one horizontal band is reserved; within the
  band each slice gets a sub-bar of height 1/sliceNum.

  Args:
    df: one CI per (slice, label) row.
    colUpper, colLower: interval-bound columns.
    sliceCols: columns concatenated into one slice key ('slice_comb').
    labelCol: label column defining the bands.
    col: optional central-value column passed to PlotCI.
    ciHeight: overridden inside the loop to 1/sliceNum.
    rotation: y tick label rotation.
    addVerLines: x positions for reference vertical lines (become ticks).
      NOTE(review): mutable default argument — shared across calls.
    logScale: log-scale the x axis.
    lowerLim: lower x limit.
    pltTitle: plot title.
    figSize: base size; height is scaled by the number of labels / 20.

  Returns:
    the matplotlib figure.
  """
  df2 = Concat_stringColsDf(
      df=df.copy(),
      cols=sliceCols,
      colName='slice_comb',
      sepStr='-')
  labelSet = UniqueList(df2[labelCol].values)
  labelIndDf = pd.DataFrame({labelCol: labelSet})
  labelIndDf = labelIndDf.sort_values([labelCol])
  labelIndDf['labelInd'] = range(len(labelSet))
  n = len(labelIndDf)
  ## groupby each slice
  slicesSet = set(df2['slice_comb'])
  g = df2.groupby(['slice_comb'])
  sliceNum = len(g)
  sliceNames = list(g.groups.keys())
  sliceNames.sort()
  ColorFcn = GetColorGridFcn(sliceNum + 2)
  plt.figure(1);
  fig, ax = plt.subplots();
  fig.set_size_inches(figSize[0], figSize[1]*(n/20.0))
  for i in range(sliceNum):
    sliceName = sliceNames[i]
    df3 = g.get_group(sliceName)
    # outer merge guarantees one row per label (missing CIs filled with 0)
    df3 = pd.merge(df3, labelIndDf, on=[labelCol], how='outer')
    df3 = df3.sort_values([labelCol])
    df3 = df3.fillna(0)
    # each slice occupies 1/sliceNum of the label band, offset by i
    ciHeight = 1.0 / sliceNum
    shift = ciHeight * i
    y = [(float(x) + shift) for x in range(n)]
    assert (len(df3) == len(y)),("len(y) must be the same as merged df (df3)." +
                                 " This might be because of repeated rows in df3")
    PlotCI(
        df=df3, colUpper=colUpper, colLower=colLower, y=y, col=col,
        ciHeight=ciHeight, color=ColorFcn(i + 1), labelCol=labelCol,
        pltLabel=sliceName)
  # horizontal separators between label bands
  for j in range(n + 1):
    plt.axhline(y=j, color='grey', alpha=0.95)
  labels = [item.get_text() for item in ax.get_xticklabels()]
  labels = list(labelIndDf[labelCol].values)
  ax.set_yticklabels(labels)
  locs, labels = plt.yticks([(float(x) + 0.5) for x in range(n)], labels)
  plt.setp(labels, rotation=rotation, fontweight='bold', fontsize="large")
  for x in addVerLines:
    plt.axvline(x=x, color='orange', alpha=0.5)
  if logScale:
    plt.xscale('log')
  if len(addVerLines) > 0:
    #labels = [item.get_text() for item in ax.get_xticklabels()]
    #ax.set_xticklabels(map(str, addVerLines))
    ax = plt.gca() # grab the current axis
    ax.set_xticks(addVerLines) # choose which x locations to have ticks
    ax.set_xticklabels(addVerLines) # set the labels to display at those ticks
  #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
  #           ncol=2, mode="expand", borderaxespad=0.)
  plt.legend(
      bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
      prop={'weight':'bold', 'size':'large'})
  # NOTE(review): duplicated line below — harmless but redundant
  plt.xlim((lowerLim, None))
  plt.xlim((lowerLim, None))
  plt.title(
      pltTitle, fontname="Times New Roman",fontweight="bold",
      fontsize="x-large")
  return fig
  #plt.show()
#plt.show()
'''
## Example
df0 = pd.DataFrame({
'med':[1, 2, 3, 10, 11, 12, 1, 2],
'upper':[2, 5, 6, 12, 13, 16, 5, 6],
'lower':[-1, -2, -3, 4, 2, 2, 1, 2],
'categ':['a', 'b', 'c', 'd', 'a', 'c', 'd', 'a'],
'country':['JP', 'JP', 'JP', 'US', 'US', 'US', 'BR', 'BR']})
res = PlotCIWrt(
df=df0,
colUpper='upper',
colLower='lower',
sliceCols=['country'],
labelCol='categ',
col='med',
ciHeight=0.5,
rotation = 0,
pltTitle="WTF is going on?",
figSize=[10, 30])
'''
## this function will partition a df using keyCol
# for which their row (so maybe involve other columns to check conditions)
# satisfy conditions
# any combination of keys which passes the condition at least once will be
# considered as satisfy
def PartDf_byKeyCols_wrtCond(
    df, keyCols, condDict, passColName='passesCond'):
  """Label each keyCols combination by whether any of its rows satisfies
  condDict (via BuildCondInd); a combination passing at least once counts
  as passing.

  Returns:
    dict with 'dfLabeled' (df plus the boolean passColName column) and
    'keyDfLabeled' (one labeled row per unique key combination).
  """
  uniqueKeys = df[keyCols].drop_duplicates()
  passingRows = df[BuildCondInd(df=df, condDict=condDict)].copy()
  passingKeys = passingRows[keyCols].drop_duplicates()
  passingKeys[passColName] = True
  # keys never seen among passing rows come back NaN -> False
  keyDfLabeled = pd.merge(
      uniqueKeys, passingKeys, on=keyCols, how='left').fillna(False)
  dfLabeled = pd.merge(df, keyDfLabeled, on=keyCols, how='left')
  return {'dfLabeled': dfLabeled, 'keyDfLabeled': keyDfLabeled}
'''
df = pd.DataFrame({
'user_id':[1, 1, 2, 2, 3, 3, 4, 4],
'device':['pixel', 'sams', 'lg', 'lg', 'sams', 'pixel', 'nex', 'pixel'],
'country':['us', 'us', 'jp', 'jp', 'kr', 'kr', 'in', 'in']})
outDict = PartDf_byKeyCols_wrtCond(
df=df, keyCols=['user_id'], condDict={'device':['pixel'],
'country':['us', 'in']}, passColName='passesCond')
Mark(df)
Mark(outDict['dfLabeled'])
Mark(outDict['keyDfLabeled'])
'''
## create good pandas boxplots
def PandasBoxPlt(
    df, col, by, ylim=None, yscale=None, pltTitle=None, figSize=None):
  """Grouped pandas boxplot of col by `by`, with thick box/median lines,
  fliers hidden and means shown."""
  boxprops = dict(linestyle='-', linewidth=4, color='k')
  medianprops = dict(linestyle='-', linewidth=4, color='k')
  bp = df.boxplot(
      column=col, by=by,
      showfliers=False, showmeans=True,
      boxprops=boxprops, medianprops=medianprops)
  if yscale is not None:
    plt.yscale(yscale)
  # bp may be a single axes or an array of axes; normalize to a flat array
  flatAxes = np.asarray(bp).reshape(-1)
  for subAx in flatAxes:
    subAx.set_xlabel('')
  fig = flatAxes[0].get_figure()
  if figSize is not None:
    fig.set_size_inches(figSize[0], figSize[1])
  plt.xticks(rotation=45)
  curAxes = plt.gca()
  if pltTitle is not None:
    plt.title(pltTitle)
  if ylim is not None:
    curAxes.set_ylim(ylim)
  return plt.show()
def Plt_compareUsageSet(
    df, unitCol, usageCol, compareCol=None, excludeValues=[],
    mapToOther=["UNKNOWN", "MOBILE_UNKNOWN"], removeOther=True,
    setLabelsNumMax=15, bpPltTitle=None):
  """Compare, across compareCol, the SET of usageCol values each unit has.

  Per unit (unitCol) and compare group, the distinct usageCol values are
  collapsed into a sorted tuple; the distribution of those sets is plotted
  across compare groups, plus a boxplot of per-unit set sizes.

  Args:
    df: input frame. NOTE(review): when compareCol is None a placeholder
      column "..." is added to df in place (caller's frame is mutated).
    unitCol: unit identifier (e.g. user id).
    usageCol: the categorical usage column.
    compareCol: slicing column; None compares everything as one group.
    excludeValues: usageCol values dropped before any processing.
    mapToOther: labels force-mapped to "OTHER" in the first remapping.
      NOTE(review): mutable default arguments here are shared across calls.
    removeOther: drop the "OTHER" bucket after each remapping.
    setLabelsNumMax: max number of distinct set labels kept for the plot.
    bpPltTitle: boxplot title; auto-generated when None.

  Returns:
    the LabelDistbn_acrossSlice result dict, with 'dfCount' (per-unit
    distinct-usage counts) added.
  """
  if compareCol is None:
    compareCol = "..."
    df[compareCol] = "..."
  if len(excludeValues) > 0:
    df = df[~df[usageCol].isin(excludeValues)]
  df2 = df[[unitCol, compareCol, usageCol]].copy()
  # collapse rare raw labels so the sets below stay small
  res = Remap_lowFreqCategs(
      df=df2, cols=[usageCol], newLabels="OTHER",
      otherLabelsToReMap=(["", "nan"] + mapToOther),
      freqThresh=10, labelsNumMax=30)
  df2 = res["df"]
  if removeOther:
    df2 = df2[df2[usageCol] != "OTHER"]
  g = df2.groupby([unitCol, compareCol], as_index=False)
  # each unit's usage becomes one sorted tuple (its usage "set")
  dfSet = g.agg({usageCol:lambda x: tuple(sorted(set(x)))})
  # collapse rare usage sets as well
  res = Remap_lowFreqCategs(
      df=dfSet, cols=[usageCol], newLabels="OTHER",
      otherLabelsToReMap=["", "nan"],
      freqThresh=5, labelsNumMax=setLabelsNumMax)
  dfSet = res["df"]
  if removeOther:
    dfSet = dfSet[dfSet[usageCol] != "OTHER"]
  pltTitle = usageCol + " set distbn " + " across " + unitCol + "s"
  res = LabelDistbn_acrossSlice(
      df=dfSet, sliceCol=compareCol, labelCol=usageCol,
      pltIt=True, pltTitle=pltTitle)
  dfCount = g.agg({usageCol:lambda x: len(set(x))})
  res["dfCount"] = dfCount
  if bpPltTitle is None:
    bpPltTitle = "# of " + usageCol + " across " + unitCol + "s"
  PandasBoxPlt(
      df=dfCount, col=usageCol, by=compareCol,
      ylim=[0, None], pltTitle=bpPltTitle)
  return res
def BirthYear_toAgeCateg(x, currentYear=None):
  """Map a birth year to a coarse age bucket ("other" for missing/0/NaN)."""
  if currentYear is None:
    currentYear = datetime.datetime.now().year
  if x is None or x == "" or x == 0 or math.isnan(x):
    return "other"
  age = currentYear - float(x)
  for maxAge, bucket in [(17, "<18"), (25, "18-25"), (35, "26-35"),
                         (50, "36-50")]:
    if age <= maxAge:
      return bucket
  return ">51"
def BirthYear_toAge(x, currentYear=None, minBirthYear=1940):
  """Convert a birth year to an age; None for missing or implausible years
  (before minBirthYear or in the future)."""
  if currentYear is None:
    currentYear = datetime.datetime.now().year
  missing = x is None or x == "" or x == 0 or math.isnan(x)
  if missing or x < minBirthYear or x > currentYear:
    return None
  return currentYear - float(x)
"""
BirthYear_toAgeCateg(1900)
"""
def Plt_compareDensity(
    df, compareCol, valueCol, compareValues=None):
  """Overlay KDE density curves of valueCol, one per value of compareCol."""
  if compareValues is None:
    compareValues = set(df[compareCol].values)
  for compareValue in compareValues:
    subset = df[df[compareCol] == compareValue]
    # KDE only (no histogram), one labeled curve per compare value
    sns.distplot(
        subset[valueCol], hist=False, kde=True,
        kde_kws={'linewidth': 3, "alpha": 0.75},
        label=compareValue)
  plt.legend(prop={'size': 8}, title=compareCol)
  plt.title('Compare Density Plot for Multiple ' + compareCol)
  plt.xlabel(valueCol)
  plt.ylabel('Density')
## drops (multiple) ending vowels from a string
def DropEndingVowels(s, minLength=2):
  """Strip trailing vowels (case-insensitive) from s, never shrinking it to
  minLength characters or fewer."""
  vowels = ("a", "o", "e", "u", "i")
  while len(s) > minLength and s[-1].lower() in vowels:
    s = s[:-1]
  return s
def DropEndingChars(s, chars, minLength=2):
    """Strip trailing characters found in `chars` (case-insensitive), keeping at least minLength characters."""
    while len(s) > minLength and s[-1].lower() in chars:
        s = s[:-1]
    return s
## abbreviates a string.
# first we abbreviate each word in a string (phrase)
# then we concat them back and abbreviate the whole phrase
def AbbrString(
    s,
    wordLength=6,
    replaceList=["/", "&", " and ", "-", ",", ";"],
    sep="-",
    totalLength=None,
    wordNumLimit=None):
    """Abbreviate a phrase.

    Each word is truncated to `wordLength` characters and stripped of
    trailing vowels; words are de-duplicated (order preserved), joined
    with `sep`, optionally limited to `wordNumLimit` words and
    `totalLength` characters, then the whole result is cleaned of
    trailing vowels and separator characters.
    """
    # normalize all separators to spaces before splitting into words
    for char in replaceList:
        s = s.replace(char, " ")
    # fix: use a distinct loop variable instead of shadowing `s`
    words = [w[0:wordLength] for w in s.split(" ")]
    words = [w for w in words if w not in ["", " ", " ", " "]]
    words = [DropEndingVowels(w) for w in words]
    # de-duplicate while preserving first-seen order
    words = list(collections.OrderedDict.fromkeys(words))
    # fix: removed stray debug print(sList) left in the original
    if wordNumLimit is not None:
        words = words[:wordNumLimit]
    s = sep.join(words)
    if totalLength is not None:
        s = s[0:totalLength]
    s = DropEndingVowels(s)
    # NOTE(review): multi-char entries like " and " can never match a
    # single trailing character — kept for compatibility; confirm intent
    s = DropEndingChars(s=s, chars=["/", "&", " and ", "-", sep, " ", ",", ";"])
    return s
"""
s = "language books/"
AbbrString(s, sep="-")
"""
## replace in pandas is slow
def ReplaceValues_dfCols_viaReplace(
    df, cols, values, newValues, newCols=None):
    """Replace `values` with `newValues` in df[cols] via DataFrame.replace.

    Results are written to `newCols` (defaults to `cols`); df is
    mutated and returned. Slower than the map-based variant.
    """
    if newCols is None:
        newCols = cols
    df[newCols] = df[cols].replace(dict(zip(values, newValues)))
    return df
def ReplaceValues_dfCols(df, cols, values, newValues, newCols=None):
    """Replace `values` with `newValues` in df[cols] via stack/map/unstack.

    Faster than DataFrame.replace; note that values absent from the
    mapping become NaN. df is mutated and returned.
    """
    if newCols is None:
        newCols = cols
    mapping = pd.Series(newValues, values)
    df[newCols] = df[cols].stack().map(mapping).unstack()
    return df
"""
import datetime
import pandas as pd
import numpy as np
import string
n = 10000
m = 500
df = pd.DataFrame(
pd.DataFrame(
np.random.choice(list(string.ascii_letters), n * m * 3) \
.reshape(3, -1)).sum().values.reshape(n, -1))
cols = [0, 1]
u = np.unique(df[cols])
fromSeries = pd.Series(u)
toSeries = fromSeries + "XXX"
fromValues = fromSeries.values
toValues = toSeries.values
a = datetime.datetime.now()
df0 = ReplaceValues_dfCols(
df=df.copy(), cols=cols, values=fromValues, newValues=toValues)
b = datetime.datetime.now()
time1 = b-a
print(time1)
a = datetime.datetime.now()
df1 = ReplaceValues_dfCols_viaReplace(
df=df.copy(), cols=cols, values=fromValues,
newValues=toValues, newCols=None)
b = datetime.datetime.now()
time2 = b-a
print(time2)
print(time2.total_seconds() / time1.total_seconds())
"""
def AbbrStringCols(
    df, cols, newCols=None, wordLength=6,
    replaceList=["/", "&", " and ", "-"], sep="-",
    totalLength=None, wordNumLimit=None):
    """Abbreviate every distinct value found in df[cols].

    Returns a dict with the rewritten frame ("df") and a mapping frame
    ("mapDf") from original to abbreviated values.
    """
    values = np.unique(df[cols])
    abbrValues = [
        AbbrString(
            s=v, wordLength=wordLength, replaceList=replaceList,
            sep=sep, totalLength=totalLength, wordNumLimit=wordNumLimit)
        for v in values]
    mapDf = pd.DataFrame({"value": values, "abbr_values": abbrValues})
    df = ReplaceValues_dfCols(
        df=df, cols=cols, values=values, newValues=abbrValues, newCols=newCols)
    return {"df": df, "mapDf": mapDf}
"""
df = pd.DataFrame({
"col":["life is beautiful", "i like mountains", "ok", "cool"]})
#AbbrStringCols(df, cols=["col"])
AbbrStringCols(df=df, cols=["col"], totalLength=10, wordNumLimit=None)
"""
## convert data.frame to code
def ConvertDf_toCode(df):
    """Render df as an executable `pd.DataFrame(...)` construction string,
    rewriting bare nan literals so the result evaluates cleanly."""
    body = str(df.to_dict()).replace(" nan", " float('nan')")
    return "df = pd.DataFrame( %s )" % body
"""
df = pd.DataFrame({"a":[1, 2, 3], "b":[1, 2, 3]})
ConvertDf_toCode(df)
"""
| google/expt-analysis | python/data_analysis.py | data_analysis.py | py | 82,173 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "re.sub",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 1113,
"usa... |
7262256391 | from http import HTTPStatus
from flask import current_app, jsonify, request
from app.models.vacine_model import Vacine
from sqlalchemy.exc import IntegrityError
from app.exc.errors import CpfInvalid
from app.services.verif_data import verify_data
from app.services.generate_data import data_generate
def get_vacines():
    """Return every vaccination record as a JSON list with status 200."""
    serialized = [
        {
            "cpf": record.cpf,
            "name": record.name,
            "vaccine_name": record.vaccine_name,
            "health_unit_name": record.health_unit_name,
            "first_shot_date": record.first_shot_date,
            "second_shot_date": record.second_shot_date,
        }
        for record in Vacine.query.all()
    ]
    return jsonify(serialized), 200
def create_vacine():
    """Create a vaccination record from the request's JSON body.

    Returns 201 with the new record on success, 400 for malformed
    payloads or missing keys, 409 when the CPF already exists.
    """
    data = request.get_json()
    verify_data(data)
    for key in data.keys():
        if type(data[key]) != str:
            # fix: this error previously returned an implicit 200;
            # a malformed field is a client error
            return {"error": f" A chave {key} está em um formato inválido."}, HTTPStatus.BAD_REQUEST
    try:
        new_vaccine = Vacine(
            cpf=data["cpf"],
            name=data["name"],
            vaccine_name=data["vaccine_name"],
            health_unit_name=data["health_unit_name"],
            # NOTE(review): both shot dates are generated here rather than
            # taken from the payload — confirm this is intended
            first_shot_date=data_generate(),
            second_shot_date=data_generate()
        )
        session = current_app.db.session
        session.add(new_vaccine)
        session.commit()
        return jsonify(new_vaccine), 201
    except IntegrityError:
        return {"message": "CPF já cadastrado."}, HTTPStatus.CONFLICT
    except CpfInvalid:
        return {"message": "O CPF não está no formato correto."}, HTTPStatus.BAD_REQUEST
    except KeyError as err:
        return {"message": f"Está faltando a Key {str(err)}."}, HTTPStatus.BAD_REQUEST
| Kenzie-Academy-Brasil-Developers/q3-sprint5-vacinacao-theogandara | app/controllers/vacine_controller.py | vacine_controller.py | py | 1,750 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "app.models.vacine_model.Vacine.query.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.models.vacine_model.Vacine.query",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "app.models.vacine_model.Vacine",
"line_number": 11,
"u... |
2063946987 | from loader import dp
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from loguru import logger
from datetime import datetime
@dp.message_handler(commands='reload', state='*')
@dp.message_handler(Text(equals='reload',
                    ignore_case=True), state='*')
async def cmd_reload(message: types.Message,
                     state: FSMContext) -> None:
    """
    Reset the finite-state machine for the current user.

    Triggered by the /reload command or the bare text "reload"
    (case-insensitive) in any state.

    :param message: types.Message - the user's message
    :param state: FSMContext - the finite-state machine context
    """
    cur_state = await state.get_state()
    # log who triggered the reload and which state they were leaving
    logger.info(
        f'\nПользователь: {message.from_user.full_name}, '
        f'id: {message.from_user.id}, выполнил перезагрузку,'
        f'пользователь был в состоянии {cur_state},'
        f'дата: {datetime.now()}'
    )
    await message.answer('Перезагрузка')
    await state.reset_state()
| Taiven396/tickets_bot | handlers/reload.py | reload.py | py | 1,059 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "aiogram.types.Message",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 13,
"usage_type": "name"
},
{
"api_name":... |
13941876090 | from argparse import ArgumentParser
from sudoku_solver import SudokuSolver
from sudoku import Sudoku
def get_args():
    """Parse the command line; --sudoku (path to the puzzle file) is required."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--sudoku', required=True)
    parsed = arg_parser.parse_args()
    return parsed
def main():
    """Load the sudoku given by --sudoku, solve it, and print the solution."""
    args = get_args()
    puzzle = Sudoku.from_file(args.sudoku)
    solved = SudokuSolver(puzzle).solve()
    solved.print_sudoku()


if __name__ == '__main__':
    main()
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sudoku.Sudoku.from_file",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sudoku.Sudoku",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sudoku_s... |
38354134434 | from dataclasses import dataclass
from typing import Optional, Tuple
import torch.nn as nn
import torch
from transformers.models.dpr.modeling_dpr import DPRReaderOutput
from transformers.modeling_outputs import QuestionAnsweringModelOutput, ModelOutput, SequenceClassifierOutput
from transformers.models.vilt.modeling_vilt import ViltForImagesAndTextClassificationOutput
from transformers import VisualBertForQuestionAnswering, VisualBertForVisualReasoning, LxmertForQuestionAnswering
from transformers import ViltProcessor, ViltForImagesAndTextClassification
from transformers import BertForQuestionAnswering
from meerqat.train.losses import _calc_mml
class Trainee(nn.Module):
    """Base class for all Trainee models (to be trained by a Trainer).

    Subclasses implement a forward() that returns the loss between output
    and target (as a tuple, dict or ModelOutput); the actual forward pass
    is delegated to the wrapped `model` attribute.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
@dataclass
class DPRReaderForQuestionAnsweringOutput(DPRReaderOutput):
    """Same as DPRReaderOutput with an extra loss attribute (or as QuestionAnsweringModelOutput with relevance_logits)

    N. B. unfortunately we have to redefine everything so that loss is the first attribute
    """

    # combined span + switch loss; None at inference time
    loss: Optional[torch.FloatTensor] = None
    # per-token answer-span scores
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    # per-passage relevance scores used for re-ranking
    relevance_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MultiPassageBERTOutput(QuestionAnsweringModelOutput):
    """
    Same as QuestionAnsweringModelOutput but with start and end log-probabilities
    (equivalent to softmax(start_logits) when there is only one passage per question)
    """

    # log-probabilities normalized jointly over all passages of a question
    start_log_probs: torch.FloatTensor = None
    end_log_probs: torch.FloatTensor = None
@dataclass
class BERTRankerOutput(QuestionAnsweringModelOutput):
    """
    Same as MultiPassageBERTOutput but with relevance_logits important for ranking
    """

    # redefined here so that loss stays the first attribute
    loss: Optional[torch.FloatTensor] = None
    # per-passage ranking scores
    relevance_logits: torch.FloatTensor = None
@dataclass
class DPRBiEncoderOutput(ModelOutput):
    """
    Outputs from the question and context encoders
    (same as DPRQuestionEncoderOutput, DPRContextEncoderOutput with prefixes)
    """

    # question-encoder outputs
    question_pooler_output: Optional[torch.FloatTensor] = None
    question_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    question_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # context/passage-encoder outputs
    context_pooler_output: Optional[torch.FloatTensor] = None
    context_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    context_attentions: Optional[Tuple[torch.FloatTensor]] = None
class DPRBiEncoder(nn.Module):
    """DPR bi-encoder: separate BERT-based encoders for questions and contexts.

    Adapted from https://github.com/facebookresearch/DPR/blob/main/dpr/models/biencoder.py
    """

    def __init__(self, question_model, context_model):
        """
        Parameters
        ----------
        question_model: transformers.DPRQuestionEncoder
            Encoder based on BERT used to encode the question/query
        context_model: transformers.DPRContextEncoder
            Encoder based on BERT used to encode the context/evidence/passage
            ('context' is confusing IMO but kept for consistency with DPR)
        """
        super().__init__()
        self.question_model = question_model
        self.context_model = context_model

    def forward(self, question_inputs, context_inputs, return_dict=None):
        """Embed questions and contexts with their respective encoders.

        Parameters
        ----------
        question_inputs: dict[torch.LongTensor]
            usual BERT inputs, input_ids of shape (N, L);
            see transformers.DPRQuestionEncoder
        context_inputs: dict[torch.LongTensor]
            usual BERT inputs, input_ids of shape (N*M, L);
            see transformers.DPRContextEncoder
        return_dict: bool, optional
        """
        if return_dict is None:
            return_dict = self.question_model.config.use_return_dict
        question_out = self.question_model(**question_inputs)
        context_out = self.context_model(**context_inputs)
        return DPRBiEncoderOutput(
            question_pooler_output=question_out.pooler_output,
            question_hidden_states=question_out.hidden_states,
            question_attentions=question_out.attentions,
            context_pooler_output=context_out.pooler_output,
            context_hidden_states=context_out.hidden_states,
            context_attentions=context_out.attentions)
class DPRReaderForQuestionAnswering(Trainee):
    """DPR reader wrapper: span-extraction loss plus passage-switch loss."""

    def forward(self,
                input_ids, attention_mask,
                start_positions=None, end_positions=None, answer_mask=None,
                return_dict=None, **kwargs):
        """Based on transformers.BertForQuestionAnswering and dpr.models.Reader

        Loss is only computed when start_positions and end_positions are
        given; answer_mask is then required too.
        """
        return_dict = return_dict if return_dict is not None else self.model.config.use_return_dict
        # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length
        N, M, L = input_ids.size()
        outputs = self.model(input_ids, attention_mask, return_dict=True, **kwargs)

        # compute loss
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # flatten the passage dimension: one row per (question, passage)
            start_positions = start_positions.view(N * M, -1)
            end_positions = end_positions.view(N * M, -1)
            answer_mask = answer_mask.view(N * M, -1)
            start_logits, end_logits, relevance_logits = outputs[:3]
            start_logits = start_logits.view(N * M, -1)
            end_logits = end_logits.view(N * M, -1)
            relevance_logits = relevance_logits.view(N * M)
            answer_mask = answer_mask.to(device=relevance_logits.device, dtype=torch.float32)

            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = nn.CrossEntropyLoss(reduction='none', ignore_index=ignored_index)

            # compute switch loss: the relevant passage is assumed to be at index 0
            relevance_logits = relevance_logits.view(N, M)
            switch_labels = torch.zeros(N, dtype=torch.long, device=relevance_logits.device)
            switch_loss = torch.sum(loss_fct(relevance_logits, switch_labels))

            # compute span loss: one term per answer occurrence, masked by answer_mask
            start_losses = [(loss_fct(start_logits, _start_positions) * _span_mask)
                            for (_start_positions, _span_mask)
                            in zip(torch.unbind(start_positions, dim=1), torch.unbind(answer_mask, dim=1))]
            end_losses = [(loss_fct(end_logits, _end_positions) * _span_mask)
                          for (_end_positions, _span_mask)
                          in zip(torch.unbind(end_positions, dim=1), torch.unbind(answer_mask, dim=1))]
            loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + \
                          torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)
            # keep the best-scoring passage per question before marginalizing
            loss_tensor = loss_tensor.view(N, M, -1).max(dim=1)[0]
            span_loss = _calc_mml(loss_tensor)
            total_loss = span_loss + switch_loss

        if not return_dict:
            outputs = outputs.to_tuple()
            return ((total_loss,) + outputs) if total_loss is not None else outputs

        return DPRReaderForQuestionAnsweringOutput(loss=total_loss, **outputs)
class MultiPassageBERT(BertForQuestionAnswering):
    """
    PyTorch/Transformers implementation of Multi-passage BERT by Wang et. al (based on the global normalization by Clark et. al)
    i.e. groups passages per question before computing the softmax (and the NLL loss)
    so that spans scores are comparable across passages

    Code based on transformers.BertForQuestionAnswering, dpr.models.Reader
    and https://github.com/allenai/document-qa/blob/master/docqa/nn/span_prediction.py

    N. B. differences with DPRReaderForQuestionAnswering:
        * no projection layer between BERT and QA-extraction
        * no re-ranking (TODO implement MultiPassageDPRReader?)
        * global normalization

    References
    ----------
    @inproceedings{wang_multi-passage_2019,
        address = {Hong Kong, China},
        title = {Multi-passage {BERT}: {A} {Globally} {Normalized} {BERT} {Model} for {Open}-domain {Question} {Answering}},
        shorttitle = {Multi-passage {BERT}},
        url = {https://www.aclweb.org/anthology/D19-1599},
        doi = {10.18653/v1/D19-1599},
        urldate = {2021-06-14},
        booktitle = {Proceedings of the 2019 {Conference} on {Empirical} {Methods} in {Natural} {Language} {Processing} and the 9th {International} {Joint} {Conference} on {Natural} {Language} {Processing} ({EMNLP}-{IJCNLP})},
        publisher = {Association for Computational Linguistics},
        author = {Wang, Zhiguo and Ng, Patrick and Ma, Xiaofei and Nallapati, Ramesh and Xiang, Bing},
        month = nov,
        year = {2019},
        pages = {5878--5882}
    }

    @inproceedings{clark_simple_2018,
        address = {Melbourne, Australia},
        title = {Simple and {Effective} {Multi}-{Paragraph} {Reading} {Comprehension}},
        url = {https://aclanthology.org/P18-1078},
        doi = {10.18653/v1/P18-1078},
        urldate = {2021-07-08},
        booktitle = {Proceedings of the 56th {Annual} {Meeting} of the {Association} for {Computational} {Linguistics} ({Volume} 1: {Long} {Papers})},
        publisher = {Association for Computational Linguistics},
        author = {Clark, Christopher and Gardner, Matt},
        month = jul,
        year = {2018},
        pages = {845--855},
    }
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # normalizes over dim 1, i.e. jointly over all passages of a question
        # once logits are reshaped to (N, M*L) in forward
        self.log_softmax = nn.LogSoftmax(1)

    def forward(self,
                input_ids,
                start_positions=None, end_positions=None, answer_mask=None,
                return_dict=None, **kwargs):
        """
        notations:
            N - number of distinct questions
            M - number of passages per question in a batch
            L - sequence length

        Parameters
        ----------
        input_ids: Tensor[int]
            shape (N * M, L)
            There should always be a constant number of passages (relevant or not) per question
        start_positions, end_positions: Tensor[int], optional
            shape (N, M, max_n_answers)
            The answer might be found several time in the same passage, maximum `max_n_answers` times
            Defaults to None (i.e. don’t compute the loss)
        answer_mask: Tensor[int], optional
            shape (N, M, max_n_answers)
            Used to mask the loss for answers that are not `max_n_answers` times in the passage
            Required if start_positions and end_positions are specified
        **kwargs: additional arguments are passed to BERT after being reshape like
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(input_ids, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # split the 2-channel head output into start and end scores
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        # compute loss
        total_loss, start_log_probs, end_log_probs = None, None, None
        if start_positions is not None and end_positions is not None:
            n_times_m, L = input_ids.size()
            M = start_positions.size(1)
            assert n_times_m % M == 0
            N = n_times_m//M
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = L
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = nn.NLLLoss(reduction='none', ignore_index=ignored_index)

            # reshape from (N * M, L) to (N, M * L) so that all M passages related to the same question
            # will share the same softmax normalization
            start_logits, end_logits = start_logits.view(N, M*L), end_logits.view(N, M*L)
            start_log_probs, end_log_probs = self.log_softmax(start_logits), self.log_softmax(end_logits)
            # after computing the softmax, reshape back to (N * M, L)
            # because the last dimension, L, must match the position indices (i.e. class label) in start_positions, end_positions
            start_log_probs, end_log_probs = start_log_probs.view(N*M, L), end_log_probs.view(N*M, L)
            start_logits, end_logits = start_logits.view(N*M, L), end_logits.view(N*M, L)

            # reshape to match model output
            start_positions, end_positions = start_positions.view(N*M, -1), end_positions.view(N*M, -1)
            answer_mask = answer_mask.to(device=input_ids.device, dtype=torch.float32).view(N*M, -1)

            # compute span loss for each answer position in passage (in range `max_n_answers`)
            start_losses = [(loss_fct(start_log_probs, _start_positions) * _span_mask)
                            for (_start_positions, _span_mask)
                            in zip(torch.unbind(start_positions, dim=1), torch.unbind(answer_mask, dim=1))]

            end_losses = [(loss_fct(end_log_probs, _end_positions) * _span_mask)
                          for (_end_positions, _span_mask)
                          in zip(torch.unbind(end_positions, dim=1), torch.unbind(answer_mask, dim=1))]
            loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + \
                torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)

            # keep the maximum per passage for each question
            loss_tensor = loss_tensor.view(N, M, -1).max(dim=1)[0]
            total_loss = _calc_mml(loss_tensor)

        if not return_dict:
            output = (start_logits, end_logits, start_log_probs, end_log_probs) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return MultiPassageBERTOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            start_log_probs=start_log_probs,
            end_log_probs=end_log_probs,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class BERTRanker(BertForQuestionAnswering):
    """
    BERT-based Ranker Based on transformers.BertForQuestionAnswering
    and https://github.com/allenai/document-qa/blob/master/docqa/nn/span_prediction.py
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # scores each passage's relevance from its [CLS] representation
        self.qa_classifier = nn.Linear(self.config.hidden_size, 1)

    def forward(self,
                input_ids,
                switch_labels=None,
                N=None, M=None,
                indices=None, relevants=None,
                return_dict=None, **kwargs):
        """
        notations:
            N - number of distinct questions
            M - number of passages per question in a batch
            L - sequence length

        Parameters
        ----------
        input_ids: Tensor[int]
            shape (N * M, L)
            There should always be a constant number of passages (relevant or not) per question
        switch_labels: Tensor[int], optional
            per-question target passage indices for the switch loss;
            when None (default), no loss is computed
        **kwargs: additional arguments are passed to BERT after being reshape like
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(input_ids, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        # [CLS] token representation -> one relevance score per passage
        relevance_logits = self.qa_classifier(sequence_output[:, 0, :])

        switch_loss = None
        # fix: guard against the default None (len(None) raised TypeError)
        if switch_labels is not None and len(switch_labels) > 0:
            loss_fct = nn.CrossEntropyLoss(reduction='mean')
            # compute switch loss: group passages per question
            relevance_logits = relevance_logits.view(N, M)
            switch_loss = loss_fct(relevance_logits, switch_labels)
        if not return_dict:
            # fix: `(relevance_logits)` was not a tuple, breaking this path
            output = (relevance_logits,) + outputs[2:]
            return ((switch_loss,) + output) if switch_loss is not None else output

        return BERTRankerOutput(
            loss=switch_loss,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            relevance_logits=relevance_logits,
        )
class ViLTRanker(ViltForImagesAndTextClassification):
    """
    ViLT-based Ranker Based on transformers.ViltForImagesAndTextClassification
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Classifier head: concatenated per-image pooler outputs -> one relevance score
        num_images = self.config.num_images
        self.qa_classifier = nn.Sequential(
            nn.Linear(self.config.hidden_size * num_images, self.config.hidden_size * num_images),
            nn.LayerNorm(self.config.hidden_size * num_images),
            nn.GELU(),
            nn.Linear(self.config.hidden_size * num_images, 1),
        )

    def forward(self,
                input_ids, pixel_values, pixel_mask,
                output_attentions=None,
                output_hidden_states=None,
                switch_labels=None,
                N=None, M=None,
                indices=None, relevants=None,
                return_dict=None, **kwargs):
        """
        notations:
            N - number of distinct questions
            M - number of passages per question in a batch
            L - sequence length

        Parameters
        ----------
        input_ids: Tensor[int]
            shape (N * M, L)
            There should always be a constant number of passages (relevant or not) per question
        switch_labels: Tensor[int], optional
            per-question target passage indices for the switch loss;
            when None (default), no loss is computed
        **kwargs: additional arguments are passed to ViLT
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is not None and pixel_values.ndim == 4:
            # add dummy num_images dimension
            pixel_values = pixel_values.unsqueeze(1)
        num_images = pixel_values.shape[1]
        if num_images != self.config.num_images:
            raise ValueError(
                "Make sure to match the number of images in the model with the number of images in the input."
            )
        pooler_outputs = []
        hidden_states = [] if output_hidden_states else None
        attentions = [] if output_attentions else None
        for i in range(num_images):
            # forward every image through the model
            outputs = self.vilt(
                input_ids,
                pixel_values=pixel_values[:, i, :, :, :],
                pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None,
                image_token_type_idx=i + 1,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                **kwargs
            )
            pooler_output = outputs.pooler_output if return_dict else outputs[1]
            pooler_outputs.append(pooler_output)
            if output_hidden_states:
                hidden_states.append(outputs.hidden_states)
            if output_attentions:
                attentions.append(outputs.attentions)

        pooled_output = torch.cat(pooler_outputs, dim=-1)
        relevance_logits = self.qa_classifier(pooled_output)

        switch_loss = None
        # fix: guard against the default None (len(None) raised TypeError)
        if switch_labels is not None and len(switch_labels) > 0:
            loss_fct = nn.CrossEntropyLoss(reduction='mean')
            # compute switch loss: group passages per question
            relevance_logits = relevance_logits.view(N, M)
            switch_loss = loss_fct(relevance_logits, switch_labels)
        if not return_dict:
            output = (relevance_logits, hidden_states, attentions)
            return ((switch_loss,) + output) if switch_loss is not None else output

        return ViltForImagesAndTextClassificationOutput(
            loss=switch_loss,
            logits=relevance_logits,
            hidden_states=hidden_states,
            attentions=attentions,
        )
| mdsalem17/reranking | meerqat/train/trainee.py | trainee.py | py | 21,260 | python | en | code | null | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "transformers.models.dpr.modeling_dpr.DPRReaderOutput",
"line_number": 27,
"usage_type": "name"
},
{
... |
5820650421 | import hashlib
import json
import urllib.parse
from typing import Union, Dict
import asks
from asks.response_objects import Response
from spins_halp_line.constants import Credentials
from spins_halp_line.util import get_logger, SynchedCache
_cred_key = "resource_space"
_field_ids = {
"adventure_name": 86,
"player": 87
}
_base_url = "base_url"
_user = "user"
_secret = "secret"
_l = get_logger()
# search:
# [
# {
# "score":"0",
# "ref":"1001",
# "resource_type":"4",
# "has_image":"0",
# "is_transcoding":"0",
# "creation_date":"2020-11-18 19:13:51",
# "rating":"",
# "user_rating":"",
# "user_rating_count":"",
# "user_rating_total":"",
# "file_extension":"mp3",
# "preview_extension":"jpg",
# "image_red":"",
# "image_green":"",
# "image_blue":"",
# "thumb_width":"",
# "thumb_height":"",
# "archive":"0",
# "access":"0",
# "colour_key":"",
# "created_by":"1",
# "file_modified":"2020-11-18 19:13:51",
# "file_checksum":"",
# "request_count":"0",
# "new_hit_count":"8",
# "expiry_notification_sent":"0",
# "preview_tweaks":"0|1",
# "file_path":"",
# "modified":"2020-11-19 03:58:07",
# "group_access":"",
# "user_access":"",
# "field12":"2020-11-18 19:13",
# "field8":"Shipwreck Front Yard",
# "field3":"",
# "order_by":"",
# "total_hit_count":"8"
# }
# ]
# get_resource_data
# {
# "ref":"1001", // both
# "title":"", // not the title field in the ui lol
# "resource_type":"4", // both
# "has_image":"0", // both
# "is_transcoding":"0", // both
# "hit_count":"8",
# "new_hit_count":"8", // both
# "creation_date":"2020-11-18 19:13:51", // both
# "rating":"", // both
# "user_rating":"", // both
# "user_rating_count":"", // both
# "user_rating_total":"", // both
# "country":"",
# "file_extension":"mp3", // both
# "preview_extension":"jpg", // both
# "image_red":"", // both
# "image_green":"", // both
# "image_blue":"", // both
# "thumb_width":"", // both
# "thumb_height":"", // both
# "archive":"0", // both
# "access":"0", // both
# "colour_key":"", // both
# "created_by":"1", // both
# "file_path":"", // both
# "file_modified":"2020-11-18 19:13:51", // both
# "file_checksum":"", // both
# "request_count":"0", // both
# "expiry_notification_sent":"0", // both
# "preview_tweaks":"0|1", // both
# "geo_lat":"",
# "geo_long":"",
# "mapzoom":"",
# "disk_usage":"623803",
# "disk_usage_last_updated":"2020-11-18 19:13:52",
# "file_size":"623803",
# "preview_attempts":"1",
# "field12":"2020-11-18 19:13", // both (?)
# "field8":"Shipwreck Front Yard", // both (title)
# "field3":"", // both (?)
# "modified":"2020-11-19 03:58:07",
# "last_verified":"",
# "integrity_fail":"0",
# "google_vision_processed":"",
# "lock_user":""
# }
# Community boards are useful: https://groups.google.com/g/resourcespace?pli=1
# static cache
_global_cache = SynchedCache()
class RSResource(object):
@classmethod
async def for_room(cls, room_name):
# https://www.resourcespace.com/knowledge-base/user/special-search-terms
# todo: The caching logic here could use improvement. We cache the data from a particular room
# todo: so if we load the same room again we won't repeat those requests...but it seems wrong to
# todo: cache searches? This caching model is based on a read-only assumption - that the server
# todo: will be restarted if we make changes in the CMS. Maybe we should cache search results?
# todo: In any case, since these calls should mostly be made once, it's possible that any caching
# todo: is properly viewed as premature optimization.
files = await cls._get(
'do_search',
{
'search': f'room:{room_name}'
}
)
files = [f for f in files if f['field8'] == room_name]
return await cls._from_list(files)
@classmethod
async def _from_list(cls, resources):
result = []
for r in resources:
obj = RSResource(r)
await obj.load()
result.append(obj)
return result
_k_id = 'ref'
_k_ext = 'file_extension'
_k_ui_title = 'field8'
_k_d_url = 'data_url'
_k_adventure = 'adventure_name'
_k_player = 'player'
_k_room = 'room'
_k_date = 'date'
_k_duration = 'duration'
_k_path = 'path'
_extended_fields = [
_k_adventure,
_k_player,
_k_room,
_k_date,
_k_duration,
_k_path
]
_resource_types = {
'1': 'photo',
'2': 'document',
'3': 'video',
'4': 'audio'
}
def __init__(self, data: Union[Dict[str, str], str, int]):
global _global_cache
self._cache = _global_cache
self._data = {}
self._loaded = False
self._id = None
if isinstance(data, dict):
self._data = data
self._loaded_basic = True
# self._id = data.get(self._k_id)
elif isinstance(data, int) or isinstance(data, str):
self._id = int(data)
def _throw_if_not_loaded(self):
if not self._loaded:
raise ValueError(f'{self} has not had its fields loaded!')
async def load(self):
cache_key = self.id
# support caching results
# todo: this cache doesn't work
data = await self._cache.get(cache_key)
print(f'loading resource {self.id}')
if data is None:
data = await self.get_info()
data = await self.load_extended_fields(data)
self._data = data
# do this last so the extension is loaded
data[self._k_d_url] = await self.get_data_url()
await self._cache.set(cache_key, data)
self._data = data
self._loaded = True
async def load_extended_fields(self, data):
for field in (await self.get_all_fields()):
name = field['name']
if name in self._extended_fields:
data[name] = field['value']
return data
    @property
    def id(self):
        """Resource id; prefers loaded data, falls back to the constructor arg."""
        return self._data.get(self._k_id, self._id)

    @property
    def ext(self):
        """File extension of the resource (e.g. 'mp3')."""
        return self._data.get(self._k_ext)

    @property
    def title(self):
        """Title as shown in the ResourceSpace UI (field8)."""
        return self._data.get(self._k_ui_title)

    @property
    def url(self):
        """Direct download URL, filled in by load()."""
        return self._data.get(self._k_d_url)

    @property
    def adventure(self):
        """Extended field: adventure name."""
        return self._data.get(self._k_adventure)

    @property
    def player(self):
        """Extended field: player."""
        return self._data.get(self._k_player)

    @property
    def room(self):
        """Extended field: room."""
        return self._data.get(self._k_room)

    @property
    def date(self):
        """Extended field: date."""
        return self._data.get(self._k_date)

    @property
    def duration(self):
        """Extended field: duration."""
        return self._data.get(self._k_duration)

    @property
    def path(self):
        """Extended field: path."""
        return self._data.get(self._k_path)
async def get_data_url(self):
return await self._get(
'get_resource_path',
{
'ref': self.id,
'getfilepath': 0,
'extension': self.ext,
# 'generate': True,
# 'alternative': -1,
# 'size': ''
}
)
async def get_info(self):
    """Fetch the base metadata record for this resource id."""
    params = {'resource': self.id}
    return await self._get('get_resource_data', params)
# Example response JSON:
# [
# {"value": "Shipwreck Adventure", "resource_type_field": "86", "ref": "86", "name": "adventure_name",
# "title": "Adventure Name", "field_constraint": "0", "type": "3", "order_by": "0", "keywords_index": "1",
# "partial_index": "0", "resource_type": "0", "resource_column": "", "display_field": "1",
# "use_for_similar": "1", "iptc_equiv": "", "display_template": "", "tab_name": "", "required": "0",
# "smart_theme_name": "", "exiftool_field": "", "advanced_search": "1", "simple_search": "0", "help_text": "",
# "display_as_dropdown": "0", "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0",
# "hide_when_restricted": "0", "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0",
# "tooltip_text": "", "regexp_filter": "", "sync_field": "", "display_condition": "", "onchange_macro": "",
# "linked_data_field": "", "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0",
# "include_in_csv_export": "1", "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0",
# "frequired": "0", "fref": "86"},
# {"value": "", "resource_type_field": "87", "ref": "87", "name": "player", "title": "Player",
# "field_constraint": "0", "type": "0", "order_by": "0", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "", "display_field": "1", "use_for_similar": "1", "iptc_equiv": "",
# "display_template": "", "tab_name": "", "required": "0", "smart_theme_name": "", "exiftool_field": "",
# "advanced_search": "1", "simple_search": "0", "help_text": "", "display_as_dropdown": "0",
# "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0", "hide_when_restricted": "0",
# "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0", "tooltip_text": "", "regexp_filter": "",
# "sync_field": "", "display_condition": "", "onchange_macro": "", "linked_data_field": "",
# "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0", "include_in_csv_export": "1",
# "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0", "frequired": "0", "fref": "87"},
# {"value": "Shipwreck Yard Front", "resource_type_field": "88", "ref": "88", "name": "room", "title": "Room",
# "field_constraint": "", "type": "3", "order_by": "0", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "", "display_field": "1", "use_for_similar": "1", "iptc_equiv": "",
# "display_template": "", "tab_name": "", "required": "0", "smart_theme_name": "", "exiftool_field": "",
# "advanced_search": "1", "simple_search": "0", "help_text": "", "display_as_dropdown": "0",
# "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0", "hide_when_restricted": "0",
# "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0", "tooltip_text": "", "regexp_filter": "",
# "sync_field": "", "display_condition": "", "onchange_macro": "", "linked_data_field": "",
# "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0", "include_in_csv_export": "1",
# "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0", "frequired": "0", "fref": "88"},
# {"value": "Description", "resource_type_field": "8", "ref": "8", "name": "title", "title": "Title",
# "field_constraint": "", "type": "0", "order_by": "10", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "title", "display_field": "0", "use_for_similar": "1",
# "iptc_equiv": "2#005", "display_template": "", "tab_name": "", "required": "1", "smart_theme_name": "",
# "exiftool_field": "Title", "advanced_search": "1", "simple_search": "0", "help_text": "",
# "display_as_dropdown": "0", "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0",
# "hide_when_restricted": "0", "value_filter": "", "exiftool_filter": "", "omit_when_copying": "",
# "tooltip_text": "", "regexp_filter": "", "sync_field": "", "display_condition": "", "onchange_macro": "",
# "linked_data_field": "", "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0",
# "include_in_csv_export": "1", "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0",
# "frequired": "1", "fref": "8"},
# ...
# ]
async def get_all_fields(self):
    """Fetch every metadata field record (name/value pairs) for this resource."""
    query = {'resource': self.id}
    return await self._get('get_resource_field_data', query)
def _add_extended_field(self, field):
    """Store one extended field's value in the cached metadata dict.

    Bug fix: the value lives under the 'value' key of a field record, as
    load_extended_fields() and the example response show; ``field['']``
    always raised KeyError.
    """
    self._data[field['name']] = field['value']
@staticmethod
async def _get(function, params, unwrap=True) -> dict:
    """Perform a signed GET against the API and return the decoded JSON.

    Builds the query string for *function* + *params* and appends a
    sha256(secret + query) signature.  *unwrap* is currently unused
    (see the commented-out status-code check below).
    """
    base_url = Credentials[_cred_key][_base_url]
    params['function'] = function
    params['user'] = Credentials[_cred_key][_user]
    qstring = urllib.parse.urlencode(params)
    secret = Credentials[_cred_key][_secret]
    signer = hashlib.sha256()
    signer.update(f'{secret}{qstring}'.encode("utf-8"))
    request = f'{base_url}?{qstring}&sign={signer.hexdigest()}'
    result: Response = await asks.get(request)
    # print("-" * 60)
    # print(request)
    # print(">" * 5)
    # print(result)
    # print("\\/" * 5)
    # print(result.content.decode("utf-8"))
    # print("-" * 60)
    # if unwrap and result.status_code >= 200 and result.status_code < 300:
    result: dict = json.loads(result.content.decode("utf-8"))
    return result
def __str__(self):
    # Human-readable tag: resource id plus its (possibly not-yet-loaded) URL.
    return f'RSR[{self.id}] {self.url}'

def __repr__(self):
    # Same as str(); convenient when debugging collections of resources.
    return str(self)
| aeturnum/spins_halp_line | spins_halp_line/media/resource_space.py | resource_space.py | py | 13,353 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "spins_halp_line.util.get_logger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "spins_halp_line.util.SynchedCache",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 177,
"usage_type": "name"
},
{
"... |
from typing import Optional


class ListNode:
    """Node of a singly linked list."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Reverse the list starting at *head* and return the new head.

        Iterative pointer reversal: O(n) time, O(1) extra space.
        """
        previous = None
        current = head
        while current:
            following = current.next
            current.next = previous
            previous = current
            current = following
        return previous
| theRobertSan/LeetCode-Solutions-Python | 206.py | 206.py | py | 419 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
}
] |
11473045132 | '''
Terminal
!pip install dash==0.26.5 # The core dash backend
!pip install dash-html-components==0.12.0 # HTML components
!pip install dash-core-components==0.28.0 # Supercharged components
!pip install dash_bootstrap_components==0.13.1
'''
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
from dash import Dash, dcc, html
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# create dash
app = Dash(__name__)
colors = {
'background': '#FFFFFF',
'text': '#288CC2'
}
### bar chart example
df = pd.DataFrame({
"Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
"Amount": [4, 1, 2, 2, 4, 5],
"City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})
fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
fig.update_layout(
#plot_bgcolor=colors['background'],
#paper_bgcolor=colors['background'],
font_color=colors['text']
)
### scatter plot example
df2 = pd.read_csv('https://gist.githubusercontent.com/chriddyp/5d1ea79569ed194d432e56108a04d188/raw/a9f9e8076b837d541398e999dcbac2b2826a81f8/gdp-life-exp-2007.csv')
fig2 = px.scatter(df2, x="gdp per capita", y="life expectancy",
size="population", color="continent", hover_name="country",
log_x=True, size_max=60)
fig2.update_layout(
font_color=colors['text'])
### violin plot example 1
df3 = pd.DataFrame(
{'x':np.tile(['no', 'yes'], 80000),
'y':np.random.normal(0, 1, 160000),
'cl':np.repeat([0, 1], 80000)
}
)
fig3 = px.violin(df3, x="x", y="y", color='cl', box=True, hover_data=df3.columns)
fig4 = px.violin(df3, y="y", color='cl',
violinmode='overlay', # draw violins on top of each other
# default violinmode is 'group' as in example above
hover_data=df3.columns)
### violin plot example 2
df4 = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/violin_data.csv")
fig5 = go.Figure()
fig5.add_trace(go.Violin(x=df4['day'][ df4['smoker'] == 'Yes' ],
y=df4['total_bill'][ df4['smoker'] == 'Yes' ],
legendgroup='Yes', scalegroup='Yes', name='Yes',
side='negative',
line_color='blue')
)
fig5.add_trace(go.Violin(x=df4['day'][ df4['smoker'] == 'No' ],
y=df4['total_bill'][ df4['smoker'] == 'No' ],
legendgroup='No', scalegroup='No', name='No',
side='positive',
line_color='orange')
)
fig5.update_traces(meanline_visible=True) # orientation='h' -> horizontal
fig5.update_layout(violingap=0, violinmode='overlay')
### subplot example
df5 = px.data.iris()
fig6 = make_subplots(rows=1,
cols=2,
subplot_titles=[
'Fruit', # 1. subplot title
'City' # 2. subplot title
])
fig6.add_trace(go.Bar(x=df['Fruit'], y=df['Amount']),row=1, col=1)
fig6.add_trace(go.Bar(x=df['City'], y=df['Amount'], text=df['Amount'],
textposition='auto',), row=1, col=2)
fig6.update_layout(title='Count', title_x=0.5)
# set the web layout
app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[
html.H1(
children='Hello Dash',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='Dash: A web application framework for your data.', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='example-graph-1',
figure=fig
),
dcc.Graph(
id='example-graph-2',
figure=fig2
),
dcc.Graph(
id='example-graph-3',
figure=fig3
),
dcc.Graph(
id='example-graph-5',
figure=fig5
),
dcc.Graph(
id='example-graph-6',
figure=fig6
),
])
if __name__ == '__main__':
app.run_server(debug=True)
| hsyho11/python-plotly-dash | plotly_example.py | plotly_example.py | py | 4,180 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dash.Dash",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "plotly.express.bar",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"l... |
19583758283 | """
This file is used to perform a random hyperparameter search on the Coco dataset using the baseline image captioner.
For more info on the ImageCaptionerBaseline class, please check out the docstrings in the image_captioning.py file.
"""
# Package loading
import argparse
import os
import sys
# Make the project root importable when running from scripts/.
sys.path.append('..')

# Depending on the platform/IDE used, the home directory might be the socraticmodels or the
# socraticmodels/scripts directory. The following ensures that the current directory is the scripts folder.
try:
    os.chdir('scripts')
except FileNotFoundError:
    # Already inside scripts/ - keep the current working directory.
    pass
# Local imports
from scripts.image_captioning import ImageCaptionerBaseline
def parse_arguments():
    """
    Parses the arguments for the baseline COCO captioning hyperparameter tuning.

    :return: argparse.Namespace holding all tuning options.
    """

    def _str_to_bool(value):
        """Parse a boolean CLI value explicitly."""
        if isinstance(value, bool):
            return value
        lowered = str(value).strip().lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))

    # init argparser
    parser = argparse.ArgumentParser(description='Baseline Image Captioning Hyperparameter tuning')

    # Additional variables
    parser.add_argument('--n-images', type=int, default=50, help='# images to include in the dataset')
    parser.add_argument('--set-type', type=str, default='train', help='train/valid/test set')
    parser.add_argument('--n-iterations', type=int, default=100, help='# of run iterations')
    parser.add_argument('--n-captions', type=int, default=10, help='# captions the LM should generate')
    parser.add_argument('--lm-max-length', type=int, default=40, help='max output length the LM should generate')
    # Bug fix: type=bool treats any non-empty string (including "False") as
    # True; parse the text explicitly instead.  Default behaviour unchanged.
    parser.add_argument('--lm-do-sample', type=_str_to_bool, default=True,
                        help='whether to use sampling during generation')
    parser.add_argument('--lm-temp-min', type=float, default=0.5, help='minimum temperature param for the lm')
    parser.add_argument('--lm-temp-max', type=float, default=1, help='maximum temperature param for the lm')
    parser.add_argument('--n-objects-min', type=int, default=5, help='minimum number of objects in the LM prompt')
    parser.add_argument('--n-objects-max', type=int, default=15, help='maximum number of objects in the LM prompt')
    parser.add_argument('--n-places-min', type=int, default=1, help='minimum number of places in the LM prompt')
    parser.add_argument('--n-places-max', type=int, default=6, help='maximum number of places in the LM prompt')
    parser.add_argument('--caption-strategies', nargs="+", default=None)

    # parse args
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # Parse the arguments.
    args = parse_arguments()
    # Instantiate the baseline image captioner class.
    image_captioner = ImageCaptionerBaseline(n_images=args.n_images, set_type=args.set_type)
    # Run the hyperparameter search over the full CLI-configured grid.
    image_captioner.random_parameter_search(
        n_iterations=args.n_iterations, n_captions=args.n_captions, lm_max_length=args.lm_max_length,
        lm_do_sample=args.lm_do_sample, lm_temp_min=args.lm_temp_min, lm_temp_max=args.lm_temp_max,
        n_objects_min=args.n_objects_min, n_objects_max=args.n_objects_max, n_places_min=args.n_places_min,
        n_places_max=args.n_places_max, caption_strategies=args.caption_strategies
    )
| milenakapralova/socraticmodels | scripts/coco_caption_base_hp_tune.py | coco_caption_base_hp_tune.py | py | 3,103 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
... |
72416331709 | from socket import *
import time
import osascript
from multiprocessing import Process, Manager, Value
import os
# osascript -e 'display notification "{}" with title "{}"'
# NOTE(review): this module-level `volume` is never read; the shared value
# actually lives in the multiprocessing.Value passed to the workers.
volume = 0
def recieve_data(val):
    """Accept one TCP client on port 7777 and translate its 4-byte ASCII
    commands into actions, writing volume levels into the shared *val*.

    Protocol (integers sent as text):
      1111 - mute until a 2222 is received
      2222 - unmute (only read inside the mute loop)
      3333 - take a screenshot to screen.png
      4444 - shut the machine down
      < 300 - treat as a volume level and store it in val
    """
    serverSock = socket(AF_INET, SOCK_STREAM)
    serverSock.bind(('', 7777))
    serverSock.listen(1)
    connectionSock, addr = serverSock.accept()
    print("Client address : ", str(addr))
    while True:
        print("val : ", val.value)
        try:
            vol = int(connectionSock.recv(4).decode('utf-8'))
            if vol == 1111:
                print("mute")
                osascript.osascript('set volume output muted TRUE')
                val.value = 0
                # Block here, ignoring everything except the unmute code.
                while True:
                    vol = int(connectionSock.recv(4).decode('utf-8'))
                    if vol == 2222:
                        osascript.osascript('set volume output muted FALSE')
                        break
            if vol == 3333:
                print("screenshot")
                os.system("screencapture screen.png")
                vol = 0
            if vol == 4444:
                # NOTE(review): despite the "fix volume" message this shuts
                # the machine down - confirm that is intended.
                print("fix volume")
                osascript.osascript('tell app "System Events" to shut down')
                time.sleep(5)
            if vol < 300:
                val.value = vol
        except:
            # NOTE(review): bare except hides decode/connection errors;
            # consider catching ValueError/OSError explicitly.
            pass
def volume_control(val):
    """Continuously push the shared volume value to the OS (~10x per second)."""
    while True:
        print("volume : ", val.value)
        osascript.osascript("set volume output volume " + str(val.value))
        time.sleep(0.1)
if __name__ == '__main__':
    # 'i' = shared signed int holding the requested volume level.
    v = Value('i', 0)
    p0 = Process(target = recieve_data, args = (v,))
    p0.start()
    p1 = Process(target = volume_control, args = (v,))
    p1.start()
    p0.join()
    p1.join()
{
"api_name": "osascript.osascript",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "osascript.osascript",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "osascript.osascript... |
20921250486 | import networkx as nx
from sklearn.cluster import SpectralClustering
def spectral_clustering(G, n_clusters=2):
    """Partition the nodes of graph G into n_clusters via spectral clustering.

    The dense adjacency matrix is used as a precomputed affinity.
    Returns a dict-values view with one list of node indices per cluster.
    """
    # Bug fix: to_numpy_matrix was deprecated and removed in networkx 3.x;
    # to_numpy_array yields the same adjacency values as a plain ndarray,
    # which SpectralClustering accepts.
    adj_mat = nx.to_numpy_array(G)
    sc = SpectralClustering(n_clusters, affinity='precomputed', n_init=100)
    sc.fit(adj_mat)
    clusters = {}
    for node_index, label in enumerate(sc.labels_):
        clusters.setdefault(label, []).append(node_index)
    return clusters.values()
| sharpenb/Multi-Scale-Modularity-Graph-Clustering | Scripts/clustering_algorithms/spectral_clustering.py | spectral_clustering.py | py | 454 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "networkx.to_numpy_matrix",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.cluster.SpectralClustering",
"line_number": 7,
"usage_type": "call"
}
] |
12170535231 | # Created on 24 September 2019
from square import Square, getScaledFont
from random import randint
from math import cos, sin, pi, atan, copysign
from pygame.mixer import *
from pygame.draw import rect
from pygame.locals import *
from pygame.time import Clock
from pygame.display import update
from pygame.mouse import get_pos
class GameDriver:
    """2048-style board driver: holds the grid, animates slides with pygame,
    tracks the score, and supports a one-move undo."""

    def __init__(self, dim, w):
        # dim = (columns, rows); w = pixel size of one square.
        self.dim = dim
        self.w = w
        self.squares = []
        self.vals = []
        # Stores sets of ((x,y): (#Squares to slide, surface))
        self.slides = {}
        # Total animation time for one move, in milliseconds.
        self.slide_duration = 300
        # Direction of the current/last move as a unit (dx, dy) vector.
        self.v = (0, 0)
        self.score = 0
        # Score before the last move; restored on undo.
        self.prev_score = 0
        for y in range(dim[1]):
            row = []
            val = []
            for x in range(dim[0]):
                row.append(None)
                val.append(-1)
            self.squares.append(row)
            self.vals.append(val)

    def drawBoard(self, display):
        """Redraw every cell (square surface or black fill) plus white grid lines."""
        for y, row in enumerate(self.squares):
            for x, s in enumerate(row):
                if s != None:
                    display.blit(s.surface, (x * self.w, y * self.w))
                else:
                    rect(display, (0, 0, 0), (x * self.w, y * self.w, self.w, self.w))
                rect(display, (255, 255, 255), (x * self.w, y * self.w, self.w, self.w), 2)
        update()

    def move(self, display, undo):
        """Animate the pending slides; with undo=True play them backwards and
        restore the pre-move board from the self.vals snapshot."""
        if len(self.slides) == 0:
            return
        if undo:
            self.score = self.prev_score
            # Rebuild the board from the snapshot taken before the move
            # (-1 in self.vals marks a cell that was empty).
            for y, (row1, row2) in enumerate(zip(self.squares, self.vals)):
                for x, (s, val) in enumerate(zip(row1, row2)):
                    if val == -1 and s != None:
                        self.squares[y][x] = None
                    elif val != -1 and s == None:
                        self.squares[y][x] = Square(val, self.w)
                    elif val != -1 and s != None:
                        self.squares[y][x].changeVal(val)
        updates = 20
        for i in range(updates):
            for x, y in self.slides.keys():
                v, surface = self.slides[(x, y)]
                xf, yf = x + v[0], y + v[1]
                if undo:
                    # Play the slide in reverse: swap start and end cells.
                    v = (-v[0], -v[1])
                    x, xf = xf, x
                    y, yf = yf, y
                v = (v[0] * self.w, v[1] * self.w)
                x1, y1 = x * self.w, y * self.w
                # Erase the square at its previous interpolated position...
                dx, dy = v[0] * i / updates, v[1] * i / updates
                rect(display, (0, 0, 0), (x1 + dx, y1 + dy, self.w, self.w))
                # ...and draw it at the next interpolation step.
                dx, dy = v[0] * (i + 1) / updates, v[1] * (i + 1) / updates
                display.blit(surface, (x1 + dx, y1 + dy))
            update()
            Clock().tick(updates * 1000 / self.slide_duration)
        self.drawBoard(display)

    def addSquares(self, display):
        """Spawn up to two new '2' squares in random empty cells and draw them."""
        nones = []
        for y, row in enumerate(self.squares):
            for x, s in enumerate(row):
                if s == None:
                    nones.append((x, y))
        for i in range(min(len(nones), 2)):
            idx = randint(0, len(nones) - 1)
            x, y = nones[idx]
            s = Square(2, self.w)
            self.squares[y][x] = s
            display.blit(s.surface, (x * self.w, y * self.w))
            nones.pop(idx)
        update()

    def lost(self):
        """Return True when no cell is empty and no adjacent equal pair remains."""
        for y, row in enumerate(self.squares):
            for x, s in enumerate(row):
                if s == None:
                    return False
                else:
                    adjacent = []
                    for delta in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                        x1, y1 = x + delta[0], y + delta[1]
                        in_range = 0 <= x1 < self.dim[0] and 0 <= y1 < self.dim[1]
                        if in_range and self.squares[y1][x1] != None:
                            adjacent.append(self.squares[y1][x1].val)
                    if s.val in adjacent:
                        return False
        return True

    def updateScore(self, display, score_rect):
        """Render the current score, scaled to fit and centered in score_rect."""
        font = getScaledFont("Times New Roman", (score_rect.w, score_rect.h), str(self.score))
        text = font.render(str(self.score), 1, (255, 255, 255))
        text_rect = text.get_rect(center=(score_rect.centerx, score_rect.centery))
        rect(display, (0, 0, 0), text_rect)
        display.blit(text, text_rect)

    def run(self, display, events, undo_rect, score_rect):
        """Process one frame of input (undo clicks, arrow-key moves), update
        the score display, and return lost()."""
        for e in events:
            if e.type == MOUSEBUTTONUP and e.button == BUTTON_LEFT and \
                    undo_rect.collidepoint(get_pos()):
                # Undo button clicked: replay the last move backwards.
                self.move(display, True)
                self.slides.clear()
            elif e.type == KEYUP:
                if e.key == K_LEFT:
                    self.v = (-1, 0)
                elif e.key == K_RIGHT:
                    self.v = (1, 0)
                elif e.key == K_UP:
                    self.v = (0, -1)
                elif e.key == K_DOWN:
                    self.v = (0, 1)
                else:
                    continue
                self.slides.clear()
                self.prev_score = self.score
                move_x = self.v[0] != 0
                is_neg = -1 in self.v
                idx = 0 if move_x else 1
                # Traverse lanes starting from the edge the squares slide toward.
                lb = 0 if is_neg else -abs(self.dim[idx] * self.v[idx]) + 1
                ub = abs(self.dim[idx] * self.v[idx]) if is_neg else 1
                # Per-lane counters: empty cells seen, merges done, last value.
                blanks = []
                merges = []
                prev = []
                for v1 in range(lb, ub):
                    for v2 in range(self.dim[1 - idx]):
                        if len(blanks) <= v2:
                            blanks.append(0)
                            prev.append(0)
                            merges.append(0)
                        x = abs(v1) if move_x else v2
                        y = v2 if move_x else abs(v1)
                        s = self.squares[y][x]
                        # Snapshot for undo (-1 marks an empty cell).
                        self.vals[y][x] = -1 if s == None else s.val
                        if s == None:
                            blanks[v2] += 1
                        else:
                            offset = blanks[v2] + merges[v2]
                            dx, dy = self.v[0] * offset, self.v[1] * offset
                            last_val = prev[v2]
                            prev[v2] = s.val
                            if last_val == s.val:
                                # Same value as the previous square in this
                                # lane: slide one cell further and merge.
                                offset += 1
                                dx1, dy1 = self.v[0] * offset, self.v[1] * offset
                                self.slides[(x, y)] = ((dx1, dy1), s.surface)
                                self.squares[y + dy1][x + dx1].upgrade()
                                self.squares[y][x] = None
                                prev[v2] = 0
                                merges[v2] += 1
                                self.score += s.val * 2
                            elif offset != 0:
                                self.slides[(x, y)] = ((dx, dy), s.surface)
                                self.squares[y + dy][x + dx] = s
                                self.squares[y][x] = None
                self.move(display, False)
                self.addSquares(display)
                if sum(merges) >= 3:
                    # Celebrate three or more merges in one move with a sound.
                    music.load("bomb.mp3")
                    music.play()
        self.updateScore(display, score_rect)
        return self.lost()
| AaronOrenstein210/2048 | gameDriver.py | gameDriver.py | py | 7,331 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pygame.draw.rect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "square.Square... |
18654748060 | from typing import Dict, List, Type
from src.domain.models.pets import Pets
from src.domain.use_cases import FindPet as FindPetInterface
from src.data.interfaces import PetRepositoryInterface
class FindPet(FindPetInterface):
    """Use case: look up pets by id, by owner, or by both."""

    def __init__(self, pets_repository: Type[PetRepositoryInterface]):
        self.pets_repository = pets_repository

    def by_id(self, pet_id: int) -> Dict[bool, List[Pets]]:
        """Fetch the pet with the given id."""
        is_valid = isinstance(pet_id, int)
        found = self.pets_repository.select_pet(pet_id=pet_id) if is_valid else None
        return {"Success": is_valid, "Data": found}

    def by_user_id(self, user_id: int) -> Dict[bool, List[Pets]]:
        """Fetch every pet owned by the given user."""
        is_valid = isinstance(user_id, int)
        found = self.pets_repository.select_pet(user_id=user_id) if is_valid else None
        return {"Success": is_valid, "Data": found}

    def by_pet_id_and_user_id(
        self, pet_id: int, user_id: int
    ) -> Dict[bool, List[Pets]]:
        """Fetch the pet with the given id belonging to the given user."""
        is_valid = isinstance(user_id, int) and isinstance(pet_id, int)
        found = (
            self.pets_repository.select_pet(user_id=user_id, pet_id=pet_id)
            if is_valid
            else None
        )
        return {"Success": is_valid, "Data": found}
| MatheusDev20/flask-application-clean-arch | src/data/find_pet/find.py | find.py | py | 1,392 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "src.domain.use_cases.FindPet",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "src.data.interfaces.PetRepositoryInterface",
"line_number": 10,
"usage_type": "name"
},
{
... |
19400730749 | ###############################################################################
# Process to read Customer Updates #
#
# Pre-requisites: Kafka server should be running #
###############################################################################
import os
import sys
import logging
import json
import settings as SETTINGS
# Make the parent directory importable regardless of the launch location.
curpath = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curpath, "../")))
from app_messaging_utils import SimpleKafkaConsumer, SimpleKafkaMessage
from app_models import Customer, AppEventType
from app_utils import MongoRepository, DbEntity
###############################################################################
class MessageProcessor():
    """Consumes customer-change events from Kafka and replicates them into
    the MongoDB customer collection."""

    def __init__(self, process_func=None):
        # NOTE(review): process_func is accepted but never used.
        # Create and configure logger
        logfile = os.path.abspath('{0}/{1}'.format(SETTINGS.Logging["LogFolder"], SETTINGS.Logging["LogFile"]))
        os.makedirs(os.path.dirname(logfile), exist_ok=True)
        logging.basicConfig(
            filename=logfile,
            format='%(asctime)s %(message)s',
            filemode='a'
        )
        # Creating an object
        self.logger = logging.getLogger()
        # Setting the threshold of logger to DEBUG
        self.logger.setLevel(SETTINGS.Logging["LogLevel"])
        self.config = SETTINGS.KafkaService
        self.topic = SETTINGS.MESSAGE_TOPIC
        self.customer_repo = MongoRepository(
            logger=self.logger,
            server=SETTINGS.MongoDB["Url"],
            port=SETTINGS.MongoDB["Port"],
            database=SETTINGS.MongoDB["Db"],
            collection=SETTINGS.MongoDB["Collection"],
            session_id=1)

    ###########################################################################
    def process_message(self, evt_msg: SimpleKafkaMessage):
        '''
        Function to process SimpleKafkaMessage
        Deserialize the SimpleKafkaMessage,
        extract and process relevant payload
        '''
        try:
            evt = json.loads(evt_msg.message)
            if evt["app_event_type"] == AppEventType.Insert:
                # Insert: materialise the after-image and create the document.
                entity = evt["after_change"]
                customer = Customer(
                    id=entity["id"],
                    name=entity["name"],
                    phone=entity["phone"],
                    email=entity["email"]
                )
                msg = "Processing INSERT message for customer id:{0}".format(customer.id)
                print(msg)
                eid = self.customer_repo.create(customer)  # expect to get back an ObjectId
                msg = "Created customer id:{0}".format(customer.id)
                print(msg)
                self.logger.debug(msg)
            elif evt["app_event_type"] == AppEventType.Update:
                entity = evt["after_change"]
                customer = Customer(
                    id=entity["id"],
                    name=entity["name"],
                    phone=entity["phone"],
                    email=entity["email"]
                )
                msg = "Processing UPDATE message for customer id:{0}".format(customer.id)
                print(msg)
                self.customer_repo.update_by_id(customer.id, customer)
                msg = "Updated customer id:{0}".format(customer.id)
                print(msg)
                self.logger.debug(msg)
            elif evt["app_event_type"] == AppEventType.Delete:
                # Delete only needs the id, but the full entity is built for
                # symmetry with the other branches.
                entity = evt["after_change"]
                customer = Customer(
                    id=entity["id"],
                    name=entity["name"],
                    phone=entity["phone"],
                    email=entity["email"]
                )
                msg = "Processing DELETE message for customer id:{0}".format(customer.id)
                print(msg)
                self.customer_repo.delete_by_id(customer.id)
                msg = "Deleted customer id:{0}".format(customer.id)
                print(msg)
                self.logger.debug(msg)
            else:
                # Unknown event types are silently ignored.
                pass
        except Exception as e:
            msg = "Error in process_message function: {0}".format(str(e))
            print(msg)
            self.logger.error(msg)

    ###########################################################################
    def read_messages(self):
        '''
        Function to read messages from kafka queue
        '''
        reader_id = self.config["group.id"]
        counter = 0
        try:
            msg = "Starting Process:{0} to read topic:{1} from Kafka Queue".format( reader_id , self.topic )
            self.logger.info(msg)
            print(msg)
            consumer = SimpleKafkaConsumer(logger=self.logger)
            consumer.configure(config=self.config)
            print ("Starting Consumer")
            # NOTE(review): the topic list is hard-coded here even though
            # self.topic (SETTINGS.MESSAGE_TOPIC) is configured above - confirm
            # which one is authoritative.
            for evt_msg in consumer.consume(topics=['MICROSERVICE-CUSTOMER-UPDATES']):
                counter += 1
                # msg = "Received msg: {0} # {1}".format(counter, evt_msg.message)
                # print(msg)
                # self.logger.debug(msg)
                # Process the message
                self.process_message(evt_msg)
        except KeyboardInterrupt:
            # Ctrl-C is the normal way to stop the consumer loop.
            msg = "\n\n Exiting Process:'{0}'. {1} message(s) read on topic from Kafka Queue:'{2}'".format( reader_id, counter, self.topic )
            print (msg)
            self.logger.info(msg)
        except Exception as e:
            msg = "Error in {0} : {1}".format(reader_id, str(e))
            print(msg)
            self.logger.error(msg)
###############################################################################
if __name__ == "__main__":
    # Run the consumer loop until interrupted (Ctrl-C).
    MessageProcessor().read_messages()
###############################################################################
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_num... |
74021781309 | from typing import List
from collections import Counter
from time import time
import matplotlib.pyplot as plt
import numpy as np
# constants
ENGLISH_ALPHABET_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '


def get_string_size(string: str, format: str = 'utf8') -> int:
    '''Returns size of string in bytes when encoded with *format*.'''
    # Bug fix: the format parameter was silently ignored (always utf-8);
    # honour it.  The default still encodes as UTF-8.
    return len(string.encode(format))


def get_words_from_text(text: str, approved_chars=ENGLISH_ALPHABET_CHARS) -> List[str]:
    '''Returns list of filtered, lower-cased words from a text.'''
    # filter unwanted characters from text
    text = ''.join(char for char in text if char in approved_chars)
    # split and format words into list
    words = [word.lower() for word in text.split(' ') if len(word) > 0]
    return words
# read in file
with open('input.txt', 'r') as input_file:
    # iterate through file entries and extract words
    words = []
    for i, entry_text in enumerate(input_file):
        # get words from text and append
        entry_words = get_words_from_text(entry_text)
        words.extend(entry_words)

# count and rank words (Counter.most_common orders by descending frequency,
# and dict preserves that insertion order)
word_count_rank = dict(Counter(words).most_common())
n_unique_words = len(word_count_rank)

# plot frequency against rank (Zipf curve)
fig, ax = plt.subplots()
plot = ax.plot(range(n_unique_words), list(word_count_rank.values()))
ax.set_xticks(np.arange(1, n_unique_words+1, 25))
# label the first few most frequent words on the curve
n_labels = 6
for i, word in enumerate(list(word_count_rank)[:n_labels]):
    ax.text(i, word_count_rank[word], word, fontsize=8)
plt.show()
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
71844063869 | from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from simple_menu import MenuItem
# Child entries of the "Sales" sidebar section.
submenu_items = [
    MenuItem(
        _("customers").capitalize(),
        reverse("packs:sales_customer_list"),
        weight=20,
        icon="bx-right-arrow-alt",
    ),
    MenuItem(
        _("invoices").capitalize(),
        reverse("packs:sales_invoice_list"),
        weight=20,
        icon="bx-right-arrow-alt",
    ),
]

# Parent entry grouping the sales submenu; links to "#" (no page of its own).
sales_item = MenuItem(
    _("sales").capitalize(), "#", icon="bxs-shopping-bag", children=submenu_items
)
| dbsiavichay/faclab | apps/accounts/menus/sales.py | sales.py | py | 563 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "simple_menu.MenuItem",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 8,
"usage_type": "call"
},
{
"api_n... |
4783789916 | # Adapted from pytorch examples
from __future__ import print_function
from torch import nn, optim
from railrl.core import logger
import numpy as np
from railrl.pythonplusplus import identity
from railrl.torch.core import PyTorchModule
from railrl.torch.networks import Mlp
import railrl.torch.pytorch_util as ptu
class ReprojectionNetworkTrainer():
    """Supervised trainer mapping latent codes z to projected codes z_proj
    with an MSE objective.

    Datasets are dicts with float32 arrays under 'z' and 'z_proj'.
    NOTE(review): `.data[0]` below is the pre-0.4 PyTorch scalar accessor;
    on modern torch this should be `.item()` - confirm the pinned version.
    """

    def __init__(
            self,
            train_dataset,
            test_dataset,
            model,
            batch_size=128,
            log_interval=0,
            lr=1e-3,
            **kwargs
    ):
        self.log_interval = log_interval
        self.batch_size = batch_size
        if ptu.gpu_enabled():
            model.cuda()
        self.model = model
        self.representation_size = model.representation_size
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
        self.train_dataset, self.test_dataset = train_dataset, test_dataset
        # Inputs must already be float32; fail fast otherwise.
        assert self.train_dataset['z'].dtype == np.float32
        assert self.test_dataset['z'].dtype == np.float32
        assert self.train_dataset['z_proj'].dtype == np.float32
        assert self.test_dataset['z_proj'].dtype == np.float32
        self.mse = nn.MSELoss()

    def get_batch(self, train=True):
        """Sample a random batch (with replacement) from train or test data."""
        dataset = self.train_dataset if train else self.test_dataset
        ind = np.random.randint(0, len(dataset['z']), self.batch_size)
        return {
            'z': ptu.np_to_var(dataset['z'][ind, :]),
            'z_proj': ptu.np_to_var(dataset['z_proj'][ind, :]),
        }

    def mse_loss(self, z_proj_hat, z_proj):
        # Mean-squared error between predicted and target projections.
        return self.mse(z_proj_hat, z_proj)

    def train_epoch(self, epoch, batches=100):
        """Run `batches` optimisation steps and log average train metrics."""
        self.model.train()
        mses = []
        losses = []
        for batch_idx in range(batches):
            data = self.get_batch()
            z = data["z"]
            z_proj = data['z_proj']
            self.optimizer.zero_grad()
            z_proj_hat = self.model(z)
            mse = self.mse_loss(z_proj_hat, z_proj)
            loss = mse
            loss.backward()
            mses.append(mse.data[0])
            losses.append(loss.data[0])
            self.optimizer.step()
        logger.record_tabular("train/epoch", epoch)
        logger.record_tabular("train/MSE", np.mean(mses))
        logger.record_tabular("train/loss", np.mean(losses))

    def test_epoch(self, epoch, save_network=True, batches=100):
        """Evaluate on `batches` random test batches; optionally snapshot the model."""
        self.model.eval()
        mses = []
        losses = []
        for batch_idx in range(batches):
            data = self.get_batch(train=False)
            z = data["z"]
            z_proj = data['z_proj']
            z_proj_hat = self.model(z)
            mse = self.mse_loss(z_proj_hat, z_proj)
            loss = mse
            mses.append(mse.data[0])
            losses.append(loss.data[0])
        logger.record_tabular("test/epoch", epoch)
        logger.record_tabular("test/MSE", np.mean(mses))
        logger.record_tabular("test/loss", np.mean(losses))
        logger.dump_tabular()
        if save_network:
            logger.save_itr_params(epoch, self.model, prefix='reproj', save_anyway=True)
class ReprojectionNetwork(PyTorchModule):
    """MLP that maps VAE latents z to their reprojected latents z_proj."""

    def __init__(
            self,
            vae,
            hidden_sizes=list([64, 128, 64]),
            init_w=1e-3,
            hidden_init=ptu.fanin_init,
            output_activation=identity,
            layer_norm=False,
            **kwargs
    ):
        # NOTE: save_init_params(locals()) snapshots the constructor
        # arguments for later re-instantiation, so the local names here
        # are part of the serialized state -- do not rename them.
        self.save_init_params(locals())
        super().__init__()
        self.vae = vae
        self.representation_size = self.vae.representation_size
        self.hidden_init = hidden_init
        self.output_activation = output_activation
        # self.dist_mu = np.zeros(self.representation_size)
        # self.dist_std = np.ones(self.representation_size)
        # Latent-distribution statistics are inherited from the wrapped VAE.
        self.dist_mu = self.vae.dist_mu
        self.dist_std = self.vae.dist_std
        self.relu = nn.ReLU()
        self.init_w = init_w
        # Defensive copy so a shared default/caller list is never aliased.
        hidden_sizes = list(hidden_sizes)
        # Input and output widths both equal the latent dimensionality.
        self.network=Mlp(hidden_sizes,
                         self.representation_size,
                         self.representation_size,
                         layer_norm=layer_norm,
                         hidden_init=hidden_init,
                         output_activation=output_activation,
                         init_w=init_w)

    def forward(self, z):
        # Flatten to (batch, representation_size) before the MLP.
        z = z.view(-1, self.representation_size)
        return self.network(z)

    def __getstate__(self):
        d = super().__getstate__()
        # Add these explicitly in case they were modified
        d["_dist_mu"] = self.dist_mu
        d["_dist_std"] = self.dist_std
        return d

    def __setstate__(self, d):
        super().__setstate__(d)
        self.dist_mu = d["_dist_mu"]
self.dist_std = d["_dist_std"] | snasiriany/leap | railrl/torch/vae/reprojection_network.py | reprojection_network.py | py | 4,777 | python | en | code | 45 | github-code | 6 | [
{
"api_name": "railrl.torch.pytorch_util.gpu_enabled",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "railrl.torch.pytorch_util",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 32,
"usage_type": "call"
},
{
"... |
29924772061 | """
Author: JW
Date: 07/26/2023
Module Name: picture_capture_controls_uplink.py
Description:
This Python script is part of an image processing and classification application.
It provides various functions for interacting with images, databases, and user stacks.
The script includes functionalities such as simulating image classification, checking classification progress, and updating image labels in a database.
It is designed to work with Anvil, tkinter, multiprocessing, and PIL (Python Imaging Library) libraries.
Functions:
- `open_file_explorer`: Opens a file explorer dialog for selecting directories.
- `classify_images_simulate`: Simulates image classification and stores results in a database.
- `start_classifier_build`: Initiates the image classification process, handling new or existing image stacks.
- `check_classifier_progress`: Monitors the progress of image classification and retrieves completed labels and images.
- `submit_labels_to_db`: Handles the submission of labels to a database, updates labels, and moves files based on labels.
For detailed information on each function's purpose and usage, please refer to the function definitions and comments within the script.
"""
from time import sleep
import random
import json
import uuid
import multiprocessing
from PIL import Image
import anvil.media
import os
import io
import shutil
# Uplink imports:
try:
import utils.mySQL_utils as localSQL
from uplink_scripts.stack import Stack
# Local host imports
except (ModuleNotFoundError) as mod_err:
print("Trying local host imports in picture_capture_controls.py")
from ..utils import mySQL_utils as localSQL
from .stack import Stack
# NOTE: When running from a docker container, we will be unable to import tkinter:
try:
import tkinter as tk
from tkinter import filedialog
except(ImportError) as err:
print("Unable to import tkinter")
# Set up our stack:
image_stack = Stack()
def open_file_explorer():
    """Open a native directory-picker dialog and return the chosen path.

    Returns:
        str: the selected directory path; "N/A" if the user cancelled the
        dialog; "" if tkinter is unavailable or the dialog failed (e.g.
        no display). Depending on when this is called, the path is used
        as either the source or the destination directory.
    """
    try:
        root = tk.Tk()
        root.withdraw()  # hide the empty root window behind the dialog
        file_path = filedialog.askdirectory()
        if not file_path:
            # askdirectory() returns "" (or ()) when the user cancels.
            file_path = "N/A"
        root.destroy()
        return file_path
    except (Exception) as err:
        # Fixed typo: message previously read "tikinter".
        print("tkinter not installed...returning empty path")
        return ""
def classify_images_simulate(image_full_path, img_name_list, job_id):
    """Fake classifier used for testing the UI pipeline.

    For each image: waits to simulate inference latency, picks one of the
    five labels at random, and records (job_id, image name, label) in the
    anvil_imgProcessor table.
    """
    labels = ["Cotton", "Plastic", "HID", "Tray", "Other"]
    cnx = localSQL.sql_connect()
    for _path, img_name in zip(image_full_path, img_name_list):
        # Simulate classification latency.
        sleep(10)
        # Randomly choose one of the five labels.
        label = labels[random.randint(0, 4)]
        # Record the (job, image, label) row for progress polling.
        insert_query = f"INSERT INTO anvil_imgProcessor (job_id, img_name, img_label) VALUES ('{job_id}','{img_name}','{label}')"
        localSQL.sql_insert(cnx, insert_query)
    # Close db connection
    localSQL.sql_closeConnection(cnx)
    print("Finished classyfing")
def start_classifier_build(json_data):
    """Serve a page of images: reuse the user's stack or start a new job.

    json_data keys used: page_num, user_id, num_images, file_path_src.

    NOTE(review): return shape is inconsistent -- a 4-tuple
    (images, labels, names, update_database) on a stack hit, a bare
    job_id string when a new classification job is spawned, `None` on a
    directory error, and (None, None, None, None) for an empty directory.
    Callers must handle all four; consider unifying.
    """
    # convert json dict to python dict
    python_dict_classifier = json.loads(json_data)
    # Unpack the dictionary:
    page_num = python_dict_classifier.get("page_num")
    user_id = python_dict_classifier.get("user_id")
    num_images = python_dict_classifier.get("num_images")
    file_path = python_dict_classifier.get("file_path_src")
    # IF user wants to grab previous images (back_button press or jump_to_page) -> "pop" images from stack, ELSE get new images
    try:
        # Try getting images from the users stack using page_num as the list index.
        labels, img_names, images, update_database = image_stack.pop(user_id, page_num)
        # If the number of images retrieved == to number of images user currently wants to retrieve, return the images:
        if (int(num_images) == len(img_names)):
            print(f"Retrieved previous images for page {page_num}")
            return images, labels, img_names, update_database
        # If the user changed the number of images to display on each page -> reset stack and grab new images.
        else:
            print(f"Number of images changed... reseting users stack")
            # TODO: If user changed the number of images to grab, reset the users stack:
            image_stack.reset_stack(user_id)
    # If we get a KeyError or IndexError -> grab new images from directory.
    except (KeyError, IndexError) as err:
        print(f"{err}: Grabbing new images for page {page_num}")
    # Set up a job ID (uuid4 with dashes stripped):
    job_id = str(uuid.uuid4())
    job_id = job_id.replace("-", "")
    try:
        # NOTE: with large n we may want to only a subset of all images
        all_files_in_dir = os.listdir(file_path)
        # Filter to select only image files:
        all_images = [file for file in all_files_in_dir if file.endswith(".jpg") or file.endswith(".png")]
    except (Exception) as e:
        print("Could not access directory")
        return None
    num_images_found = len(all_images)
    # Check to make sure images were found in the directory:
    if(num_images_found == 0):
        print("Dir does not contain any images")
        return None, None, None, None
    # If for whatever reason the directory has fewer images than requested -> grab all found images
    if(num_images_found < int(num_images)):
        # Randomly select n images:
        rand_n_imgs = random.sample(all_images, int(num_images_found))
        # now that we've selected our images, lets move them to a seperate folder such that they are not re-used
    else:
        # Randomly select n images:
        rand_n_imgs = random.sample(all_images, int(num_images))
        # now that we've selected our images, lets move them to a seperate folder such that they are not re-used
    # Establish Connection to the Databse:
    cnx = localSQL.sql_connect()
    # Write job ID to anvil_img_Classifier data-table (progress polling
    # later counts rows with this job_id; this seed row has NULL fields):
    insert_query = f"INSERT INTO anvil_imgProcessor (job_id) VALUES ('{job_id}')"
    localSQL.sql_insert(cnx, insert_query)
    # Close connection to the database:
    localSQL.sql_closeConnection(cnx)
    imgs_full_path, img_name_list = [], []
    # Loop accomplishes two things:
    # 1) Creates the full image path for each randomly selected image
    # 2) Collects the bare image names in a parallel list
    for image in rand_n_imgs:
        # Get the full image path
        img_full_path = file_path + "/" + image
        # Keep track of all the img paths
        imgs_full_path.append(img_full_path)
        img_name_list.append(image)
    ##############
    # NOTE: SPAWN new process here:
    # classify_images(imgs_full_path, img_name_list, job_id)
    ##############
    # Classification runs in a separate process; the caller polls
    # check_classifier_progress() with the returned job_id.
    process = multiprocessing.Process(target=classify_images_simulate, args=(imgs_full_path, img_name_list, job_id))
    # Start the process
    process.start()
    return job_id
def check_classifier_progress(json_data):
    """Poll the database for classification progress on a job.

    Called every n seconds by the client:
    - While rows are still missing, returns [False, pct_ready, False,
      False, False] where pct_ready is a 0-100 percentage.
    - Once all rows exist, loads the labelled images from disk, pushes
      them onto the user's stack, and returns
      [True, 1, labels_dict, name_list, image_list].

    NOTE(review): pct_ready is 1 (not 100) in the done branch but a
    0-100 percentage otherwise -- confirm which scale the client expects.
    NOTE(review): assumes num_images is already an int (it is used in
    arithmetic) -- verify the caller converts it.
    """
    MAX_STACK_HEIGHT = 50  # Starting with 50, could be increased... (100*num_images) = # of elements in each stack
    # convert json dict to python dict
    python_dict_classifier = json.loads(json_data)
    # Unpack the dictionary:
    user_id = python_dict_classifier.get("user_id")
    num_images = python_dict_classifier.get("num_images")
    job_id = python_dict_classifier.get("job_id")
    file_path = python_dict_classifier.get("file_path_src")
    # Check database using job_id to see how many images are ready.
    # Establish Connection to the Databse:
    cnx = localSQL.sql_connect()
    # Create a cursor
    cursor = cnx.cursor()
    search_query = f"SELECT * FROM anvil_imgProcessor WHERE job_id = ('{job_id}')"
    cursor.execute(search_query)
    rows = cursor.fetchall()
    # Close the connection
    cnx.close()
    num_rows_ready = len(rows)
    print(num_rows_ready)
    img_labels_list, img_name_list, img_list = [], [], []
    img_labels_dict = {}
    # +1 accounts for the seed row written when the job was created.
    if(num_rows_ready == (num_images + 1)):
        done_classifying = True  # Set our flag to true
        pct_ready = 1
        # Once images are done get the assigned labels:
        for row in rows:
            # Last two columns are (img_name, img_label).
            img_labels_list.append(row[-1])
            img_name_list.append(row[-2])
        # Delete the first element of each list (first element has NULL label and img name values)
        del img_labels_list[0]
        del img_name_list[0]
        # Store key-value pair (img_name: label) in dict data-structure
        for i in range(len(img_name_list)):
            img_labels_dict[img_name_list[i]] = img_labels_list[i]
            # Using the image name and file path, import the image to type anvil.BlobMedia
            # Get the full image path
            img_full_path = file_path + "/" + img_name_list[i]
            # Retrieve our image using PIL
            pil_img = Image.open(img_full_path)
            # Resize for display (NOTE: comment said 1280x960 but the
            # actual target is 960x720).
            resized_image = pil_img.resize((960,720))
            bs = io.BytesIO()
            # Convert to bytes:
            resized_image.save(bs, format="png")
            # Convert to type anvil.BlobMedia so that we can display it for the client
            anvil_image = anvil.BlobMedia("image/png", bs.getvalue(), name="cotton")
            img_list.append(anvil_image)
        print(img_labels_list)
        print(img_labels_dict)
        # Set-up the "stack" here:
        # Pythonic: If user does not have a stack created, create one
        try:
            print(f"Adding images for user {user_id} to stack...")
            image_stack.push(user_id,
                             img_labels_dict,
                             img_name_list,
                             img_list)
        except (KeyError) as ke:
            print("No ID found!")
            print(f"Creating stack for user: {user_id} ")
            image_stack.init_user(user_id,
                                  img_labels_dict,
                                  img_name_list,
                                  img_list)
        # Check length of stack; if stack is > max_len --> start removing elements
        try:
            stack_height = image_stack.size(user_id)
            if(stack_height > MAX_STACK_HEIGHT):
                print(f"Users stack reached max height of {MAX_STACK_HEIGHT}, Removing first element...")
                # Delete first [0] from stack
                image_stack.delete_element(user_id)
        except (KeyError) as err:
            print(f"Unable to get height of users stack: {err}")
        return [done_classifying, pct_ready, img_labels_dict, img_name_list, img_list]
    else:
        done_classifying = False
        # -1 discounts the seed row from the percentage.
        pct_ready = ((num_rows_ready - 1) / (num_images)) * 100
        return [done_classifying, pct_ready, False, False, False]
def submit_labels_to_db(json_data):
    """Persist reviewed image labels and relocate the image files.

    (Docstring rewritten: the previous one was copied from the image
    retrieval function and described the wrong behavior.)

    Two modes, chosen by `selected_folder` in the JSON payload:
      - "dir": first-time submission. Inserts one row per image into
        anvil_imgClassification, recording whether the classifier's
        original label matched the human-corrected one (GotWrong column),
        then copies/moves each file into the processed directory --
        optionally into per-label sub-folders plus a "mislabeled" folder.
      - "update": the user re-labelled a previously submitted page.
        Finds each image's existing row, clears its old label column and
        GotWrong marker, and rewrites the corrected label.

    NOTE(review): every query is built with f-strings from request data;
    consider parameterized queries to avoid SQL injection.
    """
    # Extract our json data into a python dict
    python_dict = json.loads(json_data)
    processed_dir = python_dict.get("file_path_dst")
    # processed_dir = "/home/pi/Desktop/Jon_workspace/Anvil/processed_images" # NOTE: ONLY USED FOR TESTING (REMOVE FOR DEPLOYMENT)
    # Create the destination directory if it doesn't exist
    if not os.path.exists(processed_dir):
        os.makedirs(processed_dir)
    keys_list = []
    # Unpack the dict:
    classifier_labels = python_dict.get("original_labels")
    # human modified labels
    modified_labels = python_dict.get("modified_labels")
    selected_folder = python_dict.get("selected_folder")
    page_num = python_dict.get("page_num")
    user_id = python_dict.get("user_id")
    use_sub_folders = python_dict.get("proc_sub_folders")
    # If user manually specified the path, enter:
    if(selected_folder == "dir"):
        # Add the user modified labels to their stack:
        try:
            print(f"Adding modified labels for user {user_id} to stack...")
            image_stack.push(user_id,
                             user_labels=modified_labels)
        except (KeyError) as ke:
            print("No ID found!")
            print(f"Creating modified labels stack for user: {user_id} ")
            image_stack.init_user(user_id,
                                  user_labels=modified_labels)
        file_path = python_dict.get("file_path_src")
        # file_path = "/home/pi/Desktop/Jon_workspace/Anvil/Cotton" # NOTE: ONLY USED FOR TESTING (REMOVE FOR DEPLOYMENT)
        # Check if we need to set up sub-folders:
        if(use_sub_folders):
            print("Setting up sub folders")
            # Set-up sub-folders for the processed images
            proc_cotton_dir = processed_dir + "/cotton"
            proc_tray_dir = processed_dir + "/tray"
            proc_plastic_dir = processed_dir + "/plastic"
            proc_hid_dir = processed_dir + "/HID"
            proc_other_dir = processed_dir + "/other"
            proc_mislabeled_dir = processed_dir + "/mislabeled"
            # Create each destination sub-directory if it doesn't exist
            if not os.path.exists(proc_cotton_dir):
                os.makedirs(proc_cotton_dir)
            if not os.path.exists(proc_tray_dir):
                os.makedirs(proc_tray_dir)
            if not os.path.exists(proc_plastic_dir):
                os.makedirs(proc_plastic_dir)
            if not os.path.exists(proc_hid_dir):
                os.makedirs(proc_hid_dir)
            if not os.path.exists(proc_other_dir):
                os.makedirs(proc_other_dir)
            if not os.path.exists(proc_mislabeled_dir):
                os.makedirs(proc_mislabeled_dir)
        # get all the keys (image names)
        for key in classifier_labels:
            keys_list.append(key)
        # Next, Establish Connection to the Databse:
        cnx = localSQL.sql_connect()
        # Loop through each key(image name) and add to correct db column
        for key in range(len(keys_list)):
            image_name = keys_list[key]
            orginal_label = classifier_labels[keys_list[key]]
            corrected_label = modified_labels[keys_list[key]]
            # Get our source path (used with moving the image):
            source_path = os.path.join(file_path, image_name)
            # Get our processed img path:
            dest_path = os.path.join(processed_dir, image_name)
            if(orginal_label == corrected_label):
                correctP = True
                # Classifier was right: record name under the label
                # column, plus JOINT (original label) and Path.
                add_query = f"INSERT INTO anvil_imgClassification ({corrected_label}, JOINT, Path) VALUES ('{str(keys_list[key])}', '{str(orginal_label)}' ,'{str(source_path)}')"
                localSQL.sql_insert(cnx, add_query)
            else:
                # If the classifier got the prediction wrong, add img file name to GotWrong column and correct column in database
                correctP = False
                # Add to to columns: GotWrong, Correct_column, JOINT, and Path
                gotWrong_query = f"INSERT INTO anvil_imgClassification (GotWrong, {corrected_label}, JOINT, Path) VALUES ('{str(keys_list[key])}', '{str(keys_list[key])}', '{str(orginal_label)}' , '{str(source_path)}')"
                localSQL.sql_insert(cnx, gotWrong_query)
            # Lastly, move image to new processed directory:
            try:
                if(use_sub_folders):
                    # Copy into the per-label folder first...
                    if(corrected_label == "Cotton"):
                        shutil.copy(source_path, proc_cotton_dir)
                    elif(corrected_label == "Plastic" ):
                        shutil.copy(source_path, proc_plastic_dir)
                    elif(corrected_label == "HID" ):
                        shutil.copy(source_path, proc_hid_dir)
                    elif(corrected_label == "Tray" ):
                        shutil.copy(source_path, proc_tray_dir)
                    elif(corrected_label == "Other" ):
                        shutil.copy(source_path, proc_other_dir)
                    # ...then either delete the source (classifier was
                    # right) or move it into the mislabeled folder:
                    if(correctP):
                        # Delete the file from the src directory
                        if os.path.exists(source_path):
                            os.remove(source_path)
                    else:
                        # move file to the GotWrong folder:
                        shutil.move(source_path, proc_mislabeled_dir)
                else:
                    shutil.move(source_path, dest_path)
            except (FileNotFoundError) as e_file:
                # NOTE(review): a single missing file aborts the whole
                # submission mid-loop without closing cnx -- confirm
                # whether skipping (continue) is intended instead.
                return
        # Close connection to the database:
        localSQL.sql_closeConnection(cnx)
        return
    elif(selected_folder == "update"):
        # Need to update modified labels stack:
        print(f"Updating modified labels from page {page_num} for user {user_id}")
        image_stack.update_stack(user_id, page_num, user_labels=modified_labels)
        # Names of table columns, will be iterated over
        column_names = ["Cotton","Plastic", "HID", "Tray", "Other"]
        print("Updating database...")
        # Search through CSV and find the lines that need to be altered:
        # get all the keys
        for key in classifier_labels:
            keys_list.append(key)
        # Next, Establish Connection to the Databse:
        cnx = localSQL.sql_connect()
        # Create a cursor
        cursor = cnx.cursor()
        for key in range(len(keys_list)):
            image_name = keys_list[key]
            corrected_label = modified_labels[keys_list[key]]
            # Iterate over the possible column (labels) in the table:
            for column in column_names:
                # Search for img name in each column to get the row:
                search_query = f"SELECT * FROM anvil_imgClassification WHERE {column} = ('{str(keys_list[key])}')"
                cursor.execute(search_query)
                result = cursor.fetchone()
                try:
                    cnx.commit()
                except (Exception) as err:
                    pass
                # RESULT RETURNED FORMAT: (row_number(id), user_id, Cotton, Plastic, Tray, HID, Other, GotWrong, PATH, JOINT) of type tuple
                if result:
                    if corrected_label == column:
                        print(f"No need to update img {image_name} found in {column} with label {corrected_label}, breaking out...")
                        break
                    # print(f"result value returned: {result}")
                    # print(f"Image name {str(keys_list[key])}")
                    # Get row number:
                    row_number = str(result[0])
                    # print(f"column value: {row_number}")
                    # Get JOINT value:
                    joint_value = str(result[-1])
                    # Set row value in previous column and GotWrong column to None:
                    update_query = "UPDATE anvil_imgClassification SET %s = NULL, GotWrong = NULL WHERE id = %s"%(column, row_number)
                    cursor.execute(update_query)
                    cnx.commit()
                    # check if joint == new_label
                    if(joint_value == corrected_label):
                        print("Joint == Correct!")
                        # Add img name to the corrected_label colum in row_number:
                        update_query = f"UPDATE anvil_imgClassification SET {str(corrected_label)} = '{str(keys_list[key])}' WHERE id = '{row_number}'"
                        cursor.execute(update_query)
                        cnx.commit()
                    else:
                        # New label disagrees with the classifier: mark GotWrong too.
                        update_query =f"UPDATE anvil_imgClassification SET {str(corrected_label)} = '{str(keys_list[key])}', GotWrong = '{str(keys_list[key])}' WHERE id = '{row_number}'"
                        cursor.execute(update_query)
                        cnx.commit()
                    # print("breaking..")
                    break
                else:
                    print(f"result not found in column {column}")
        # Close connection to the database:
        cnx.close()
return | JonWakefield/Anvil-Web-App | server_code/uplink_scripts/picture_capture_controls_uplink.py | picture_capture_controls_uplink.py | py | 22,281 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "stack.Stack",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tkinter.filedi... |
25995631588 | from dataclasses import dataclass, field
from .. import docker
from .. import exceptions
from .. import utils
from ..runtime import register, RuntimePlugin
@register
@dataclass
class Docker(RuntimePlugin):
    """Runtime plugin that resolves docker registry credentials for images."""

    name: str = field(init=False, default="Docker")

    def init(self, graph, outputs):
        # Parse the users docker conf file
        # and record a list of logins we know about
        self.auths = set()
        self.cfg = None
        self.graph = graph
        self.image_pull_secrets = {}
        cfg = docker.parse_config()
        if cfg:
            # BUG FIX: cfg.get("auths") returned None (crashing on
            # .keys()) when the config file has no "auths" section;
            # default to an empty mapping instead.
            self.auths |= set(cfg.get("auths", {}).keys())
            self.cfg = cfg

    def image_secrets_for(self, image):
        """Return an auth record for `image`'s registry domain, or None.

        Returns None when the tag cannot be parsed or we hold no
        credentials for the domain; otherwise caches the record in
        self.image_pull_secrets under "<graph>-<domain>".
        """
        m = docker.parse_docker_tag(image)
        if not m or m["domain"] not in self.auths:
            return None
        r = utils.AttrAccess(
            auth=docker.auth_for(self.cfg, m["domain"]),
            key=f"{self.graph.name}-{m['domain']}",
        )
        self.image_pull_secrets[r.key] = r
        return r
| parlaylabs/model | model/runtimes/docker.py | docker.py | py | 1,011 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "runtime.RuntimePlugin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "runtime.register",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dataclasses.d... |
75385540986 | from collections import defaultdict
T = int(input())  # number of test cases
for i in range(T):
    N = int(input())  # number of nodes in the tree
    # c[v-1] = gold at node v (nodes are 1-indexed in the input).
    c = list(map(int, input().split(' ')))
    g = defaultdict(list)  # adjacency list of the (undirected) tree
    for _ in range(N - 1):
        edge = list(map(int, input().split(' ')))
        g[edge[0]].append(edge[1])
        g[edge[1]].append(edge[0])

    def dfs(u, pere):
        # Best gold collectable on a single downward path starting at u,
        # where `pere` is u's parent (excluded when descending).
        # NOTE(review): recursive -- may hit Python's default recursion
        # limit on deep trees; confirm constraints or raise
        # sys.setrecursionlimit if needed.
        maxi = 0
        for v in g[u]:
            if v != pere:
                maxi = max(maxi, dfs(v, u))
        return maxi + c[u - 1]

    L = []
    for v in g[1]:
        L.append(dfs(v, 1))
    L.sort()
    # Two miners start at root 1: answer = root gold plus the best one
    # or two branch totals (sorted ascending, so take the last two).
    res = c[0]
    if len(L) > 0:
        res += L[-1]
    if len(L) > 1:
        res += L[-2]
    print(f"Case #{i + 1}: {res}")
| fortierq/competitions | fb_hacker_cup/2021/qualification/c1_gold_mine.py | c1_gold_mine.py | py | 686 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
}
] |
27884694892 | from django.contrib import admin
from .models import Division, Farm
# Register your models here.
class DivisionAdmin(admin.ModelAdmin):
    # Show both the human-readable name and the code in the changelist.
    list_display = (
        "division_name",
        "division_code",
    )


admin.site.register(Division, DivisionAdmin)
admin.site.register(Farm)  # Farm uses the default ModelAdmin.
| Wageesha95/dbapp-live | farms/admin.py | admin.py | py | 289 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 14,
"usage_type": "call"
},... |
40796544139 | import pydantic
from pydantic import validator
import typing
from uuid import UUID, uuid4
class SchemaCustomer(pydantic.BaseModel):
    """Customer as returned by the API; `id` is normalized to a string."""
    id: str
    name: str
    last_name: str
    email: pydantic.EmailStr
    age: pydantic.PositiveInt

    @validator('id', pre=True, always=True)
    def convert_id_to_str(cls, v):
        # Accept UUID/int ids from the storage layer and coerce to str
        # before pydantic validates the `id: str` field.
        return str(v)


class SchemaCustomerCreation(pydantic.BaseModel):
    """Payload for creating a customer (id is assigned server-side)."""
    name: str
    last_name: str
    email: pydantic.EmailStr
    age: pydantic.PositiveInt


class SchemaCustomerUpdate(pydantic.BaseModel):
    """Partial-update payload: every field is optional (None = unchanged)."""
    name: typing.Union[str, None]
    last_name: typing.Union[str, None]
    email: typing.Union[pydantic.EmailStr, None]
    age: typing.Union[pydantic.PositiveInt, None]
| edmon1024/workshop-api-ejemplo-fastapi | app/schemas.py | schemas.py | py | 708 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pydantic.EmailStr",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pydantic.PositiveInt",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name"... |
5085250146 | from copy import deepcopy
import json
import re
from flask import render_template
from maf_api_mock_data import EGFR_BLCA_BRCA as FAKE_MAF_DATA
from hotspots.seqpeek.tumor_types import tumor_types as ALL_TUMOR_TYPES
from app_logging import get_logger
log = get_logger()
try:
from hotspots.seqpeek.gene_list import gene_list as GENE_LIST
except ImportError:
log.error("Loading gene list failed, using static list.")
GENE_LIST = ['EGFR', 'TP53', 'PTEN']
from hotspots.seqpeek.uniprot_data import get_uniprot_data
from hotspots.seqpeek.interpro_data import get_protein_domain_data
from hotspots.seqpeek.cluster_data import get_cluster_data as get_cluster_data_remote
from hotspots.seqpeek.mutation_data import get_mutation_data as get_mutation_data_remote
from hotspots.seqpeek.mutation_data import get_mutation_data_summary_for_gene
SEQPEEK_VIEW_DEBUG_MODE = False
SEQPEEK_VIEW_MUTATION_DEBUG = False
SAMPLE_ID_FIELD_NAME = 'patient_barcode'
TUMOR_TYPE_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'amino_acid_position'
MUTATION_DATA_PROTEIN_FIELD = 'uniprot_id'
PROTEIN_DOMAIN_DB = 'PFAM'
ALPHA_FINDER = re.compile('[\W_]+', re.UNICODE)
TEMPLATE_NAME = 'hotspots/seqpeek/view.html'
def get_number_of_unique_samples(track):
    """Count distinct patient barcodes among a track's mutations."""
    unique_ids = {mutation[SAMPLE_ID_FIELD_NAME] for mutation in track['mutations']}
    return len(unique_ids)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics(track):
return {
'samples': {
'numberOf': get_number_of_unique_samples(track)
}
}
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
    """Build the single exon region spanning the whole protein."""
    whole_protein = {
        'type': 'exon',
        'start': 0,
        'end': protein_data['length']
    }
    return [whole_protein]
def build_summary_track(tracks, render_summary_only=False):
    """Merge every track's mutations into one combined summary track."""
    combined = [mutation for track in tracks for mutation in track["mutations"]]
    return {
        'mutations': combined,
        'label': 'COMBINED',
        'tumor': 'none-combined',
        'type': 'summary',
        # Variant layout is only done when the summary is rendered alone.
        'do_variant_layout': render_summary_only is True
    }
def get_track_label(track):
    # The tumor-type field doubles as the display label for a track.
    return track[TUMOR_TYPE_FIELD]
def process_raw_domain_data(data):
    """Convert raw InterPro matches into SeqPeek domain dicts (PFAM only)."""
    result = []
    for item in data:
        database = item['database']
        # Filter for PFAM
        if not database.startswith('PF'):
            continue
        domain = {
            # Truncated display name; full name kept alongside for tooltips.
            'name': item['name'][:5] + '...',
            'full_name': item['name'],
            'locations': [{
                'start': item['start'],
                'end': item['end']
            }],
            'dbname': 'PFAM',
            'ipr': {
                'type': 'Domain',
                'id': item['interpro_id'],
                # presumably a two-character display code -- TODO confirm
                # why only [:2] of the name is used here
                'name': item['name'][:2]
            },
            'id': database
        }
        result.append(domain)
    log.debug("Found {total} domains, filtered down to {num}".format(total=len(data), num=len(result)))
    return result
def get_protein_domains_remote(uniprot_id):
    """Fetch the UniProt entry and attach filtered PFAM domain matches."""
    uniprot_data = get_uniprot_data(uniprot_id)
    log.debug("UniProt entry: " + str(uniprot_data))
    # Add protein domain data to the UniProt entry
    raw_domain_data = get_protein_domain_data(uniprot_id)
    domains = process_raw_domain_data(raw_domain_data)
    uniprot_data['matches'] = domains
    return uniprot_data


def get_protein_domains(uniprot_id):
    # Indirection point: a cached/local source could be swapped in here.
    return get_protein_domains_remote(uniprot_id)


def get_maf_data_remote(gene, tumor_type_list):
    # Note the argument-order flip relative to the remote helper.
    return get_mutation_data_remote(tumor_type_list, gene)


def get_mutation_data(gene, tumor_type_list):
    """Mutation records for `gene`; returns fixture data in debug mode."""
    if SEQPEEK_VIEW_MUTATION_DEBUG:
        # deepcopy so callers can mutate the shared fixture safely.
        return deepcopy(FAKE_MAF_DATA['items'])
    else:
        return get_mutation_data_remote(tumor_type_list, gene)
def process_cluster_data_for_tumor(all_clusters, tumor_type):
    """Convert raw cluster records for one tumor type into track items."""
    matching = [c for c in all_clusters if c['tumor_type'] == tumor_type]
    items = []
    for index, cluster in enumerate(matching):
        items.append({
            'name': '',
            'type': 'cluster',
            'id': 'cluster_' + str(index),
            'locations': [{
                'start': cluster['start'],
                'end': cluster['end']
            }],
            'mutation_stats': cluster['mutation_stats'],
            'stats': cluster['stats']
        })
    return items
def build_track_data(tumor_type_list, all_tumor_mutations, all_clusters):
    """Build one SeqPeek track dict per requested tumor type.

    Each track carries the tumor's mutations, its processed cluster
    regions, and a render flag that is False when it has no mutations.
    """
    tracks = []
    for tumor_type in tumor_type_list:
        # BUG FIX: this previously kept the raw `filter` iterator; in
        # Python 3, len() on a filter object raises TypeError (and the
        # iterator is single-use). Materialize the list instead.
        mutations = [m for m in all_tumor_mutations if m['tumor_type'] == tumor_type]
        track_obj = {
            TUMOR_TYPE_FIELD: tumor_type,
            'mutations': mutations,
            'clusters': process_cluster_data_for_tumor(all_clusters, tumor_type),
            'do_variant_layout': True,
            # Only render tracks that actually have mutations.
            'render_in_seqpeek': len(mutations) > 0,
        }
        tracks.append(track_obj)
    return tracks
def find_uniprot_id(mutations):
    """Return the first uniprot_id present in the mutation records, else None."""
    uniprot_id = None
    for m in mutations:
        if MUTATION_DATA_PROTEIN_FIELD in m:
            uniprot_id = m[MUTATION_DATA_PROTEIN_FIELD]
            break
    return uniprot_id


def get_cluster_data(tumor_type_array, gene):
    """Hotspot cluster regions for `gene` across the given tumor types."""
    clusters = get_cluster_data_remote(tumor_type_array, gene)
    return clusters


def sanitize_gene_input(gene_parameter):
    # Strip every non-alphanumeric character (ALPHA_FINDER = [\W_]+).
    return ALPHA_FINDER.sub('', gene_parameter)


def sanitize_normalize_tumor_type(tumor_type_list):
    """Whitelist-filter requested tumor types against the known set."""
    tumor_set = frozenset(ALL_TUMOR_TYPES)
    sanitized = []
    for tumor_param in tumor_type_list:
        if tumor_param in tumor_set:
            sanitized.append(tumor_param)
    return sanitized
def format_tumor_type_list(tumor_type_array, selected_types=()):
    """Shape tumor types for the template: [{'name': ..., 'selected': bool}].

    `selected_types` now defaults to an empty tuple instead of a mutable
    list literal (avoids the shared-mutable-default pitfall); any
    container supporting `in` is accepted, so callers are unaffected.
    """
    result = []
    for tumor_type in tumor_type_array:
        result.append({
            'name': tumor_type,
            'selected': tumor_type in selected_types
        })
    return result
def seqpeek(request_gene, request_tumor_list, summary_only=False):
    """Build and render the SeqPeek protein-mutation visualization page.

    Args:
        request_gene: raw gene symbol from the request, or None.
        request_tumor_list: raw tumor-type parameters from the request.
        summary_only: when True, render a single aggregate track for the
            gene instead of one track per tumor type.

    Returns:
        The rendered TEMPLATE_NAME. On missing/invalid parameters, zero
        mutations, or an unresolved protein ID, an early render is returned
        with the corresponding context['query_status'] flag set.
    """
    gene = None
    if request_gene is not None:
        # Remove non-alphanumeric characters from parameters and uppercase all
        gene = sanitize_gene_input(request_gene).upper()
    # Keep only tumor types present in the known ALL_TUMOR_TYPES list.
    parsed_tumor_list = sanitize_normalize_tumor_type(request_tumor_list)
    log.debug("Valid tumors from request: {0}".format(str(parsed_tumor_list)))
    tumor_types_for_tpl = format_tumor_type_list(ALL_TUMOR_TYPES, parsed_tumor_list)
    # Base template context. query_status flags drive the error/empty states
    # in the template; static_data is JSON-serialized just before each render.
    context = {
        'gene_select_widget': {
            'action': '/seqpeek',
            'tumor_type_select': True,
            'all_tumor_types': tumor_types_for_tpl,
            'button_label': 'Redraw'
        },
        'query_status': {
            'no_mutations_found': False,
            'uniprot_id_not_found': False,
            'data_found': False,
            'summary_only': False,
            'insufficient_parameters': False,
            'request_gene': request_gene
        },
        'gene_label': gene,
        'is_gene_summary': summary_only,
        'static_data': {
            'gene_list': GENE_LIST,
            'gene_label': gene,
            'fill_in_gene': True
        },
        'all_tumor_types': tumor_types_for_tpl
    }
    # Early exit: no gene given, or a per-tumor view requested with no valid
    # tumor types selected.
    if (len(parsed_tumor_list) == 0 and summary_only is False) or gene is None:
        context['query_status']['insufficient_parameters'] = True
        context['static_data']['fill_in_gene'] = False
        context.update({
            'static_data': json.dumps(context['static_data'])
        })
        return render_template(TEMPLATE_NAME, **context)
    # Per-tumor view needs cluster data as well; summary view only mutations.
    if summary_only is False:
        cluster_data = get_cluster_data(parsed_tumor_list, gene)
        maf_data = get_mutation_data(gene, parsed_tumor_list)
    else:
        maf_data = get_mutation_data_summary_for_gene(gene)
    # Early exit: no mutations at all for this gene/tumor selection.
    if len(maf_data) == 0:
        context['query_status']['no_mutations_found'] = True
        context['static_data']['fill_in_gene'] = False
        context.update({
            'static_data': json.dumps(context['static_data'])
        })
        return render_template(TEMPLATE_NAME, **context)
    # The protein ID is required to look up protein domain annotations.
    uniprot_id = find_uniprot_id(maf_data)
    if uniprot_id is None:
        context['query_status']['uniprot_id_not_found'] = True
        context['static_data']['fill_in_gene'] = False
        context.update({
            'static_data': json.dumps(context['static_data'])
        })
        return render_template(TEMPLATE_NAME, **context)
    log.debug("Found UniProt ID: " + repr(uniprot_id))
    context['query_status']['data_found'] = True
    protein_data = get_protein_domains(uniprot_id)
    plot_data = {
        'gene_label': gene,
        'protein': protein_data
    }
    if summary_only is False:
        track_data = build_track_data(parsed_tumor_list, maf_data, cluster_data)
        plot_data['tracks'] = track_data
        # Pre-processing
        # - Sort mutations by chromosomal coordinate
        for track in plot_data['tracks']:
            track['mutations'] = sort_track_mutations(track['mutations'])
        # Annotations
        # - Add label, possibly human readable
        # - Add type that indicates whether the track is driven by data from search or
        # if the track is aggregate
        for track in plot_data['tracks']:
            track['type'] = 'tumor'
            track['label'] = get_track_label(track)
        # Append an aggregate track summarizing all per-tumor tracks.
        plot_data['tracks'].append(build_summary_track(plot_data['tracks'], render_summary_only=False))
    else:
        summary_track = {
            'mutations': sort_track_mutations(maf_data)
        }
        plot_data['tracks'] = [build_summary_track([summary_track], render_summary_only=True)]
    for track in plot_data['tracks']:
        # Calculate statistics
        track['statistics'] = get_track_statistics(track)
        # Unique ID for each row
        track['render_info'] = {
            'row_id': get_table_row_id(track[TUMOR_TYPE_FIELD])
        }
    plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
    plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
    # Filter the tracks-array for Seqpeek. Only leave tracks with at least one mutation.
    seqpeek_data = {key: plot_data[key] for key in ['gene_label', 'protein', 'regions']}
    seqpeek_tracks = []
    for track in plot_data['tracks']:
        if len(track['mutations']) > 0:
            # Gene has to be passed to the track object, so that it can be used
            # to construct the URI for the pathway association view
            track['gene'] = gene
            seqpeek_tracks.append(track)
        else:
            log.debug("{0}: 0 mutations, not rendering in SeqPeek.".format(track['label']))
    seqpeek_data['tracks'] = seqpeek_tracks
    tumor_list = ','.join(parsed_tumor_list)
    context.update({
        'search': {},
        'plot_data': plot_data,
        'data_bundle': json.dumps(seqpeek_data),
        'gene': gene,
        'tumor_list': tumor_list
    })
    context.update({
        'static_data': json.dumps(context['static_data'])
    })
    return render_template(TEMPLATE_NAME, **context)
| cancerregulome/multiscale-mutation-hotspots | hotspots/seqpeek/view.py | view.py | py | 11,643 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "app_logging.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "hotspots.seqpeek.gene_list.gene_list",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": ... |
from datetime import datetime, timedelta, timezone

from fastapi import status, HTTPException, Depends
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from sqlalchemy.orm import Session

from app import database, models
from .config import env
from .schema import TokenData
# JWT signing configuration, loaded from the environment-backed settings.
SECRET_KEYS = env.SECRET_KEY
ALGORITHM = env.ALGORITHM
ACCESS_TOKEN_EXPIRE_MINUTES = env.ACCESS_TOKEN_EXPIRE_MINUTES
# Extracts the bearer token from incoming requests; tokenUrl points at the
# login endpoint that issues tokens.
oauth2_schema = OAuth2PasswordBearer(tokenUrl="users/login")
def create_access_token(payload: dict) -> str:
    """Create a signed JWT containing *payload* plus an expiry claim.

    Args:
        payload: claims to embed in the token (copied, never mutated).

    Returns:
        The encoded JWT string, signed with SECRET_KEYS using ALGORITHM and
        expiring ACCESS_TOKEN_EXPIRE_MINUTES from now.
    """
    to_encode = payload.copy()
    # Use a timezone-aware UTC datetime: datetime.utcnow() is naive and
    # deprecated since Python 3.12; jose converts both forms to a UTC
    # timestamp for the "exp" claim.
    expire = datetime.now(timezone.utc) + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    to_encode.update({"exp": expire})
    token = jwt.encode(to_encode, SECRET_KEYS, algorithm=ALGORITHM)
    return token
def verify_access_token(token: str, credentials_exception):
    """Decode *token* and return its TokenData.

    Raises *credentials_exception* when the token is invalid or carries
    no "user_id" claim.
    """
    try:
        claims = jwt.decode(token, SECRET_KEYS, algorithms=[ALGORITHM])
    except JWTError:
        raise credentials_exception
    # Renamed from `id` to avoid shadowing the builtin.
    user_id = claims.get("user_id")
    if user_id is None:
        raise credentials_exception
    return TokenData(id=user_id)
def get_current_user(
    token: str = Depends(oauth2_schema),
    db: Session = Depends(database.get_db)
):
    """FastAPI dependency: resolve the request's bearer token to its User row.

    Returns the matching User, or None when no row has the token's id.
    """
    unauthorized = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Auth": "Bearer"}
    )
    token_data = verify_access_token(token, unauthorized)
    return db.query(models.User).filter(models.User.id == token_data.id).first()
| Ichi-1/FastAPI-example-api | app/oauth2.py | oauth2.py | py | 1,581 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "config.env.SECRET_KEY",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "config.env",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "config.env.ALGORITHM",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "confi... |
11670856973 |
# See subject at https://www.ilemaths.net/sujet-suite-864999.html
"""
La suite de Conway
"""
from itertools import groupby, islice
def gen_conway(germe):
    """Yield the terms of Conway's look-and-say sequence, starting at *germe*."""
    terme = germe
    while True:
        yield terme
        # Read the current term aloud: each run of identical digits becomes
        # "<run length><digit>".
        morceaux = []
        for chiffre, serie in groupby(terme):
            morceaux.append(str(len(list(serie))) + chiffre)
        terme = "".join(morceaux)
def main():
    """Program entry point: prompt for a seed and print the requested terms."""
    seed = input("Donner le premier terme de la suite de Conway : ")
    count = int(input("Combien de termes voulez-vous calculer ? "))
    # n terms requested -> indices 0..n inclusive, matching the original.
    for index, terme in enumerate(islice(gen_conway(seed), count + 1)):
        print(f"terme numéro {index}: \t{terme}")
if __name__ == "__main__":
main()
| bdaene/ilemaths | suite-864999.py | suite-864999.py | py | 667 | python | fr | code | 0 | github-code | 6 | [
{
"api_name": "itertools.groupby",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.islice",
"line_number": 23,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.