seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1322353770 | from resizer import *
import argparse
if __name__ == "__main__":

    def _str2bool(value):
        """Parse a command-line string into a bool.

        BUG FIX: the previous ``type=bool`` is a known argparse pitfall —
        ``bool("False")`` is True, so any non-empty value (including
        ``--inplace False``) enabled in-place saving. This converter keeps
        the same CLI shape while actually interpreting the text.
        """
        return str(value).strip().lower() in ("1", "true", "yes", "y")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--src",
        type=str,
        required=True,
        help="The directory of the folder with the image to be resized.",
    )
    parser.add_argument(
        "--width", type=int, required=True, help="Width of a resized image"
    )
    parser.add_argument(
        "--height", type=int, required=True, help="Height of a resized image"
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        required=False,
        default=None,
        help="A directory to store images.",
    )
    parser.add_argument(
        "--inplace",
        type=_str2bool,
        required=False,
        default=False,
        help="Whether to save the images inplace or not.",
    )
    args = parser.parse_args()

    # Load, resize, then save: in place, to --save_dir when given, or to
    # the default location chosen by save_images().
    images, image_names, folder_name, file_list = open_images(args.src)
    images = resize_images(images, args.width, args.height)
    if args.inplace:
        inplace_save_images(images, file_list)
    elif args.save_dir is not None:
        save_images(images, image_names, folder_name, args.save_dir)
    else:
        save_images(images, image_names, folder_name)
    print("Done")
| hjk1996/Image-Resizer | main.py | main.py | py | 1,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
}
] |
42243204047 | import numpy as np
import argparse
import sys
import matplotlib.pyplot as plt
from datetime import datetime
max_value = 6
def classify(w, sample):
    """Classify *sample* with linear weights *w*.

    Returns the sign of the dot product: +1.0 or -1.0, or 0.0 when the
    sample lies exactly on the decision boundary.
    """
    activation = np.dot(w, sample)
    return np.sign(activation)
def generate_dataset(num_data_points, dimension):
    """Sample *num_data_points* points uniformly in
    [-max_value, max_value]^dimension and prepend a constant bias
    coordinate x0 = 1, giving an array of shape
    (num_data_points, dimension + 1).
    """
    bias_column = np.ones(shape=(num_data_points, 1))
    # uniform draw in [-max_value, max_value) per coordinate
    coords = 2 * max_value * np.random.random(size=(num_data_points, dimension)) - max_value
    return np.hstack((bias_column, coords))
def plot_data(f, data_points, labels, w):
    """Scatter-plot the 2-D labelled points together with the target
    boundary f and, optionally, the learned boundary w.

    f, w: weight vectors [bias, w1, w2]; pass w=None before training.
    data_points: rows of [1, x1, x2]; labels: +1.0 / -1.0 per row.
    Blocks on plt.show().
    """
    x = np.array([-max_value, max_value])
    # boundary of the target f: solve f0 + x*f1 + y*f2 = 0 for y
    f_line = - (f[0] + x * f[1]) / f[2]
    plt.plot(x, f_line, label="f")
    # boundary of the learned classifier g (only if already trained)
    if w is not None:
        w_line = - (w[0] + x * w[1]) / w[2]
        plt.plot(x, w_line, label="g")
    plt.legend()
    # find the positive examples (label = 1) and negative examples (label = -1)
    positive_examples = [idx for idx, label in enumerate(labels) if label == 1.0]
    negative_examples = [idx for idx, label in enumerate(labels) if label == -1.0]
    # plot them: columns 1 and 2 are x1/x2 (column 0 is the bias term)
    plt.plot(data_points[positive_examples, 1], data_points[positive_examples, 2], "go")
    plt.plot(data_points[negative_examples, 1], data_points[negative_examples, 2], "rx")
    # clamp the plot to the sampling window (x and y)
    plt.axis([-max_value, max_value, -max_value, max_value])
    plt.show()
def generate_random_f(data_points, dimension):
    """Draw a random target boundary f (weights in [-0.5, 0.5)) whose
    intercept at x1 = 0 lies inside the plotting window, and label every
    data point against it.

    Returns (f, labels). Reads the module globals max_value and
    plot_data_flag; the latter is only assigned in the __main__ block, so
    calling this from elsewhere raises NameError — TODO confirm intended.
    """
    # generate a boundary plane and check that it's inside our zone of interest
    while True:
        f = np.random.random(dimension+1) - 0.5
        # y-intercept at x1 = 0; NOTE(review): a near-zero f[2] can make
        # this rejection loop spin for many iterations
        y_value = - (f[0] + 0 * f[1]) / f[2]
        # if the value at 0 is inside the range (-max_value, max_value), it's good enough
        if (abs(y_value) <= max_value):
            break
    # generate the labels for the given f
    labels = [classify(f, sample) for sample in data_points]
    # `&` works because both operands are bools, but `and` is the idiom
    if plot_data_flag & (dimension == 2):
        plot_data(f, data_points, labels, None)
    return f, labels
def train_perceptron(data_points, labels, dimension):
    """Run the perceptron learning algorithm until every point is
    classified correctly (assumes the data is linearly separable).

    Returns (weights, elapsed_ms, mistake_count) and prints a summary.
    """
    start = datetime.now()
    # random initial weights in [-0.5, 0.5)
    w = np.random.random(dimension + 1) - 0.5
    steps = 0
    mistakes_found = True
    while mistakes_found:
        mistakes_found = False
        for idx, sample in enumerate(data_points):
            # misclassified sample: nudge the weights towards its label
            if classify(w, sample) != labels[idx]:
                steps += 1
                w += labels[idx] * sample
                mistakes_found = True
    time_diff_ms = (datetime.now() - start).total_seconds() * 1000
    print("Finished training in " + "{0:.5f}".format(time_diff_ms) + " milliseconds " + str(steps) + " training steps.")
    return w, time_diff_ms, steps
def run(num_data_points, dimension=2):
    """One full experiment: sample a dataset, draw a random target f,
    train a perceptron on the induced labels, and (optionally) plot.

    Returns (training_time_ms, training_steps). Reads the module-level
    plot_data_flag global, which is only assigned in the __main__ block.
    """
    data_points = generate_dataset(num_data_points, dimension)
    f, labels = generate_random_f(data_points, dimension)
    w, train_time, steps = train_perceptron(data_points, labels, dimension)
    # plotting only makes sense for 2-D data (`&` is a bitwise and of bools)
    if plot_data_flag & (dimension == 2):
        plot_data(f, data_points, labels, w)
    return train_time, steps
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Play with a perceptron.')
parser.add_argument("num_data_points", type=int,
help='num of data points to be generated')
parser.add_argument("--D", '--dimension', dest='dimension', type=int,
help='space dimension')
parser.add_argument("--I", '--iterations', dest='iterations', type=int,
help='iterations', default=1)
args = parser.parse_args()
if args.iterations > 1:
plot_data_flag = False
else:
plot_data_flag = True
time_list = np.zeros(shape=args.iterations)
steps_list = np.zeros(shape=args.iterations)
for iteration in range(args.iterations):
if args.dimension:
train_time, steps = run(args.num_data_points, args.dimension)
else:
train_time, steps = run(args.num_data_points)
time_list[iteration] = train_time
steps_list[iteration] = steps
print()
print("Average training time: " + str(time_list.mean()) + " and variance: " + str(time_list.var()))
print("Average steps: " + str(steps_list.mean()) + " and variance: " + str(steps_list.var())) | mjuvilla/ML-UPF-Homework | H1/ml_h1.py | ml_h1.py | py | 4,452 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.sign",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_numbe... |
21247224214 | __author__ = 'yuerzx'
import csv
import pymongo
from pymongo import MongoClient
# Connect to the local MongoDB instance and select the target collection.
data_client = MongoClient()
data_base = data_client.Locations
# add authenticate for the MongoDB
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# an environment variable or a config file and rotate this password.
data_base.authenticate('EZYProperty', '8jshf7asd')
super_c = data_base.supermarket
# Counters for the import summary printed at the end of the script.
counter = 0
err_counter = 0
# Import every supermarket row of the CSV into the `supermarket` collection,
# storing the coordinates as a GeoJSON Point (longitude first, then latitude)
# so the collection can back geospatial queries.
with open("/home/yuerzx/Desktop/woolworth_geo.csv", 'r', newline = '') as market_list:
    reader = csv.reader(market_list, delimiter = ',', quoting = csv.QUOTE_MINIMAL)
    next(reader)  # skip the header row
    for row in reader:
        # assumes columns: type, id, name, suburb, state, postcode, phone,
        # latitude (7), longitude (8), full address (9) -- TODO confirm
        data = { "loc" :
                     { "type": "Point", "coordinates": [ float(row[8]), float(row[7]) ] },
                 "S_Type" : row[0],
                 "S_Id" : row[1],
                 "S_Name" : row[2],
                 "Suburb" : row[3],
                 "State" : row[4],
                 "PCode" : row[5],
                 "Phone" : row[6],
                 "F_Address": row[9],
                 }
        # NOTE(review): Collection.insert() is deprecated in modern PyMongo;
        # insert_one() is the current API.
        results = super_c.insert(data)
        if results:
            counter += 1
            print("Done with %s"%row[2])
        else:
            err_counter += 1
            print("Error")
            print(results)
print("Total result is %d with %d errors"%(err_counter+counter, err_counter)) | yuerzx/python_information | supermarket_location/import_into_mongodb.py | import_into_mongodb.py | py | 1,237 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 15,
"usage_type": "attribute"
}
] |
74753775465 | from prophepy import Mock
from .builtin_matchers import get_matcher
from .exceptions import CustomMatcherError
from .utils import map_for_dict, reveal_if_needed
class Subject:
    '''
    This class represents the specced object: a thin wrapper around the real
    object that intercepts `_should_*` assertion calls and resolves them
    against custom or builtin matchers, while forwarding any other attribute
    access to the real object and re-wrapping the result so calls chain.
    '''
    def __init__(self, value, object_behavior):
        '''
        It is instanciated with the real object, and the spec
        (the spec provides the custom matchers via its _matchers() method).
        '''
        self.__value = value
        self.__object_behavior = object_behavior

    def _get_value(self):
        '''
        Get the real specced object
        '''
        return self.__value

    def match_with_custom_matcher(self, matcher_name, matcher, *args):
        '''
        Launch a test against a custom matcher and raise a CustomMatcherError
        if it fails. Returns the wrapped value so callers can keep chaining.
        '''
        if not matcher(self.__value, *args):
            raise CustomMatcherError(f'Custom matcher "{matcher_name}" failed.')
        return self.__value

    def __getattr__(self, attr_name):
        '''
        If the method is a _should_ one, it will try to find a matcher
        (custom matchers take precedence over builtin ones). If not, it will
        execute the action on the internal specced object and return a new
        Subject instance.

        Note: __getattr__ only fires for attributes not found through normal
        lookup, so the methods defined above are never shadowed.
        '''
        if attr_name.startswith('_should_'):
            matcher_type = attr_name[len('_should_'):]
            # custom matcher
            if matcher_type in self.__object_behavior._matchers().keys():
                matcher = self.__object_behavior._matchers()[matcher_type]
                def custom_matcher_wrapper(*args):
                    return Subject(
                        self.match_with_custom_matcher(matcher_type, matcher, *args),
                        self.__object_behavior
                    )
                return custom_matcher_wrapper
            # builtin matcher
            matcher = get_matcher(matcher_type)
            def checker_wrapper(expected_value):
                # return value ignored: builtin matchers presumably signal a
                # mismatch themselves -- verify against builtin_matchers
                matcher(self.__value, expected_value)
                return Subject(
                    self.__value,
                    self.__object_behavior
                )
            return checker_wrapper

        def action_wrapper(*args, **kwargs):
            # unwrap any mocked/wrapped arguments before delegating the call
            args = map(reveal_if_needed, args)
            kwargs = map_for_dict(reveal_if_needed, kwargs)
            return Subject(
                getattr(self.__value, attr_name)(*args, **kwargs),
                self.__object_behavior
            )
        return action_wrapper
| Einenlum/specify | specify/subject.py | subject.py | py | 2,436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "exceptions.CustomMatcherError",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "builtin_matchers.get_matcher",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "utils.reveal_if_needed",
"line_number": 63,
"usage_type": "argument"
},
{
... |
69829306344 | from torchvision import transforms, datasets
import h5py
import os
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
#参考:https://blog.csdn.net/shwan_ma/article/details/100012808
#https://github.com/pbizopoulos/signal2image-modules-in-deep-neural-networks-for-eeg-classification/blob/master/dataset.py
class DataFromMat(Dataset):
    """PyTorch Dataset over the A01T..A09T EEG .mat recordings (HDF5 layout).

    Loads all nine subjects, keeps the first 22 electrodes, drops trials
    containing NaNs, pools every subject together and splits 80/20 in order
    (no shuffling or stratification) into a 'train' or 'test' view.
    """
    def __init__(self, filepath, training_test , standardize=True):
        # filepath: directory containing the A0xT_slice.mat files
        # training_test: 'train' -> first 80% of trials, 'test' -> last 20%
        # standardize: z-score using the mean/std of this split only
        electrodes = 22  # keep only the 22 EEG electrodes
        X, y = [], []
        # ---- load every subject's .mat file ----
        for i in range(9):
            A01T = h5py.File(filepath +'A0'+ str(i + 1) + 'T_slice.mat', 'r')
            X1 = np.copy(A01T['image'])
            X1 = X1[:, :electrodes, :]
            X.append(np.asarray(X1,dtype=np.float32))
            y1 = np.copy(A01T['type'])
            y1 = y1[0, 0:X1.shape[0]:1]  # label of each trial for this subject
            y.append(np.asarray(y1, dtype=np.int32))
        # ---- drop any trial that contains NaN samples ----
        for subject in range(9):
            delete_list = []  # trials to discard for this subject
            for trial in range(288):
                if np.isnan(X[subject][trial, :, :]).sum() > 0:
                    delete_list.append(trial)
            X[subject] = np.delete(X[subject], delete_list, 0)
            y[subject] = np.delete(y[subject], delete_list)
        # shift labels to start at 0 (classes become 0,1,2,3)
        y = [y[i] - np.min(y[i]) for i in range(len(y))]
        # pool all subjects' signals and labels together
        signals_all = np.concatenate((X[0], X[1], X[2], X[3], X[4], X[5], X[6], X[7], X[8]))
        labels_all = np.concatenate((y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7], y[8]))
        last_training_index = int(signals_all.shape[0]*0.8)
        # ---- ordered 80/20 train/test split ----
        if training_test == 'train':
            self.data = torch.tensor(signals_all[:last_training_index, :], dtype=torch.float)
            self.labels = torch.tensor(labels_all[:last_training_index])
        elif training_test == 'test':
            self.data = torch.tensor(signals_all[last_training_index:, :], dtype=torch.float)
            self.labels = torch.tensor(labels_all[last_training_index:])
        # z-score: subtract the mean and divide by the std, both computed
        # over this split only
        if standardize:
            data_mean = self.data.mean(0)
            data_var = np.sqrt(self.data.var(0))
            self.data = (self.data -data_mean)/data_var

    def __getitem__(self, idx):
        # return one (trial, label) pair
        data = self.data[idx]
        label = self.labels[idx]
        return data,label

    def __len__(self):
        # number of trials in this split
        return self.data.shape[0]
def get_data(filepath, standardize=True):
    """Build the train/test EEG datasets from *filepath* and wrap them in
    shuffled DataLoaders (batch size 64, 4 workers).

    Returns (train_loader, test_loader, train_size, test_size).
    Note: *standardize* is accepted for interface compatibility but the
    datasets are constructed with their default standardization.
    """
    datasets = {split: DataFromMat(filepath, split) for split in ('train', 'test')}
    loaders = {
        split: DataLoader(ds, batch_size=64, shuffle=True, num_workers=4)
        for split, ds in datasets.items()
    }
    return (loaders['train'], loaders['test'],
            len(datasets['train']), len(datasets['test']))
if __name__ == '__main__':
    filepath = "./data/"
    # Smoke test: use part of the data as the test set and print batch shapes.
    # NOTE(review): get_data() returns 4 values (train_loader, test_loader,
    # train_sizes, test_sizes); unpacking only 2 here raises ValueError.
    train_loader,test_loader = get_data(filepath)
    for signals, labels in test_loader:
        print('signals.shape',signals.shape)
        print('labels.shape',labels.shape)
| im-wll/EEG-process | dataset/dataloader.py | dataloader.py | py | 3,865 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils.data.dataset.Dataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",... |
70808020583 | from flask import Flask, request, jsonify
from a_entities.bank_account import BankAccount
from a_entities.customer import Customer
from b_data_access_layer.postgres_bank_account_dao import BankAccountPostgresDAO
from b_data_access_layer.postgres_customer_dao import CustomerPostgresDAO
from c_service_layer.postgres_bank_account_service import BankAccountPostgresService
from c_service_layer.postgres_customer_service import CustomerPostgresService
from c_service_layer.custom_exceptions import *
import logging
logging.basicConfig(filename="records.log", level=logging.DEBUG, format=f"%(asctime)s %(levelname)s %(message)s")
# Created the Flask object to use flask environment. Also created the DAO and the Service layer instances so that all
# of the information for both layers are available here.
# Flask app plus singleton DAO/service instances shared by every request
# handler below (created once at import time).
app = Flask(__name__)
customer_dao = CustomerPostgresDAO()
customer_service = CustomerPostgresService(customer_dao)
bank_account_dao = BankAccountPostgresDAO()
bank_account_service = BankAccountPostgresService(bank_account_dao)
@app.post("/customer")
def create_customer():
    """POST /customer -- create a customer from a JSON body with keys
    firstName, lastName, customerId; returns the created customer as JSON,
    or {"Message": ...} when the service raises WrongInformationException."""
    try:
        # We retrieve the request that the API sent to this server.
        customer_data = request.get_json()
        # Shape the incoming JSON into the domain object the service layer expects.
        new_customer = Customer(customer_data["firstName"], customer_data["lastName"], customer_data["customerId"])
        # We pass this retrieved and formatted data into our service layer.
        customer_to_return = customer_service.service_create_customer(new_customer)
        # The object returned by the DAO/service layers is turned into a dictionary.
        customer_as_dictionary = customer_to_return.customer_dictionary()
        # Converting the dictionary into a JSON.
        customer_as_json = jsonify(customer_as_dictionary)
        # Sending the jsonified dictionary back to the caller.
        return customer_as_json
    except WrongInformationException as w:
        # Validation failure: surface the exception message as JSON.
        exception_dictionary = {"Message" : str(w)}
        jsonify_exception = jsonify(exception_dictionary)
        return jsonify_exception
@app.post("/account")
def create_bank_account():
    """POST /account -- create a bank account from a JSON body with keys
    accountId, customerId, balance; returns the created account as JSON."""
    payload = request.get_json()
    account = BankAccount(payload["accountId"], payload["customerId"], payload["balance"])
    created = bank_account_service.service_create_bank_account(account)
    return jsonify(created.bank_account_dictionary())
@app.get("/customer/<customer_id>")
def get_customer_information(customer_id: str):
    """GET /customer/<id> -- look up one customer by id, returned as JSON.
    (GET requests carry no body; the id comes from the URL path.)"""
    customer = customer_service.service_get_customer_information(int(customer_id))
    return jsonify(customer.customer_dictionary())
@app.get("/account/<account_id>")
def get_account_information(account_id: str):
    """GET /account/<id> -- look up one bank account by id, returned as JSON."""
    account = bank_account_service.service_view_bank_account(int(account_id))
    return jsonify(account.bank_account_dictionary())
@app.patch("/customer/<customer_id>")
def update_customer_information(customer_id: str):
    """PATCH /customer/<id> -- replace the customer's first/last name with
    the values from the JSON body."""
    payload = request.get_json()
    updated = Customer(payload["firstName"], payload["lastName"], int(customer_id))
    customer_service.service_update_customer_information(updated)
    return "Hooray! Customer with id {} updated successfully.".format(customer_id)
@app.patch("/account/deposit/<account_id>/<balance>")
def deposit(account_id: str, balance: str):
    """PATCH /account/deposit/<account_id>/<balance> -- deposit <balance>
    into the account; the JSON body must carry customerId and balance.

    NOTE(review): the amount is parsed as int(balance) here while withdraw()
    uses float(balance); confirm whether fractional deposits are intended.
    """
    money_data = request.get_json()
    # assumes the body's balance is the account's current balance -- TODO confirm
    new_balance = BankAccount(int(account_id), money_data["customerId"], money_data["balance"])
    bank_account_service.service_deposit(int(balance), new_balance)
    return "The balance in account {} has been updated.".format(account_id)
# Database, Postman not catching the insufficient funds exception!!!!
@app.patch("/account/withdraw/<account_id>/<balance>")
def withdraw(account_id: str, balance: str):
    """PATCH /account/withdraw/<account_id>/<balance> -- withdraw <balance>
    from the account; returns {"Message": ...} as JSON when the service
    raises InsufficientFundsException."""
    try:
        # The request from the API comes in as string information so the account id and balance
        # have to be converted back to the proper data types inside the call.
        # The front end is not sending us a body of information so we don't need request.get_json.
        bank_account_service.service_withdraw(int(account_id), float(balance))
        return "The balance in account {} has been updated.".format(account_id)
    except InsufficientFundsException as i:
        exception_dictionary = {"Message": str(i)}
        jsonify_exception = jsonify(exception_dictionary)
        return jsonify_exception
@app.patch("/account/<account_one>/<account_two>/<balance>")
def transfer_funds(account_one: str, account_two: str, balance: str):
    """PATCH /account/<from>/<to>/<amount> -- move <amount> between two
    accounts of the customer identified in the JSON body; returns a JSON
    {"Message": ...} when the service raises InsufficientFundsException."""
    try:
        transfer_data = request.get_json()
        # both domain objects are built from the same body fields
        transfer_one = BankAccount(int(account_one), transfer_data["customerId"], transfer_data["balance"])
        transfer_two = BankAccount(int(account_two), transfer_data["customerId"], transfer_data["balance"])
        bank_account_service.service_transfer_funds(int(balance), transfer_one, transfer_two)
        return "The transfer of ${} has been completed.".format(balance)
    except InsufficientFundsException as i:
        exception_dictionary = {"Message" : str(i)}
        jsonify_exception = jsonify(exception_dictionary)
        return jsonify_exception
@app.get("/customer")
def view_all_customers():
    """GET /customer -- return every customer as a JSON array.
    (No request body is sent for this endpoint.)"""
    everyone = customer_service.service_view_all_customers()
    return jsonify([person.customer_dictionary() for person in everyone])
@app.get("/account/<customer_id>")
def view_accounts_per_customer(customer_id: str):
    """GET /account/<customer_id> -- all bank accounts owned by one customer.

    NOTE(review): this rule has the same URL pattern as GET
    /account/<account_id> (get_account_information), which is registered
    earlier; matching requests are likely routed there, making this endpoint
    unreachable -- confirm and disambiguate the routes.
    """
    customer_accounts = bank_account_service.service_view_accounts_per_customer(int(customer_id))
    cust_accounts_as_dictionaries = []
    for cust in customer_accounts:
        cust_dictionary_accounts = cust.bank_account_dictionary()
        cust_accounts_as_dictionaries.append(cust_dictionary_accounts)
    return jsonify(cust_accounts_as_dictionaries)
@app.get("/account")
def view_all_bank_accounts():
    """GET /account -- return every bank account as a JSON array."""
    every_account = bank_account_service.service_view_all_bank_accounts()
    return jsonify([acct.bank_account_dictionary() for acct in every_account])
@app.delete("/customer/<customer_id>")
def delete_customer(customer_id: str):
    """DELETE /customer/<id> -- remove a customer; returns a JSON
    {"Message": ...} when the service raises DeletionErrorException."""
    try:
        customer_service.service_delete_customer(int(customer_id))
    except DeletionErrorException as issue:
        return jsonify({"Message" : str(issue)})
    return "Customer with id {} has been deleted.".format(customer_id)
@app.delete("/account/<account_id>")
def delete_bank_account(account_id: str):
    """DELETE /account/<id> -- remove a bank account by its id."""
    bank_account_service.service_delete_bank_account(int(account_id))
    return "Bank account with id {} has been deleted.".format(account_id)
app.run() | bluedragonscales/project0_banking | main.py | main.py | py | 7,753 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "b_data_access_laye... |
24537866239 | from operator import itemgetter, add
from pathlib import Path
# Advent of Code 2017, day 6: memory reallocation.
# Repeatedly redistribute the largest bank's blocks round-robin until a
# configuration repeats; print (cycles until first repeat, loop length).
banks = list(map(int, Path("day6.txt").read_text().split()))
n, history = len(banks), {}
while tuple(banks) not in history:
    history[tuple(banks)] = len(history)  # remember when this state first appeared
    i, mx = max(enumerate(banks), key = itemgetter(1))  # ties -> lowest index wins
    banks[i] = 0
    for i in range(i + 1, i + 1 + mx):
        banks[i % len(banks)] += 1 # mx ended up being small, so this is fine
    # disabled closed-form redistribution alternative, kept for reference
    if False:
        div, rem = divmod(mx, n)
        banks[:] = map(add, map(add, banks, [div] * n), [0 if (i - (n - rem)) < j <= i or (i - (n - rem) + 1 < 0 and n - ((n - rem) - i) < j < n) else 1 for j in range(n)])
print(len(history), len(history) - history[tuple(banks)])
| AlexBlandin/Advent-of-Code | 2017/day6.py | day6.py | py | 675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 15,
"usage_type": "argument"
}
] |
8882346001 | import simplejson as json
from datetime import datetime
DEBUG = False
# Zigbee catch-all decoder, just adds the following properties:
# Only changes topic:
# csn-zigbee/acp_id -> acp/acp_id/csn-zigbee
class Decoder(object):
    """Catch-all decoder for Zigbee MQTT messages.

    test() claims any message whose topic contains "csn-zigbee";
    decode() parses the JSON payload and returns it unchanged.
    """
    def __init__(self, settings=None):
        # settings is accepted for interface compatibility with other
        # decoders but is not used here.
        print(" zigbee_catchall init()")
        return

    def test(self, topic, message_bytes):
        """Return True if this decoder should handle the message,
        i.e. "csn-zigbee" appears anywhere in the topic."""
        if DEBUG:
            print("zigbee_catchall test() {} {}".format(topic, message_bytes))
        #regular topic format:
        #cambridge-sensor-network/devices/zigbee_catchall-test-3/up
        if ("csn-zigbee" in topic): #check if application name appears in the topic
            if DEBUG:
                print("zigbee_catchall test() success")
            return True
        # (example of an alternative match on a payload key, kept for reference)
        #elif ("dev_id" in msg): #dev_id for example, can be any other key
        #    msg=json.loads(message.payload)
        #    if (decoder_name in msg["dev_id"]):
        #        return True
        #    #elif...
        #    else:
        #        return False
        if DEBUG:
            print("zigbee_catchall test() fail")
        return False

    def decode(self, topic, message_bytes):
        """Decode the payload JSON and return it as a dict.

        NOTE(review): output_topic is computed ("csn-zigbee/<acp_id>[/...]"
        -> "acp/<acp_id>/csn-zigbee[/...]") but never returned or used;
        either the rewritten topic should be part of the return value or
        this computation is dead code.
        """
        inc_msg = str(message_bytes,'utf-8')
        if DEBUG:
            print("zigbee_catchall decode str {}".format(inc_msg))
        # Zigbee topic is "csn-zigbee/<acp_id>[/<other stuff>]"
        topic_parts = topic.split('/',2) # maxsplit=2 -> at most 3 parts
        output_topic = "acp/"+topic_parts[1]+"/"+topic_parts[0]
        if len(topic_parts) > 2:
            output_topic += "/" + topic_parts[2]
        # For this version of the decoder the original message from
        # deconz2acp will be published unchanged.
        msg_dict = json.loads(message_bytes)
        return msg_dict
# end zigbee_catchall
| AdaptiveCity/acp_local_mqtt | acp_decoders/decoders/zigbee_catchall.py | zigbee_catchall.py | py | 1,788 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "simplejson.loads",
"line_number": 54,
"usage_type": "call"
}
] |
29391322952 | from collections import defaultdict
class Solution:
    def longestStrChain(self, words: List[str]) -> int:
        """Length of the longest word chain, where each next word is formed
        by inserting exactly one character anywhere in the previous word.

        Builds, for every word, the set of indices of words that can follow
        it (keyed by each one-character deletion), then propagates chain
        lengths from shorter to longer words.
        """
        count = len(words)
        words.sort(key=lambda word: len(word))
        # predecessors[s] = indices of words that become s after one deletion
        predecessors = defaultdict(set)
        for idx, word in enumerate(words):
            for cut in range(len(word)):
                predecessors[word[:cut] + word[cut + 1:]].add(idx)
        chain_len = [1] * count
        best = 1
        for src in range(count):
            # every word that extends words[src] by one character
            for dst in predecessors[words[src]]:
                chain_len[dst] = max(chain_len[dst], chain_len[src] + 1)
                best = max(best, chain_len[dst])
        return best
| AnotherPianist/LeetCode | 1129-longest-string-chain/1129-longest-string-chain.py | 1129-longest-string-chain.py | py | 587 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
}
] |
21254500663 | import json
import boto3
from datetime import datetime
# Module-level objects are created once per Lambda execution environment
# (cold start) and then reused across invocations.
# NOTE(review): because of that reuse, current_date_time is frozen at cold
# start, so a message built from it claims a "point of trigger" time that can
# be stale; call datetime.now() inside the handler if per-invocation time is
# actually wanted.
current_date_time = datetime.now()
sqs = boto3.resource('sqs', region_name='us-east-1')
def lambda_handler(event, context):
queue = sqs.get_queue_by_name (QueueName='CustomerOrders')
date_time = current_date_time.strftime("%d/%m/%Y %H:%M:%S")
message = ("The current date and time at point of trigger was " + str(date_time) + ".")
response = queue.send_message (MessageBody=message)
return {
'statusCode': 200,
'body': json.dumps(message)
} | tmachek98/python-boto3 | Lambda.py | Lambda.py | py | 563 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
11875963511 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 12:34:56 2019
@author: stark
"""
import requests
from PageLinker import LinkFinder
from domain import *
from utility import *
class Spider:
    """Web crawler built entirely on class-level state (so worker threads
    share one queue): URLs wait in `queue`, finished ones move to `crawled`,
    and both sets are persisted to <project>/queue.txt and crawled.txt."""
    projectName = ''
    baseURL = ''
    domainName = ''
    queueFile = ''
    crawledFile = ''
    queue = set()      # URLs waiting to be crawled
    crawled = set()    # URLs already crawled
    failed = set()     # URLs that errored once (each retried at most once)

    def __init__(self,projectName,baseURL,domainName):
        # Configure the shared class-level state, create the on-disk files,
        # and immediately crawl the base URL.
        Spider.projectName = projectName
        Spider.baseURL = baseURL
        Spider.domainName = domainName
        Spider.queueFile = pathJoin(Spider.projectName,'queue.txt')
        Spider.crawledFile = pathJoin(Spider.projectName,'crawled.txt')
        Spider.boot()
        Spider.crawlPage('First Page', Spider.baseURL)

    #Creates directory and files for the first run and loads any saved state
    @staticmethod
    def boot():
        createProjectDir(Spider.projectName)
        createDataFiles(Spider.projectName,Spider.baseURL)
        Spider.queue = fileToSet(Spider.queueFile)
        Spider.crawled = fileToSet(Spider.crawledFile)
        Spider.queue.add(Spider.baseURL)

    #Updates user display, fills queue from the page's links and persists state
    @staticmethod
    def crawlPage(threadName,pageURL):
        if pageURL not in Spider.crawled:
            print(threadName +': now crawling : '+ pageURL)
            print('Queue : ' + str(len(Spider.queue)) + ' | Crawled : ' + str(len(Spider.crawled)))
            Spider.queue.remove(pageURL)
            Spider.addLinksToQueue(Spider.gatherLinks(pageURL))
            Spider.crawled.add(pageURL)
            Spider.updateFiles()

    #Fetches the page and extracts links after checking for proper html content
    @staticmethod
    def gatherLinks(pageURL):
        """Fetch pageURL and return the set of links found in its HTML.
        On a failed request the URL is re-queued once (tracked via
        Spider.failed) and an empty set is returned."""
        try:
            response = requests.get(pageURL)
            if response.status_code == 200:
                if 'text/html' in response.headers['Content-Type']:
                    response.encoding = 'UTF-8'
                    htmlString = response.text
                    finder = LinkFinder(Spider.baseURL,pageURL,Spider.projectName)
                    finder.feeder(htmlString)
                else:
                    # non-HTML content: nothing to extract
                    return set()
            else:
                # note: the message typo ('staus') is a runtime string left as-is
                raise Exception('Request staus code' , response.status_code)
        except Exception as e:
            print(str(e))
            # retry a failing URL exactly once by putting it back in the queue
            if(pageURL not in Spider.failed):
                Spider.queue.add(pageURL)
                Spider.failed.add(pageURL)
            print(Spider.failed)
            return set()
        return finder.returnLinks()

    #Queues only unseen links that belong to the crawl domain
    @staticmethod
    def addLinksToQueue(links):
        for url in links:
            if (url in Spider.queue) or (url in Spider.crawled):
                continue
            if(Spider.domainName != get_domain_name(url)):
                continue
            Spider.queue.add(url)

    #Persists the in-memory queue/crawled sets to the project files
    @staticmethod
    def updateFiles():
        setToFile(Spider.queueFile,Spider.queue)
        setToFile(Spider.crawledFile,Spider.crawled)
| pandafy/WebCrawler | spider.py | spider.py | py | 3,354 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "PageLinker.LinkFinder",
"line_number": 64,
"usage_type": "call"
}
] |
1653425451 | import numpy
import matplotlib.pyplot as plt
import pylab
import dcf
import utility as util
import logistic_regression as lr
import svm
from tqdm import tqdm
from copy import deepcopy
from preprocessing import preprocess_Z_score
import matplotlib
# ======================================== FEATURES plots ==========================================
def plot_features_distr(D, labels, features, gau=False):
    """Plot and save one per-class histogram for every feature:
    males (label 0) vs females (label 1), 30 bins, density-normalised.

    D: (n_features, n_samples) data matrix; labels: per-sample class array;
    features: feature names (used for x-labels and file names);
    gau: True when D has been gaussianised (only changes the output path).
    """
    n_features = len(features)
    _gau = "gau-" if gau else ""
    males = D[:, labels == 0]
    females = D[:, labels == 1]
    bins = 30
    for feature in range(n_features):
        # BUG FIX: plt.Figure() only instantiates a detached Figure object;
        # plt.figure() is required to open a new *current* figure, so each
        # feature gets its own plot instead of drawing into the same axes.
        plt.figure()
        plt.xlabel(features[feature])
        dataset_m = males[feature, :]
        dataset_f = females[feature, :]
        plt.hist(dataset_m, bins=bins, density=True, label='male', alpha=0.4)
        plt.hist(dataset_f, bins=bins, density=True, label='female', alpha=0.4)
        plt.legend()
        plt.savefig(f"./plots/features/{_gau}/{features[feature]}.png", format="png")
        plt.show()
def plot_relation_beetween_feautures(D, labels, features):
    """Scatter-plot every pair of distinct features against each other,
    one figure per (featureA, featureB) pair, split by class
    (males = label 0, females = label 1).

    D: (n_features, n_samples) data matrix; labels: per-sample class array;
    features: list of feature names for the axis labels.
    """
    n_features = len(features)
    males = D[:, labels == 0]
    females = D[:, labels == 1]
    for featureA in range(n_features):
        for featureB in range(featureA, n_features):
            if featureA == featureB:
                continue
            plt.figure()
            # BUG FIX: the axis titles indexed `labels` (the class-label
            # array) instead of the feature-name list.
            plt.xlabel(features[featureA])
            plt.ylabel(features[featureB])
            plt.scatter(males[featureA, :], males[featureB, :], label='Male', alpha=0.4)
            # BUG FIX: the female scatter took its y-values from the male
            # matrix (males[featureB, :]); use the female samples, which
            # also avoids a shape mismatch when class counts differ.
            plt.scatter(females[featureA, :], females[featureB, :], label='Female', alpha=0.4)
            plt.legend()
            plt.show()
# ============================================ CORRELATION between features plots ======================================================
def pearson_coeff(x, y):
    """
    Return the absolute Pearson correlation coefficient between two arrays.

    Parameters
    ---------
    x: numpy.array
        first array
    y: numpy.array
        second array

    Notes
    -----
    BUG FIX: numpy.cov defaults to the unbiased estimator (ddof=1) while
    numpy.var defaults to the biased one (ddof=0); mixing them scaled the
    result by n/(n-1), so perfectly correlated inputs returned values > 1.
    Both statistics now use ddof=1, giving a true coefficient in [0, 1].
    """
    cov = numpy.cov(x, y)[0][1]
    x_var = numpy.var(x, ddof=1)
    y_var = numpy.var(y, ddof=1)
    return numpy.abs(cov / (numpy.sqrt(x_var) * numpy.sqrt(y_var)))
def plot_heatmap(D, features, color):
    """
    Plot the heatmap of a given dataset: the absolute Pearson coefficient
    between every pair of features, annotated with the rounded values.

    Parameters
    ---------
    D: (n_features, n_samples) dataset matrix
    features: list of feature names (only its length is used here)
    color: matplotlib colormap for the heatmap
    """
    n_features = len(features)
    coeffs = numpy.zeros((n_features, n_features))
    # evaluate the Pearson coefficient for each pair of features
    for i in range(n_features):
        for j in range(n_features):
            coeffs[i][j] = pearson_coeff(D[i, :], D[j, :])
    # plot the heat map
    fig, ax = plt.subplots()
    im = ax.imshow(coeffs, interpolation='nearest', cmap=color)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(len(coeffs)):
        for j in range(len(coeffs)):
            text = ax.text(j, i, numpy.around(coeffs[i, j],2),
                           ha="center", va="center", color="w")
    ax.set_title("Heat map")
    fig.tight_layout()
    plt.show()
# ================================================= MIN DCFs Plots ============================================================================
def compare_min_DCF_logreg(DTR, DTE, LTR, LTE, applications, quadratic=False, preprocessing=False, weighted=False):
    """Sweep lambda for (linear/quadratic) logistic regression on the
    evaluation split, plot the resulting min DCF curves against the
    previously-saved validation results, and save the figure.

    DTR/LTR: training data/labels; DTE/LTE: evaluation data/labels.
    applications: list of (pi, Cfn, Cfp) working points.
    Returns (lambdas, {application: [min DCF per lambda]}).
    """
    lambdas = [1e-6, 2e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 8e-3, 1e-2, 2e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10, 50, 100]
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    quadratic_ = 'quadratic' if quadratic else 'linear'
    colors = ['b', 'r', 'g']
    params = {
        'weighted' : weighted
    }
    max_y = 0
    DCFs_dict = dict()
    # validation-time results saved earlier, drawn as dashed comparison curves
    file_prefix = lr.compute_filename_prefix(quadratic, preprocessing, weighted)
    train_minDCFs, train_lambdas = lr.load_results(file_prefix)
    PATH = f"./plots/LogReg/experimental/{file_prefix}-minDCF.png"
    for i, application in enumerate(applications):
        pi, Cfn, Cfp = application
        params['priors'] = [pi, 1-pi]
        DCFs = lr.compute_minDCF_for_lambda(DTR, DTE, LTR, LTE, application, lambdas, quadratic, params)
        DCFs_dict[application] = DCFs
        # track the highest DCF across both curves to size the y axis
        max_y = max(max_y, numpy.amax(numpy.hstack((train_minDCFs[application], DCFs))))
        plt.plot(train_lambdas, train_minDCFs[application], color=colors[i], label=f"{app_labels[i]} [Val]", linestyle='dashed')
        plt.plot(lambdas, DCFs, color=colors[i], label=f"{app_labels[i]} [Eval]")
    plt.ylim(0, max_y + 0.05)
    plt.xscale('log')
    plt.title(f"DCF {quadratic_} logistic regression")
    plt.xlabel('lambda')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format='png')
    plt.show()
    return lambdas, DCFs_dict
def plot_min_DCF_logreg(folds, folds_labels, k, applications, quadratic=False, preprocessing=False, weighted=False):
    """K-fold validation sweep of lambda for (linear/quadratic) logistic
    regression: computes min DCF for every application at every lambda,
    plots one curve per application and saves the figure.

    folds/folds_labels: the k data folds and their label folds.
    applications: list of (pi, Cfn, Cfp) working points.
    Returns (lambdas, {application: [min DCF per lambda]}).
    """
    lambdas = [1e-6, 2e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 8e-3, 1e-2, 2e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10, 50, 100]
    # shorter grid kept for quick runs:
    #lambdas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10]
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    colors = ['b', 'r', 'g']
    max_y = 0
    quadratic_ = "quadratic" if quadratic else "linear"
    file_prefix = lr.compute_filename_prefix(quadratic, preprocessing, weighted)
    PATH = f"./plots/LogReg/{file_prefix}-minDCF.png"
    DCFs_dict = {}
    max_y = 0  # NOTE: redundant re-initialisation, kept as-is
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        classPriors = [pi, 1-pi]
        for l in tqdm(lambdas):
            # pooled k-fold scores for this lambda (linear or quadratic expansion)
            if not quadratic:
                STE = util.k_folds(folds, folds_labels, k, lr.logreg, priors=classPriors, lambda_=l, preprocessing=preprocessing, weighted=weighted)
            else:
                STE = util.k_folds(folds, folds_labels, k, lr.quadratic_logreg, priors=classPriors, lambda_=l, preprocessing=preprocessing, weighted=weighted)
            scores = numpy.hstack(STE)
            DCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            max_y = max(max_y, DCF)
            DCFs.append(DCF)
        DCFs_dict[application] = DCFs
        plt.plot(lambdas, DCFs, color=colors[i], label=app_labels[i])
    plt.ylim(0, max_y+0.1)
    plt.xscale('log')
    plt.title(f"DCF {quadratic_} logistic regression")
    plt.xlabel('lambda')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format='png')
    plt.show()
    return lambdas, DCFs_dict
# ================================================= MIN DCFs SVM Plots ============================================================================
def compare_min_DCF_svm(DTR, DTE, LTR, LTE, kernel:str, evaluation_points: tuple, balanced: bool, preprocessing: bool):
    """Compare validation vs. evaluation minDCF curves of an SVM vs. C.

    `kernel` selects the SVM variant ('linear', 'poly' or 'rbf'). For linear
    and poly kernels each evaluation point is an (pi, Cfn, Cfp) application;
    for the rbf kernel each evaluation point is a gamma value.

    Returns the C grid and a dict mapping each evaluation point to its
    evaluation minDCF values.
    """
    Cs = [0.005, 0.05, 0.1, 0.5, 1, 5]
    colors = ['b', 'r', 'g']
    # For rbf the curves are parameterized by gamma, so use the gamma symbol
    # (consistent with plot_min_DCF_RBFsvm) instead of lambda.
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)'] if kernel != 'rbf' else ['log(\u03B3)=-1', 'log(\u03B3)=-2', 'log(\u03B3)=-3']
    balanced_ = "balanced" if balanced else "not balanced"
    file_prefix = svm.compute_filename_prefix(balanced, preprocessing)
    train_minDCFs, train_Cs = svm.load_results(file_prefix, kernel)
    PATH = f"./plots/SVM/experimental/{kernel}-{file_prefix}-minDCF.png"
    max_y = 0
    minDCFs_dict = dict()
    for i, ep in enumerate(evaluation_points):
        # Build the training parameters that match the requested kernel.
        if kernel == 'linear':
            pi, Cfn, Cfp = ep
            params = util.build_params(priors=[pi, 1-pi], balanced=balanced, kernel=kernel)
        elif kernel == 'poly':
            pi, Cfn, Cfp = ep
            params = util.build_params(priors=[pi, 1-pi], balanced=balanced, kernel=kernel, d=2, c=1,)
        elif kernel == 'rbf':
            params = util.build_params(priors=[0.5, 0.5], balanced=balanced, kernel=kernel, gamma=ep)
        minDCFs = svm.compute_minDCF_for_parameter(DTR, DTE, LTR, LTE, ep, Cs, params)
        minDCFs_dict[ep] = minDCFs
        # Track the global maximum so the y axis fits both curve families.
        max_y = max(max_y, numpy.amax(numpy.hstack((train_minDCFs[ep], minDCFs))))
        minDCFs = numpy.array(minDCFs).ravel()
        plt.plot(Cs, minDCFs, color=colors[i], label=f"{app_labels[i]} [Eval]")
        train_minDCF = numpy.array(train_minDCFs[ep]).ravel()
        plt.plot(train_Cs, train_minDCF, color=colors[i], label=f"{app_labels[i]} [Val]", linestyle='dashed' )
    plt.ylim(0, max_y+0.05)
    plt.title(f"minDCF for {kernel} SVM ({balanced_})")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_svm(folds, folds_labels, k, applications, balanced=False, preprocessing=None):
    """Plot the k-fold cross-validated minDCF of the linear SVM vs. C.

    One curve per application is drawn, the figure is saved under
    ./plots/SVM/ and shown.

    Returns the C grid and a dict mapping each application to its minDCF
    values (as a flat numpy array).
    """
    balanced_ = "balanced" if balanced else "not balanced"
    preprocessing_ = preprocessing if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-linear-{balanced_}-minDCF.png"
    Cs = [0.005, 0.02,0.05, 0.10, 0.20, 0.30, 0.5, 0.8, 1, 5, 10, 20, 50]
    colors = ['b', 'r', 'g']
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    minDCFs_dict = {}
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_SVM_linear,SVM=True, C = C, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[application] = DCFs.ravel()
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    plt.ylim(0, 1)
    plt.title(f"minDCF for linear SVM ({balanced_})")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_poly_svm(folds, folds_labels, k, applications, degree=2.0, balanced=False, preprocessing=None):
    """Plot the k-fold cross-validated minDCF of the polynomial-kernel SVM vs. C.

    One curve per application is drawn, the figure is saved under
    ./plots/SVM/ and shown.

    Returns the C grid and a dict mapping each application to its minDCF
    values (as a flat numpy array).
    """
    balanced_ = "balanced" if balanced else "not balanced"
    preprocessing_ = "z-norm" if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-poly{int(degree)}-{balanced_}-minDCF.png"
    Cs = [0.005, 0.05, 0.1, 0.5, 1, 5]
    colors = ['b', 'r', 'g']
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    minDCFs_dict = {}
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_non_linear_SVM, SVM=True, kernel='poly', C=C, d=degree, c=1, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[application] = DCFs.ravel()
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    # Figure decorations are loop-invariant; set them once after all curves.
    plt.ylim(0, 1)
    plt.title(f"DCF for Poly(d={int(degree)}) SVM ({balanced_})")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_RBFsvm(folds, folds_labels, k, gammas, balanced=False, preprocessing=False):
    """Plot the k-fold cross-validated minDCF of the RBF-kernel SVM vs. C.

    One curve per gamma value is drawn (evaluated at the pi=0.5 application),
    the figure is saved under ./plots/SVM/ and shown.

    Returns the C grid and a dict mapping each gamma to its minDCF values
    (as a flat numpy array).
    """
    balanced_ = "balanced" if balanced else "not-balanced"
    preprocessing_ = "z-norm" if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-RBF-{balanced_}-minDCF.png"
    Cs = [0.005, 0.01,0.02,0.05, 0.08, 0.10, 0.20, 0.30, 0.5, 0.8, 1, 3, 5, 10, 20, 50]
    colors = ['b', 'r', 'g']
    app_labels = ['log(\u03B3)=-1', 'log(\u03B3)=-2', 'log(\u03B3)=-3']
    minDCFs_dict = {}
    for i,gamma in enumerate(gammas):
        DCFs = []
        # The DCFs are always measured on the balanced application.
        pi, Cfn, Cfp = (0.5, 1, 1)
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_non_linear_SVM, SVM=True, kernel='rbf', gamma=gamma, C=C, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[gamma] = DCFs.ravel()
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    # Figure decorations are loop-invariant; set them once after all curves.
    plt.ylim(0, 1)
    plt.title("DCF for RBF kernel SVM")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
# ================================================= MIN DCFs GMM Plots ============================================================================
def plot_minDCF_GMM_hist(DCFs_list: list, G: int, labels: list, filename='plot', experimental= False, title="", colors=['lightsalmon', 'orangered', 'gold', 'orange']):
    """Draw a grouped bar chart of GMM minDCF values vs. number of components.

    Each entry of `DCFs_list` is one series of minDCF values (one bar group
    member per component count 2**0 .. 2**(G-1)). The figure is saved under
    ./plots/GMM/ (or ./plots/GMM/experimental/) and shown.

    Note: the default `colors` list is never mutated, so the mutable default
    argument is safe here.
    """
    x_labels = list(map(lambda val:2**val, range(G)))
    x = numpy.arange(len(x_labels))
    width = 0.18
    _experimental = "experimental/" if experimental else ""
    # Use the `filename` argument for the output file (previously it was ignored).
    path = f"./plots/GMM/{_experimental}{filename}.png"
    n_hists = len(DCFs_list)
    # Symmetric bar offsets around each tick, one slot per series.
    offsets = list( range(-int(n_hists/2) - 1, int(n_hists/2) + 2, 2))
    fig, ax = plt.subplots()
    for DCFs, offset, label, color in zip(DCFs_list, offsets, labels, colors):
        ax.bar(x + offset*width/2, DCFs, width, label=label, color=color)
    ax.set_ylabel('DCF')
    ax.set_xticks(x, x_labels)
    ax.legend()
    ax.set_title(title)
    fig.tight_layout()
    plt.savefig(path, format='png')
    plt.show()
# ================================================================ DET Plot ===================================================================
def plot_DET(llrs:list, L: numpy.array, plot_labels:list, colors: list =['r', 'b', 'm', 'g', 'y'], save_figure:bool = True, training:bool = True, multiple_labels: bool = False):
    """Draw one DET curve (FNR vs. FPR, log-log) per score set on a shared figure.

    When `multiple_labels` is False, `L` is a single label array shared by
    every score set in `llrs`; otherwise `L` is a list with one label array
    per score set. Optionally saves the figure under ./plots/evaluation/.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    PATH = f"./plots/evaluation/{training_}/DET_{models}.png"
    fig, ax = plt.subplots()
    if not multiple_labels:
        # Every score set is evaluated against the same label array.
        for scores, curve_name, curve_color in zip(llrs, plot_labels, colors):
            print(curve_name)
            fnr, fpr = compute_DET_points(scores, L)
            ax.plot(fnr, fpr, color=curve_color, label=curve_name)
    else:
        # Each score set comes with its own label array.
        for scores, lbl, curve_name, curve_color in zip(llrs, L, plot_labels, colors):
            fnr, fpr = compute_DET_points(scores, lbl)
            ax.plot(fnr, fpr, color=curve_color, label=curve_name)
    ax.set_xlabel("FPR")
    ax.set_ylabel("FNR")
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.legend()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
def compute_DET_points(llr, L):
    """Compute the (FNR, FPR) points of a DET curve.

    Thresholds are -inf, every score of `llr` in ascending order, and +inf,
    so both returned arrays have len(llr) + 2 entries. `L` holds the binary
    labels (0 = negative, 1 = positive) aligned with `llr`.
    """
    thresholds = numpy.concatenate([numpy.array([-numpy.inf]), numpy.sort(llr), numpy.array([numpy.inf])])
    n_neg = (L == 0).sum()
    n_pos = (L == 1).sum()
    fnr_points = numpy.zeros(L.shape[0] + 2)
    fpr_points = numpy.zeros(L.shape[0] + 2)
    for idx, t in enumerate(thresholds):
        accepted = llr > t  # predicted positives at this threshold
        fnr_points[idx] = 1 - (numpy.bitwise_and(accepted, L == 1).sum() / n_pos)
        fpr_points[idx] = numpy.bitwise_and(accepted, L == 0).sum() / n_neg
    return fnr_points, fpr_points
# =============================================== ROC Plots ==================================================
def plot_ROC(llrs: list, labels: list, plot_labels: list, save_figure:bool = True, training:bool = True):
    """Draw one ROC curve (TPR vs. FPR) per score set on a shared figure.

    All score sets in `llrs` are evaluated against the same `labels` array.
    Optionally saves the figure under ./plots/evaluation/.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    PATH = f"./plots/evaluation/{training_}/ROC_{models}.png"
    for scores, curve_name in zip(llrs, plot_labels):
        tpr, fpr = compute_ROC_points(scores, labels)
        plt.plot(fpr, tpr, label=curve_name)
    plt.xlabel("FPR")
    plt.ylabel("TPR")
    plt.legend()
    plt.grid()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
def compute_ROC_points(llr, L):
    """Compute the (TPR, FPR) points of a ROC curve.

    Thresholds are -inf, every score of `llr` in ascending order, and +inf,
    so both returned arrays have len(llr) + 2 entries. `L` holds the binary
    labels (0 = negative, 1 = positive) aligned with `llr`.
    """
    thresholds = numpy.concatenate([numpy.array([-numpy.inf]), numpy.sort(llr), numpy.array([numpy.inf])])
    n_neg = (L == 0).sum()
    n_pos = (L == 1).sum()
    tpr_points = numpy.zeros(L.shape[0] + 2)
    fpr_points = numpy.zeros(L.shape[0] + 2)
    for idx, t in enumerate(thresholds):
        accepted = llr > t  # predicted positives at this threshold
        tpr_points[idx] = numpy.bitwise_and(accepted, L == 1).sum() / n_pos
        fpr_points[idx] = numpy.bitwise_and(accepted, L == 0).sum() / n_neg
    return tpr_points, fpr_points
# =========================================================== Bayes Error Plot =============================================================
def bayes_error_plot(llrs: list, labels: list, plot_labels: list, log_regs: list, n_points:int = 100, colors: list = ['r', 'b', 'g', 'm', 'y'], save_figure: bool = True, training:bool = True, calibrated: bool = False, multiple_labels:bool = False):
    """Draw a Bayes error plot (min/act DCF vs. prior log-odds) per score set.

    When `multiple_labels` is False, `labels` is a single label array shared
    by every score set in `llrs`; otherwise it is a list with one label array
    per score set. `log_regs` supplies the (optional) calibration model per
    score set, forwarded to dcf.bayes_error_points. Optionally saves the
    figure under ./plots/evaluation/.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    calibrated_ = "-calibrated" if calibrated else ""
    PATH = f"./plots/evaluation/{training_}/BEP_{models}{calibrated_}.png"
    # The prior log-odds grid is the same for every model: compute it once.
    p_array = numpy.linspace(-3, 3, n_points)
    # Pair every score set with its label array; with a single shared label
    # array, reuse it for all score sets (removes the duplicated branches).
    label_sets = labels if multiple_labels else [labels] * len(llrs)
    max_y = 0
    for llr, lbl, plot_label, log_reg, color in zip(llrs, label_sets, plot_labels, log_regs, colors):
        minDCFs = dcf.bayes_error_points(p_array, llr, lbl, True, log_reg)
        actDCFs = dcf.bayes_error_points(p_array, llr, lbl, False, log_reg)
        # Track the global maximum so the y ticks cover every curve.
        max_y = max(max_y, numpy.max(minDCFs), numpy.max(actDCFs))
        plt.plot(p_array, minDCFs, label=f"{plot_label} minDCF", color=color, linestyle='dashed')
        plt.plot(p_array, actDCFs, label=f"{plot_label} actDCF", color=color)
    title = "Bayes Error Plot"
    plt.yticks(numpy.arange(0, min(max_y+0.1, 1), 0.05))
    plt.title(title)
    plt.legend()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
| srrmtt/GenderVoiceDetection | plot.py | plot.py | py | 19,657 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.Figure",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "m... |
3273808823 | # This is a sample Python script.
#########################################: Please Don't Change :#######################################
import logging
import os
import sys
from datetime import datetime
sys.path.append(
"/home/sonu/workspace/pro/component/"
)
sys.path.append(
"/home/sonu/workspace/pro/utils/"
)
sys.path.append(
"/home/sonu/workspace/pro/db_conn/"
)
from common import get_logger
from db_conn import DatabaseConnection
def log_setup():
"""This funtion is require for log_confi.yaml file."""
path = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(path, "log")
os.makedirs(log_dir, exist_ok=True)
log_path = os.path.join(path, log_dir, "running_log.log")
filelog = logging.handlers.TimedRotatingFileHandler(
log_path, when="midnight", backupCount=5
)
return filelog
#########################################: Please Code write Below :#######################################
STAGE_01 = "Connection Establish"
STAGE_02 = ""
STAGE_03 = ""
STAGE_04 = ""
def main():
logger = get_logger(logger_name="sample")
logger.info("main logging initialized")
try:
start_time = datetime.now()
logger.info(f"<<<<<<< The start of {STAGE_01} has begun. >>>>>>>")
database_connection = DatabaseConnection()
snowflake_connection = database_connection.get_snowflake_connection()
logger.info(f"<<<<<<< {STAGE_01} has been completed. >>>>>>>")
cux = snowflake_connection.cursor()
cux.execute("select current_timestamp();")
result = cux.fetchone()
logger.info(f"test connection succeed at {str(result)}")
end_time = datetime.now()
logger.info(
"The project has been successfully executed, with a runtime of {0}.".format(
end_time - start_time
)
)
except Exception as e:
logger.exception(f"getting error message {str(e)}")
if __name__ == "__main__":
main()
| rajeshraj124/advanced_logger_with_single_place_credentials | pro_sample/main.py | main.py | py | 1,997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numb... |
34796334569 | from django.urls import resolve, reverse
from .. import views
from .test_recipes_base import RecipesTestBase
class RecipesSearchViewTest(RecipesTestBase):
def test_search_view_function_is_correct(self):
view = resolve(reverse('recipes:search'))
self.assertIs(view.func, views.search)
def test_search_loads_correct_template(self):
response = self.client.get(reverse('recipes:search') + '?q=test')
self.assertTemplateUsed(response, 'recipes/pages/search.html')
def test_search_raises_404_if_no_search_term(self):
response = self.client.get(reverse('recipes:search'))
self.assertEqual(response.status_code, 404)
def test_search_term_is_on_page_title_and_escaped(self):
response = self.client.get(reverse('recipes:search') + '?q=<test>')
self.assertIn('Search for "<test>"',
response.content.decode('utf-8'))
def test_search_can_find_by_title(self):
title1 = 'This is recipe one'
title2 = 'This is recipe two'
recipe1 = self.create_recipe(
slug='one', title=title1, author={'username': 'one'})
recipe2 = self.create_recipe(
slug='two', title=title2, author={'username': 'two'})
search_url = reverse('recipes:search')
response1 = self.client.get(f'{search_url}?q={title1}')
response2 = self.client.get(f'{search_url}?q={title2}')
response3 = self.client.get(f'{search_url}?q=this')
self.assertIn(recipe1, response1.context['recipes'])
self.assertNotIn(recipe2, response1.context['recipes'])
self.assertIn(recipe2, response2.context['recipes'])
self.assertNotIn(recipe1, response2.context['recipes'])
self.assertIn(recipe1, response3.context['recipes'])
self.assertIn(recipe2, response3.context['recipes'])
| giovcandido/django-course-project1 | recipes/tests/test_recipes_search_view.py | test_recipes_search_view.py | py | 1,872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "test_recipes_base.RecipesTestBase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.resolve",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name":... |
22869043882 | import pygame, sys #기본세팅
import random, time #내가 추가한 것
from pygame.locals import *
#Set up pygame.
pygame.init()
#상수 정의
SCREEN =8
BLACK = (0,0,0)
GREEN = (0, 128, 0)
WHITE = (255, 255, 255)
BLUE = (0,0,255)
RED = (255,0,0)
YELLOW = (255,204,51)
screen = pygame.display.set_mode((600,400), 0,32)
pygame.display.set_caption("Othello")
#화면 세팅
screen.fill(GREEN)
#가로 줄 긋기
for x in range(0, 8):
if x==0:
continue
else:
pygame.draw.line(screen, BLACK, [0,x*50],[400,x*50],5)
#세로 줄 긋기
for y in range(0,9):
if y==0:
continue
else:
pygame.draw.line(screen, BLACK, [y*50,0],[y*50,400],5)
#오른쪽에 상태창 만들기
pygame.draw.rect(screen, WHITE, [403,0,200,400])
#각 위치에서의 블럭값들 초기화
screenArr = [] #리스트 안에 리스트. 열을 나누기 위함.
for y in range(0,SCREEN):
colList =[]
for x in range(0,SCREEN):
colList.append(0)
screenArr.append(colList)
screenArr[3][3]=2
screenArr[3][4]=1
screenArr[4][3]=1
screenArr[4][4]=2
#변수
currentTurn =1 #현재 턴- 플레이어 :1 컴퓨터 :2
diagnoalScreenArr =[] #대각선 검사를 위한 변수, 3차원 배열
for i in range(0,4):
rowList =[]
for y in range(0,SCREEN):
colList =[]
for x in range(0,SCREEN):
colList.append(0)
rowList.append(colList)
diagnoalScreenArr.append(rowList)
#함수
def changeTurn(pTurn):
if pTurn ==1: #플레이어의 턴을 컴퓨터의 턴으로 전환
return 2
elif pTurn ==2:
return 1
else:
return -1 #오류인 경우
def changeArrxToX(arrx): #x, y를 arrx, arry로 바꿔서 리턴.
for x in range(0,SCREEN):
if arrx==x:
return 25*(arrx*2+1)
def changeArryToY(arry):
for y in range(0,SCREEN):
if arry ==y:
return 25*(arry*2+1)
def viewGameScreen(): #screen이 해당 값들을 가지면 해당 블럭 출력
for arry in range(0,SCREEN):
for arrx in range(0,SCREEN):
if screenArr[arry][arrx] ==1: #플레이어
pygame.draw.circle(screen, BLACK, [changeArrxToX(arrx),changeArryToY(arry)], 20)
elif screenArr[arry][arrx] ==2: #컴퓨터
pygame.draw.circle(screen, WHITE, [changeArrxToX(arrx),changeArryToY(arry)], 20)
elif screenArr[arry][arrx] ==3: #컴퓨터의 블럭 랜덤위치
pygame.draw.circle(screen, BLUE, [changeArrxToX(arrx),changeArryToY(arry)], 20)
elif screenArr[arry][arrx] ==4: #게임이 끝난후 빈 공간이 있을 때 사용
pygame.draw.circle(screen, GREEN, [changeArrxToX(arrx),changeArryToY(arry)], 20)
def changeMousePosXToArrx(mousePosX):
for i in range(0,SCREEN):
if mousePosX < 50 * (i+1) -5 and mousePosX > 50*i +5:
return i
else:
return -1 #오류일 경우
def changeMousePosYToArry(mousePosY):
for i in range(0,SCREEN):
if mousePosY < 50 * (i+1) -5 and mousePosY > 50*i +5: #경계 안쪽
return i
else: #화면의 검은색 경계 부분
return -1 #오류일 경우
def checkIfTherisBlock(pScreenArr): #해당 자리에 블럭이 현재 있는지 없는지
#iScreenArr : screenArr을 매개변수로 받아야하는데 헷갈릴까봐
#parameter에서 p를 따옴
if pScreenArr == 1 or pScreenArr ==2: #플레이어 또는 컴퓨터의 블럭
return 1 #블럭이 이미 있음을 리턴
else:
return 0 #블럭이 해당자리에 없음을 리턴
def setDiagonalCnt(): #대각선 검사를 위해 미리 대각선 개수 설정
#왼쪽 위 방향 대각선
diagonalDir =0
for row in range(0,SCREEN):
for col in range(7, row-1,-1):
diagnoalScreenArr[diagonalDir][row][col]=row
remainingCol = row
num =0
for col in range(0, remainingCol):
diagnoalScreenArr[diagonalDir][row][col] = num
num=num+1
#오른쪽 위 방향 대각선
diagonalDir =1
for row in range(0,SCREEN):
for col in range(0, SCREEN-row):
diagnoalScreenArr[diagonalDir][row][col]=row
remainingCol = 7 -row
num =row
for col in range(remainingCol, SCREEN):
diagnoalScreenArr[diagonalDir][row][col] = num
num = num-1
#왼쪽 아래 방향 대각선
diagonalDir =2
for row in range(7, -1, -1):
for col in range(7, 6-row, -1):
diagnoalScreenArr[diagonalDir][row][col] = 7-row
remainingCol = 7-row
num =0
for col in range(0, remainingCol):
diagnoalScreenArr[diagonalDir][row][col] = num
num = num+1
#오른쪽 아래 대각선 개수
diagonalDir =3
for row in range(7, -1, -1):
for col in range(0, 1+row):
diagnoalScreenArr[diagonalDir][row][col] =7-row
remainingCol = row+1
num = 6-row
for col in range(remainingCol, SCREEN):
diagnoalScreenArr[diagonalDir][row][col] = num
num = num-1
#setDiagonalCnt()함수 시각적 확인
##setDiagonalCnt()
##for x in range(0,8):
## print(diagnoalScreenArr[0][x])
def InspectIfItCanBePlacedInPlace(pArrx, pArry, changeValue, pCurrentTurn): #해당 위치에 블럭을 놓을 수 있는 자리인지 검사
returnValue=0
if 1==checkIfTherisBlock(screenArr[pArry][pArrx]):
return 0
#대각선 검사
for diagonalValue in range(0,4):
if diagnoalScreenArr[diagonalValue][pArry][pArrx] != 0:
if diagonalValue==0: #왼쪽 위방향
if screenArr[pArry-1][pArrx-1] == changeTurn(pCurrentTurn):
for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
if screenArr[pArry-a][pArrx-a]==0:
break
elif screenArr[pArry-a][pArrx-a] ==pCurrentTurn:
for b in range(1, a):
if changeValue ==True:
screenArr[pArry-b][pArrx-b] =pCurrentTurn
returnValue =1
break
if diagonalValue ==1: #오른쪽 위 방향
if screenArr[pArry-1][pArrx+1] == changeTurn(pCurrentTurn):
for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
if screenArr[pArry-a][pArrx+a]==0:
break
elif screenArr[pArry-a][pArrx+a]==pCurrentTurn:
for b in range(1, a):
if changeValue ==True:
screenArr[pArry-b][pArrx+b]=pCurrentTurn
returnValue =1
break
if diagonalValue ==2: #왼쪽 아래 방향
if screenArr[pArry+1][pArrx-1] == changeTurn(pCurrentTurn):
for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
if screenArr[pArry+a][pArrx-a]==0:
break
elif screenArr[pArry+a][pArrx-a]==pCurrentTurn:
for b in range(1, a):
if changeValue ==True:
screenArr[pArry+b][pArrx-b]=pCurrentTurn
returnValue =1
break
if diagonalValue ==3: #오른쪽 아래 방향
if screenArr[pArry+1][pArrx+1] == changeTurn(pCurrentTurn):
for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
if screenArr[pArry+a][pArrx+a]==0:
break
elif screenArr[pArry+a][pArrx+a]==pCurrentTurn:
for b in range(1, a):
if changeValue ==True:
screenArr[pArry+b][pArrx+b]=pCurrentTurn
returnValue =1
break
#행 검사 - 위 방향으로 검사
if pArry != 0: #pArry가 0이면 검사할 때 리스트 인덱스 넘어감
if screenArr[pArry-1][pArrx] == changeTurn(pCurrentTurn):
for a in range(pArry-1, -1, -1):
if screenArr[a][pArrx] ==0:
break
elif screenArr[a][pArrx] ==pCurrentTurn:
for b in range(pArry-1, a,-1):
if changeValue ==True:
screenArr[b][pArrx] =pCurrentTurn
returnValue =1
break
#행 검사 - 아래 방향으로 검사
if pArry != SCREEN-1:
if screenArr[pArry+1][pArrx] == changeTurn(pCurrentTurn):
for a in range(pArry+1, SCREEN):
if screenArr[a][pArrx] ==0:
break
elif screenArr[a][pArrx]==pCurrentTurn:
for b in range(pArry+1, a):
if changeValue ==True:
screenArr[b][pArrx]=pCurrentTurn
returnValue =1
break
#열 검사 - 왼쪽 방향으로 검사
if pArrx !=0:
if screenArr[pArry][pArrx-1] == changeTurn(pCurrentTurn):
for a in range(pArrx-1, -1,-1):
if screenArr[pArry][a] ==0:
break
elif screenArr[pArry][a] ==pCurrentTurn:
for b in range(pArrx-1, a, -1):
if changeValue ==True:
screenArr[pArry][b] =pCurrentTurn
returnValue =1
break
#열 검사 - 오른쪽 방향으로 검사
if pArrx != SCREEN-1:
if screenArr[pArry][pArrx+1] == changeTurn(pCurrentTurn):
for a in range(pArrx+1, SCREEN):
if screenArr[pArry][a] ==0:
break
elif screenArr[pArry][a] ==pCurrentTurn:
for b in range(pArrx+1, a):
if changeValue ==True:
screenArr[pArry][b] =pCurrentTurn
returnValue =1
break
return returnValue #놓을 수 있는 곳이 없을 경우:0 있을 경우 :1
def calculateComputerRandomPlace(randomComputerNum): #컴퓨터가 놓는 위치 랜덤으로 계산
randNum=0
randNum = random.randrange(1, randomComputerNum+1)
return randNum
def setWhereComputerCanPutBlock():
randomComputerNum =1
tmpRow=-1
tmpCol=-1
noMeaningStorage=0
computerRandomPlace =[]
#computerRandomPlace 모두 0으로 초기화(8x8 2차원 배열)
for y in range(0,SCREEN):
colList =[]
for x in range(0,SCREEN):
colList.append(0)
computerRandomPlace.append(colList)
for row in range(0, SCREEN):
for col in range(0,SCREEN):
if InspectIfItCanBePlacedInPlace(col, row, False, currentTurn) ==1:
computerRandomPlace[row][col] = randomComputerNum
randomComputerNum = randomComputerNum +1
randomComputerNum = calculateComputerRandomPlace(randomComputerNum-1) #-1하는 이유 맨 마지막에 +1돼서 끝나기 때문
for row in range(0,SCREEN):
for col in range(0,SCREEN):
if computerRandomPlace[row][col] == randomComputerNum:
screenArr[row][col] =3 #컴퓨터가 랜덤으로 놓을위치 파랑색으로 설정
tmpRow = row
tmpCol = col
#컴퓨터가 랜덤으로 놓을 위치 미리 보여주기
viewGameScreen()
pygame.display.update()
#해당 위치 원래 컴퓨터 블럭색으로 변경
time.sleep(2)
noMeaningStorage = InspectIfItCanBePlacedInPlace(tmpCol, tmpRow, True, currentTurn)
screenArr[tmpRow][tmpCol] = 2 #컴퓨터가 랜덤으로 놓을위치 원래색인 하얀색으로 변경
viewGameScreen()
pygame.display.update()
def moveNextTurnWhenBlockCanNotPutPlace(): #둘 곳이 없을 경우 다음턴으로 넘어간다.
global currentTurn
global isClick
cannotPutPlaceCnt =0
for row in range(0,SCREEN):
for col in range(0,SCREEN):
if screenArr[row][col] == 0:
if InspectIfItCanBePlacedInPlace(col,row,False, currentTurn)==1:
cannotPutPlaceCnt = cannotPutPlaceCnt+1
if cannotPutPlaceCnt ==0 :
currentTurn = changeTurn(currentTurn)
print(currentTurn,"의 유저가 놓을 곳이 없습니다. ")
clearStateScreen(False)
printTurnInformation() #플레이어 -> 컴퓨터 턴 : 컴퓨터 턴 출력
time.sleep(1)
def viewGameResult():
font = pygame.font.SysFont("arial",20,True)
playerBlockCnt =0
computerBlockCnt =0
for row in range(0,SCREEN):
for col in range(0,SCREEN):
if screenArr[row][col] ==1:
playerBlockCnt = playerBlockCnt+1
elif screenArr[row][col] ==2:
computerBlockCnt = computerBlockCnt+1
screenArr[row][col] = 4
tmpBlockCnt =0
isFirstCheck = False
for row in range(0,SCREEN):
for col in range(0,SCREEN):
if (tmpBlockCnt < playerBlockCnt) and isFirstCheck == False:
screenArr[row][col] =1
tmpBlockCnt = tmpBlockCnt+1
else:
if isFirstCheck == False:
isFirstCheck = True
tmpBlockCnt =0
if tmpBlockCnt < computerBlockCnt:
screenArr[row][col] =2
tmpBlockCnt = tmpBlockCnt+1
print("컴퓨터 블럭 개수 : ", computerBlockCnt)
print("플레이어 블럭 개수 : ", playerBlockCnt)
print("tmpBlockCnt : ", tmpBlockCnt)
clearStateScreen(True)
if computerBlockCnt < playerBlockCnt:
printWinner("Player")
elif computerBlockCnt > playerBlockCnt:
printWinner("Computer")
else: #동점
printWinner("Draw")
viewGameScreen()
printBlockCnt(playerBlockCnt, computerBlockCnt)
pygame.display.update()
print("개수 출력화면까지 끝")
time.sleep(3)
#이후 다시 게임을 다시할지 시작화면으로갈지 끌지 선택.
sys.exit()
def ifNoOneDoNotPutBlock():
enablePutBlock= [True,True]
for row in range(0,SCREEN):
for col in range(0,SCREEN):
#플레이어 검사와 검퓨터 모두 블럭을 둘 곳이 없을 경우
if 1==InspectIfItCanBePlacedInPlace(col,row,False,1):
#print("플레이어 : (",row,col,") : 0")
enablePutBlock[0] = False
if 1==InspectIfItCanBePlacedInPlace(col,row,False,2):
#print("컴퓨터 : (",row,col,") : 0")
enablePutBlock[1] = False
if enablePutBlock[0] ==True and enablePutBlock[1] ==True:
return True
else:
return False
def checkGameOver():
spaceFilledCnt =0
for row in range(0,SCREEN):
for col in range(0,SCREEN):
if screenArr[row][col] ==1 or screenArr[row][col] ==2:
spaceFilledCnt= spaceFilledCnt+1
if spaceFilledCnt == SCREEN * SCREEN or ifNoOneDoNotPutBlock() == True:
clearStateScreen(True)
printGameOverText()
printCalculateGameResult()
time.sleep(5) #결과 집계중 5초동안 띄운 뒤 결과 보여주기
viewGameResult()
def printTurn(pTurn):
if pTurn ==1:
return "Player"
elif pTurn ==2:
return "Computer"
else:
return "Error"
def clearStateScreen(isGameOver):
clearScreenScaleY =145
if isGameOver == True:
clearScreenScaleY = 400
pygame.draw.rect(screen, WHITE, [403,0,200,clearScreenScaleY])
pygame.display.update()
def printTurnInformation():
userTextFont = pygame.font.SysFont("arial",20, True)
userTextContentFont = pygame.font.SysFont("arial",20)
userText = userTextFont.render("Current Turn : ", True, BLACK)
userTextContent = userTextContentFont.render(printTurn(currentTurn), True, BLACK)
screen.blit(userText, (410,100))
screen.blit(userTextContent, (525,100))
pygame.display.update()
def printUserColorInformation():
font = pygame.font.SysFont("arial",20,True)
playerColor = font.render("Player Color : ", True, BLACK)
computerColor = font.render("Computer Color : ", True, BLACK)
screen.blit(playerColor, (410,150))
screen.blit(computerColor, (410,200))
pygame.draw.rect(screen, GREEN, (548, 148, 30, 30))
pygame.draw.circle(screen, BLACK, [563, 163], 10)
pygame.draw.rect(screen, GREEN, (548, 198, 30, 30))
pygame.draw.circle(screen, WHITE, [563, 213], 10)
pygame.display.update()
def printGameOverText():
font = pygame.font.SysFont("arial",30,True)
text = font.render("-Game Over-", True, RED)
screen.blit(text, (425,50))
pygame.display.update()
def printCalculateGameResult(): #게임 결과 계산중 이라고 출력
font = pygame.font.SysFont("arial",15)
text = font.render("~Calculating Game Result~", True, BLACK)
screen.blit(text, (425,100))
pygame.display.update()
def printWinner(winner):
winnerFont = pygame.font.SysFont("arial",40)
winnerContentFont = pygame.font.SysFont("arial",30)
if winner != "Draw":
winnerText = winnerFont.render("Winner", True, RED)
else:
winnerText = winnerFont.render("Result", True, RED)
winnerContentText = winnerContentFont.render("-"+winner+"-", True, YELLOW)
screen.blit(winnerText, (450,50))
if winner == "Computer":
screen.blit(winnerContentText, (440,100))
elif winner == "Plyaer":
screen.blit(winnerContentText, (460,100))
else:
screen.blit(winnerContentText, (460,100))
pygame.display.update()
def printBlockCnt(playerBlockCnt, computerBlockCnt):
font = pygame.font.SysFont("arial",20)
playerBlockCntText = font.render("Player Block : "+ str(playerBlockCnt), True, BLACK)
computerBlockCntText = font.render("Computer Block : " + str(computerBlockCnt), True, BLACK)
screen.blit(playerBlockCntText, (440,200))
screen.blit(computerBlockCntText, (430,225))
pygame.display.update()
def printReplayButton():
font = pygame.font.SysFont("arial",40)
replayBtnText = font.render("Replay", True, WHITE,2)
screen.blit(replayBtnText, (100,200))
def printGoStartScreenButton():
font = pygame.font.SysFont("arial",40)
goStartScreenBtnText = font.render("Go StartScreen", True, WHITE,2)
screen.blit(goStartScreenBtnText, (300,200))
#둘다 블럭을 놓을 수 없는 경우
##for row in range(0,SCREEN):
## for col in range(0,SCREEN):
## screenArr[row][col] =2
##screenArr[2][6] =1
##screenArr[2][2] =1
##screenArr[3][3] =1
##screenArr[4][4] =1
##screenArr[4][6] =1
##screenArr[5][5] =1
##screenArr[7][7] =1
##screenArr[6][7] =0
##
##for row in range(0,SCREEN):
## for col in range(0,SCREEN):
## screenArr[row][col] =2
##
##screenArr[2][2]=1
##screenArr[1][0]=0
##screenArr[2][0]=0
##screenArr[3][0]=0
setDiagonalCnt()
viewGameScreen()
printTurnInformation()
printUserColorInformation()
printReplayButton()
printGoStartScreenButton()
#Game Loop
while True:
checkGameOver()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
mousePosToArr = []
mousePosToArr.append(changeMousePosXToArrx(pygame.mouse.get_pos()[0]))
mousePosToArr.append(changeMousePosYToArry(pygame.mouse.get_pos()[1]))
if not(mousePosToArr[0] ==-1 or mousePosToArr[1] ==-1):
if InspectIfItCanBePlacedInPlace(mousePosToArr[0],mousePosToArr[1], True, currentTurn) ==1:
mousePos = pygame.mouse.get_pos() #자료형 : tuple
screenArr[changeMousePosYToArry(mousePos[1])][changeMousePosXToArrx(mousePos[0])] =1 #클릭한 곳 색깔 바꾸기
currentTurn = changeTurn(currentTurn) #턴 바꾸기
clearStateScreen(False)
printTurnInformation() #플레이어 -> 컴퓨터 턴 : 컴퓨터 턴 출력
viewGameScreen()
pygame.display.update()
moveNextTurnWhenBlockCanNotPutPlace()
if currentTurn ==2 :
time.sleep(2)
setWhereComputerCanPutBlock()
currentTurn = changeTurn(currentTurn)
clearStateScreen(False)
printTurnInformation() #컴퓨터 -> 플레이어 턴 : 컴퓨터 턴 출력
| Choiseungpyo/Othello_Python | Othello.py | Othello.py | py | 22,209 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
30800987858 | print('1')
#from src.__init__ import main
from flask import Flask, request
from flask_restx import Api, Namespace, fields, Resource
## local
from settings import model_path, vocab_path, cnn_path
from torch import nn
# from src.controller.analyzeController import Analyze
from src.controller.keywordController import Keyword
from src.controller.testController import Sample
from src.controller.divideHighlightController import Divide_Highlight
if __name__ == "__main__" :
from load_models import koBERT_CNN_Classifier
prediction = koBERT_CNN_Classifier(model_path=model_path, vocab_path=vocab_path, cnn_path=cnn_path)
# app.run(debug=True, host='0.0.0.0')
class Classifier(nn.Module):
    """CNN classification head: three parallel Conv1d+MaxPool branches over a
    sequence of token embeddings, concatenated and fed to a linear layer.

    Input is expected as (batch, seq_len, hidden_size); output is
    (batch, num_classes) raw logits.
    """

    def __init__(self,
                 hidden_size=768,
                 num_classes=8,
                 dr_rate=0.0):
        super(Classifier, self).__init__()
        # Output channels produced by each convolution branch.
        self.kernel_num = 16

        def branch(kernel_size):
            # One branch: strided Conv1d over the sequence axis, ReLU,
            # max-pool (window 2, stride 1), dropout.
            return nn.Sequential(
                nn.Conv1d(hidden_size, self.kernel_num, kernel_size, stride=2),
                nn.ReLU(),
                nn.MaxPool1d(2, 1),
                nn.Dropout(dr_rate),
            )

        # Attribute names are kept identical to the original definition so
        # saved state_dicts (the cnn_path checkpoint) still load.
        self.conv1d_maxpooling1 = branch(4)
        self.conv1d_maxpooling2 = branch(8)
        self.conv1d_maxpooling3 = branch(16)
        # 1312 = 16 channels * (30 + 28 + 24) output positions, which is what
        # the three branches produce for a 64-position input sequence.
        self.classifier = nn.Linear(1312, num_classes)

    def forward(self, x):
        # (batch, seq_len, hidden) -> (batch, hidden, seq_len) for Conv1d.
        seq = x.transpose(1, 2)
        branch_outputs = [
            self.conv1d_maxpooling1(seq),
            self.conv1d_maxpooling2(seq),
            self.conv1d_maxpooling3(seq),
        ]
        merged = torch.cat(branch_outputs, 2)
        flat = merged.reshape(merged.size(0), -1)
        return self.classifier(flat)
#from load_models import koBERT_CNN_Classifier
from settings import model_path, cnn_path, vocab_path
from torch import nn
import torch
from src.preprocessor.textPreprocessor import textPreprocessor
print('2')
app = Flask(__name__)
api = Api(
app,
version='0.1',
title="PS HELPER API Server",
description="PS HELPER API 문서입니다.",
terms_url="/",
contact_url="donghoon149@gmail.com / hmcck27@gmail.com",
license="MIT"
)
Analyze = Namespace(
name="Analyze Algorithm",
description='문제 지문을 받고 적절한 <strong>알고리즘 태그</strong>를 반환합니다.',
)
api.add_namespace(Divide_Highlight, '/api/v1/divide_highlight')
api.add_namespace(Keyword, '/api/v1/keyword')
api.add_namespace(Analyze, '/api/v1/analyze')
api.add_namespace(Sample, '/api/v1/test')
# Model 객체 생성
analyze_fields = Analyze.model('Problem', {
'problem_id': fields.Integer(description='문제 번호', required=True, example="1007"),
'content': fields.String(description='문제 지문', required=True,
example="평면 상에 N개의 점이 찍혀있고, 그 점을 집합 P라고 하자. 하지만 집합 P의 벡터 매칭은 벡터의 집합인데, 모든 벡터는 집합 P의 한 점에서 시작해서, 또 다른 점에서 끝나는 벡터의 집합이다. 또, P에 속하는 모든 점은 한 번씩 쓰여야 한다.V에 있는 벡터의 개수는 P에 있는 점의 절반이다.평면 상의 점이 주어졌을 때, 집합 P의 벡터 매칭에 있는 벡터의 합의 길이의 최솟값을 출력하는 프로그램을 작성하시오."),
'input': fields.String(description='문제 입력사항', required=False,
example="첫째 줄에 테스트 케이스의 개수 T가 주어진다. 각 테스트 케이스는 다음과 같이 구성되어있다. 테스트 케이스의 첫째 줄에 점의 개수 N이 주어진다. N은 짝수이다. 둘째 줄부터 N개의 줄에 점의 좌표가 주어진다. N은 20보다 작거나 같은 자연수이고, 좌표는 절댓값이 100,000보다 작거나 같은 정수다. 모든 점은 서로 다르다."),
})
algorithm_fields = fields.Wildcard(fields.String)
analyze_response = Analyze.model('Problem_response', {
'problem_id': fields.String(description='문제 번호', required=True, example="1007"),
'problem_url': fields.String(description="문제 url", required=True, example="www.psHelper.de"),
'algorithm_type': algorithm_fields
})
''' test '''
print('sdfsdfsdfsdf')
@Analyze.route('')
class AnalyzeController(Resource):
@Analyze.expect(analyze_fields)
@Analyze.response(201, "Success", analyze_response)
def post(self):
content = request.json.get('content')
text_preprocessor = textPreprocessor()
'''
TO-DO
0. preprocess text
1. analyze the description
'''
preprocessed_text = text_preprocessor.preprocessing(content)
# tag = TagAnalyzer.findTag(preprocessed_text)
tag,ratio = prediction.predict(preprocessed_text)
# print(content)
return {
'problem_id': request.json.get('problem_id'),
'problem_url': "https://www.acmicpc.net/problem/" + str(request.json.get('problem_id')),
'algorithm_type' : tag,
'algorithm_ratio' : ratio
}, 201
print('sdfsdfwerwer')
# class Classifier(nn.Module):
# def __init__(self,
# hidden_size=768,
# num_classes=8,
# dr_rate=0.0):
# super(Classifier, self).__init__()
# # 16, 2848
# # 32, 5696
# # 1312
# self.kernel_num = 16
# self.conv1d_maxpooling1 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 4, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
# self.conv1d_maxpooling2 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 8, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
# self.conv1d_maxpooling3 = nn.Sequential(
# nn.Conv1d(hidden_size, self.kernel_num, 16, stride=2),
# nn.ReLU(),
# nn.MaxPool1d(2, 1),
# nn.Dropout(dr_rate)
# )
#
# self.classifier = nn.Linear(1312, num_classes)
#
# def forward(self, x) :
# out1 = self.conv1d_maxpooling1(x.transpose(1, 2))
# out2 = self.conv1d_maxpooling2(x.transpose(1, 2))
# out3 = self.conv1d_maxpooling3(x.transpose(1, 2))
# out = torch.cat((out1, out2, out3), 2)
# out = out.reshape(out.size(0), -1)
# return self.classifier(out)
#if __name__ == "__main__":
# app.run(debug=True, host='0.0.0.0')
| hmcck27/pshelper-server | src/app_for_server.py | app_for_server.py | py | 6,957 | python | ko | code | null | github-code | 36 | [
{
"api_name": "load_models.koBERT_CNN_Classifier",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "settings.model_path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "settings.vocab_path",
"line_number": 20,
"usage_type": "name"
},
{
"api_nam... |
73811093863 | import logging
logger = logging.getLogger(__name__)


def app(environ, start_response):
    """Minimal WSGI application.

    Echoes the request path as a plain-text body; requesting /exception
    deliberately raises (useful for testing error handling upstream).
    """
    path = environ.get('PATH_INFO', '')
    if path == '/exception':
        raise Exception('My exception!')

    data = "Request on %s \n" % path
    logger.info(data, extra={'tags': ['role:web', 'env:prod']})

    headers = [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(data))),
    ]
    start_response("200 OK", headers)
    return iter([data])
| sebest-blog/gunicorn-with-docker | myapp.py | myapp.py | py | 453 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
}
] |
1783613538 | import sqlite3
import time
from telethon import TelegramClient
from telethon import sync, events
import re
import json
# Local SQLite DB with one row per Telegram account (PHONE, API_ID, API_HASH).
db = sqlite3.connect('Account.db')
cur = db.cursor()

x = 1  # current account row ID (rows 1..22 are processed)
m = 0  # running total of balances collected so far
while(True):
    if x == 23:
        # All 22 accounts processed -> print the grand total and stop.
        print("Всего добыто:")
        print(m)
        break
    # NOTE(review): the ID is interpolated directly into the SQL string;
    # fine for trusted local data, but parameterized queries would be safer.
    cur.execute(f"SELECT PHONE FROM Account WHERE ID = '{x}'")
    time.sleep(0.4)
    # fetchone()[0] raises TypeError if no row exists for this ID.
    Phone = str(cur.fetchone()[0])
    print("Входим в аккаунт: " + Phone)
    cur.execute(f"SELECT API_ID FROM Account WHERE ID = '{x}'")
    time.sleep(0.4)
    api_id = str(cur.fetchone()[0])
    cur.execute(f"SELECT API_HASH FROM Account WHERE ID = '{x}'")
    time.sleep(0.4)
    api_hash = str(cur.fetchone()[0])
    # One saved Telethon session file per account ("anon1", "anon2", ...).
    session = str("anon" + str(x))
    client = TelegramClient(session, api_id, api_hash)
    client.start()
    dlgs = client.get_dialogs()
    # Locate the bot dialog by title.
    # NOTE(review): if the dialog is missing, `tegmo` keeps its value from a
    # previous iteration (or raises NameError on the first) — worth guarding.
    for dlg in dlgs:
        if dlg.title == 'LTC Click Bot':
            tegmo = dlg
    # Ask the bot for this account's balance and read its latest reply.
    client.send_message('LTC Click Bot', "/balance")
    time.sleep(3)
    msgs = client.get_messages(tegmo, limit=1)
    for mes in msgs:
        str_a = str(mes.message)
    # Strip the reply down to the bare number, e.g.
    # "Available balance: 0.001 LTC" -> "0.001".
    zz = str_a.replace('Available balance: ', '')
    qq = zz.replace(' LTC', '')
    print(qq)
    waitin = float(qq)
    m = m + waitin
    #print(m)
    x = x + 1
    time.sleep(1)
| Black-Triangle-code/Telegram_coin_bot | balance.py | balance.py | py | 1,310 | python | en | code | 123 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": ... |
71903174184 | #! /usr/bin/env python
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile # get the api
from scipy.signal import fftconvolve, convolve, stft, butter
import numpy as np
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
from numpy import array, diff, where, split
from scipy import arange
# fs, data = wavfile.read('Ding.wav') # load OG file
# a = data.T[0]
# fs2, data2 = wavfile.read('Long.wav') # load the data
# a2 = data2.T[0]
# d = fftconvolve(a, a2)
# print(d.shape)
# for i in range(len(d)):
# if d[i] > 0.85: #Tune this for the DING
# print('Do something')
# break
# plt.plot(d)
# plt.show()
#import keyboard
def findPeak(magnitude_values, noise_level=2000):
    """
    Find contiguous runs of above-noise magnitudes.

    Values below *noise_level* are zeroed out, then the array is scanned for
    runs of non-zero values.  Returns a list of ``[start, end]`` index pairs
    (half-open, slice-style), one per detected peak region.

    NOTE: the input is converted with ``np.asarray``; if the caller passes an
    ndarray it is modified in place by the noise gating (same as before).
    """
    magnitude_values = np.asarray(magnitude_values)
    # zero out low values in the magnitude array to remove noise (if any)
    low_values_indices = magnitude_values < noise_level  # Where values are low
    magnitude_values[low_values_indices] = 0  # All low values will be zero out

    indices = []
    region_start = None  # start index of the currently open peak region
    length = len(magnitude_values)
    for i in range(length):
        if magnitude_values[i] != 0:
            if region_start is None:
                region_start = i
        elif region_start is not None:
            # Region just closed: record [start, end).
            indices.append([region_start, i])
            region_start = None
    # Bug fix: a peak that runs to the very end of the array used to be
    # dropped because the open region was never appended after the loop.
    if region_start is not None:
        indices.append([region_start, length])
    return indices
def extractFrequency(indices, freq_bins, freq_threshold=2):
    """
    Convert peak index ranges into representative frequencies.

    For every ``[start, end]`` pair in *indices* the frequencies in
    ``freq_bins[start:end]`` are averaged and rounded; duplicates are
    dropped.  Frequencies whose neighbours lie within *freq_threshold*
    of each other are then merged into a single averaged value.
    """
    averaged = []
    for start, end in indices:
        mean_freq = round(np.average(freq_bins[start:end]))
        if mean_freq not in averaged:
            averaged.append(mean_freq)
    # Split wherever the gap between consecutive frequencies exceeds the
    # threshold, then collapse each resulting cluster to its average.
    clusters = np.split(averaged, np.where(np.diff(averaged) > freq_threshold)[0] + 1)
    return [round(np.average(cluster)) for cluster in clusters]
import pyaudio
ding_left = np.load('ding_select_floor2_left_mic.npy')
ding_right = np.load('ding_select_floor7_right_mic.npy')
CHUNK = 4096 # number of data points to read at a time
RATE = 48000 # time resolution of the recording device (Hz)
p=pyaudio.PyAudio() # start the PyAudio class
stream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,
frames_per_buffer=CHUNK) #uses default input device
while 1:
data_buffer = np.array([])
# create a numpy array holding a single read of audio data
for i in range(10): #to it a few times just to see
data = np.frombuffer(stream.read(CHUNK),dtype=np.int16)
data_buffer = np.concatenate([data_buffer, data])
fs = RATE
# f1, t1, ding_left2 = signal.stft(ding_left, fs, nperseg=1000)
# f2, t2, ding_right2 = signal.stft(ding_right, fs, nperseg=1000)
# f,t,data_buffer2= signal.stft(data_buffer, fs, nperseg=1000)
number_samples = len(data_buffer)
freq_bins = arange(number_samples) * RATE/number_samples
#ding_left2 = fft(ding_left)
#ding_right2 = fft(ding_right)
data_buffer_fft = fft(data_buffer)
#data_buffer_fft = np.fft.fftfreq(len(data_buffer), data_buffer)
#print(data_buffer2)
normalization_data = data_buffer_fft/number_samples
magnitude_values = normalization_data[range(len(data_buffer_fft)//2)]
magnitude_values = np.abs(magnitude_values)
indices = findPeak(magnitude_values=magnitude_values, noise_level=100)
frequencies = extractFrequency(indices, freq_bins)
#print(frequencies)
# amp = 2 * np.sqrt(2)
# plt.pcolormesh(t1, f1, np.abs(ding_left), vmin=0)
# plt.pcolormesh(t2, f2, np.abs(ding_right), vmin=0)
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.show()
#x = np.abs(data_buffer2).mean()
x = max(frequencies)
#x = x/1000
print(x)
if x > 750 and x < 800:
print("RIGHT DING MAYBE")
# if x > 270 and x < 350:
# print("LEFT DING MAYBE")
# if x > 1300 and x < 1400:
# print("RIGHT DING MAYBE")
# if x > 500 and x < 550:
# print("LEFT DING MAYBE")
#print(np.abs(data_buffer).max())
# d_left = convolve(ding_left, data_buffer)
# d_right = convolve(ding_right, data_buffer)
# dlmax = d_left.mean()
# drmax = d_right.mean()
#print("left ding is:" +str(dlmax) + "right ding is:" +str(drmax))
#print("right new is:" + str(d_right_fft.mean()))
#FLOOR 7
# if dlmax > 20173224741.999992:
# print('Left DING')
# if drmax > 30888468567.000004:
# print('Right DING')
# if dlmax > 10008361056.999992:
# print('Left DING')
# if drmax > 2000511377.789566:
# print('Right DING')
# data_buffer = np.load('ding2.npy')[73000: 130000]
# np.save('ding_select.npy', data_buffer)
# plt.plot(data_buffer)
# plt.show()
# d = fftconvolve(a, data)
# plt.plot(d)
# print(d.max())
# plt.show()
# close the stream gracefully
stream.stop_stream()
stream.close()
p.terminate() | buoyancy99/BobaBot | voice_utils/src/old_detection.py | old_detection.py | py | 5,313 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.split",
... |
37290989369 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the RegistrationPath lookup model, relaxes
    UserProfile.mobile_number to be optional, and links UserProfile to its
    registration path."""

    dependencies = [
        ('user_profile', '0003_remove_userprofile_title'),
    ]

    operations = [
        # New lookup table describing how a user registered.
        migrations.CreateModel(
            name='RegistrationPath',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=100)),
            ],
        ),
        # mobile_number becomes optional (null/blank allowed).
        migrations.AlterField(
            model_name='userprofile',
            name='mobile_number',
            field=models.CharField(max_length=11, null=True, blank=True),
        ),
        # Optional FK from UserProfile to the new RegistrationPath model.
        migrations.AddField(
            model_name='userprofile',
            name='registration_path',
            field=models.ForeignKey(blank=True, to='user_profile.RegistrationPath', null=True),
        ),
    ]
| bitapardaz/diabet | user_profile/migrations/0004_auto_20171023_0837.py | 0004_auto_20171023_0837.py | py | 952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
14302599067 | import os
import cv2 as cv
import numpy as np
people = []
for i in os.listdir(r'C:\Users\Atul\Downloads\OpenCV_course\opencv-course-master\Resources\Faces\train'):
people.append(i) #each folder in the faces folder corresponds to one person ben affleck,mindy,etc and name of folder is person's name so we store target variables
DIR = r'C:\Users\Atul\Downloads\OpenCV_course\opencv-course-master\Resources\Faces\train'
haar_cascade = cv.CascadeClassifier('haar_face.xml') #calling haarcascade detector
# Creating the training set
features = []
labels = []
def create_train(): #loop over all folders in the training folder and then loop over all images within and store in training set. Within each image detect only the face and crop it out using haarcascade face detector
    """Populate the module-level `features`/`labels` lists with grayscale
    face crops and their numeric person labels (index into `people`)."""
    for person in people:
        path = os.path.join(DIR, person) #to get path for folder of each person
        label = people.index(person) #text classes need to be converted to numerical categories
        for img in os.listdir(path):
            img_path = os.path.join(path, img) #create path for each image in each person's folder
            img_array = cv.imread(img_path)
            # NOTE(review): cv.imread returns None for unreadable/non-image
            # files, which would make cvtColor raise — consider skipping those.
            gray = cv.cvtColor(img_array, cv.COLOR_BGR2GRAY)
            faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors=4)
            for (x,y,w,h) in faces_rect:
                faces_roi = gray[y:y+h ,x:x+w] #cropping out just the face from the image
                features.append(faces_roi)
                labels.append(label)
create_train()
print('Training data created -------------')
features = np.array(features, dtype='object') #all pixels of each image are flattened out to a single row of all pixel values for an image
labels = np.array(labels)
face_recognizer = cv.face.LBPHFaceRecognizer_create() #instantiating out in-built face recognizer model
# Train recognizer on features list and labels list
face_recognizer.train(features,labels)
face_recognizer.save('face_trained.yml') # OpenCv allows us to save our trained model as a yaml which can be reused in other files instead of going through the entire training process again
np.save('features.npy', features)
np.save('labels.npy', labels)
| ajinkeya17/OpenCV-Course | Codebase/face_recognition_training.py | face_recognition_training.py | py | 2,247 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
5784254080 | from unittest import mock
import bson
import pytest
from test.tools import anything, in_any_order
from slivka import JobStatus
from slivka.db.documents import JobRequest, ServiceState
from slivka.db.helpers import delete_many, insert_many, pull_many
from slivka.scheduler import Runner, Scheduler
from slivka.scheduler.runners import Job, RunnerID
from slivka.scheduler.scheduler import (
ERROR,
REJECTED,
ExecutionDeferred,
ExecutionFailed,
)
def new_runner(service, name, command=None, args=None, env=None):
    """Build a Runner keyed by (service, name); falsy command/args/env
    collapse to empty containers."""
    options = {
        "command": command or [],
        "args": args or [],
        "outputs": [],
        "env": env or {},
    }
    return Runner(RunnerID(service, name), **options)
@pytest.fixture()
def mock_batch_start():
with mock.patch.object(Runner, "batch_start") as mock_method:
yield mock_method
@pytest.fixture()
def mock_check_status():
with mock.patch.object(Runner, "check_status") as mock_method:
yield mock_method
@pytest.fixture()
def mock_submit():
with mock.patch.object(Runner, "submit") as mock_method:
yield mock_method
def test_group_requests(job_directory):
scheduler = Scheduler(job_directory)
runner1 = new_runner("example", "runner1")
runner2 = new_runner("example", "runner2")
scheduler.add_runner(runner1)
scheduler.add_runner(runner2)
scheduler.selectors["example"] = lambda inputs: inputs.get("use")
requests = [
JobRequest(service="example", inputs={"use": "runner1"}),
JobRequest(service="example", inputs={"use": "runner2"}),
JobRequest(service="example", inputs={"use": None}),
JobRequest(service="example", inputs={"use": "runner1"}),
]
grouped = scheduler.group_requests(requests)
assert grouped == {
runner1: in_any_order(requests[0], requests[3]),
runner2: in_any_order(requests[1]),
REJECTED: in_any_order(requests[2]),
}
def test_group_requests_if_runner_does_not_exist(job_directory):
scheduler = Scheduler(job_directory)
runner1 = new_runner("example", "runner1")
scheduler.add_runner(runner1)
scheduler.selectors["example"] = lambda inputs: "runner2"
requests = [JobRequest(service="example", inputs={})]
grouped = scheduler.group_requests(requests)
assert grouped == {ERROR: in_any_order(*requests)}
def create_requests(count=1, service="example"):
return [
JobRequest(
_id=bson.ObjectId(), service=service, inputs={"input": "val%d" % i}
)
for i in range(count)
]
def test_start_requests_if_successful_start(job_directory, mock_batch_start):
scheduler = Scheduler(job_directory)
runner = new_runner("example", "example")
requests = [
JobRequest(
_id=bson.ObjectId(), service="example", inputs={"input": "val"}
),
JobRequest(
_id=bson.ObjectId(), service="example", inputs={"input": "val2"}
),
]
mock_batch_start.side_effect = lambda inputs, cwds: (
[Job("%04x" % i, cwd) for i, cwd in enumerate(cwds)]
)
started = scheduler._start_requests(runner, requests)
assert started == in_any_order(
*((req, Job("%04x" % i, anything())) for i, req in enumerate(requests))
)
def test_start_requests_deferred_execution_if_error_raised(
job_directory, mock_batch_start
):
scheduler = Scheduler(job_directory)
runner = new_runner("example", "example")
requests = create_requests(2)
mock_batch_start.side_effect = OSError
with pytest.raises(ExecutionDeferred):
scheduler._start_requests(runner, requests)
def test_start_request_failed_execution_if_too_many_errors_raised(
job_directory, mock_batch_start
):
scheduler = Scheduler(job_directory)
runner = new_runner("example", "example")
requests = create_requests(3)
scheduler.set_failure_limit(0)
mock_batch_start.side_effect = OSError
with pytest.raises(ExecutionFailed):
scheduler._start_requests(runner, requests)
class TestJobStatusUpdates:
@pytest.fixture()
def requests(self, database):
requests = create_requests(5)
insert_many(database, requests)
yield requests
delete_many(database, requests)
@pytest.fixture()
def scheduler(self, job_directory):
scheduler = Scheduler(job_directory)
runner = new_runner("example", "example")
scheduler.add_runner(runner)
scheduler.selectors["example"] = lambda inputs: "example"
return scheduler
@pytest.mark.parametrize("status", list(JobStatus))
def test_check_status_updates_requests(
self,
scheduler,
requests,
database,
mock_batch_start,
mock_check_status,
status,
):
# must start the job, before moving to status check stage
mock_batch_start.side_effect = lambda inputs, cwds: (
[Job("%04x" % i, cwd) for i, cwd in enumerate(cwds)]
)
mock_check_status.return_value = status
scheduler.main_loop()
pull_many(database, requests)
assert all(req.state == status for req in requests)
def test_submit_deferred_job_status_not_updated(
self, scheduler, requests, database, mock_submit
):
mock_submit.side_effect = OSError
scheduler.main_loop()
pull_many(database, requests)
assert all(req.state == JobStatus.ACCEPTED for req in requests)
def test_submit_failed_job_status_set_to_error(
self, scheduler, requests, database, mock_submit
):
mock_submit.side_effect = OSError
scheduler.set_failure_limit(0)
scheduler.main_loop()
pull_many(database, requests)
assert all(req.state == JobStatus.ERROR for req in requests)
class TestServiceStatusUpdates:
@pytest.fixture(autouse=True)
def requests(self, database):
requests = create_requests(5)
insert_many(database, requests)
yield requests
delete_many(database, requests)
@pytest.fixture()
def scheduler(self, job_directory):
scheduler = Scheduler(job_directory)
runner = new_runner("example", "default")
scheduler.add_runner(runner)
return scheduler
def test_service_start_successful(
self, database, scheduler, mock_submit, mock_check_status
):
mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
mock_check_status.return_value = JobStatus.QUEUED
scheduler.main_loop()
state = ServiceState.find_one(
database, service="example", runner="default"
)
assert state.state == ServiceState.OK
def test_service_start_soft_fail(self, database, scheduler, mock_submit):
mock_submit.side_effect = OSError
scheduler.main_loop()
state = ServiceState.find_one(
database, service="example", runner="default"
)
assert state.state == ServiceState.WARNING
def test_service_start_hard_fail(self, database, scheduler, mock_submit):
scheduler.set_failure_limit(0)
mock_submit.side_effect = OSError
scheduler.main_loop()
state = ServiceState.find_one(
database, service="example", runner="default"
)
assert state.state == ServiceState.DOWN
@pytest.mark.xfail(reason="service status should not rely on erroneous jobs")
def test_service_check_status_returned_all_errors(
self, database, scheduler, mock_submit, mock_check_status
):
mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
mock_check_status.return_value = JobStatus.ERROR
scheduler.main_loop()
state = ServiceState.find_one(
database, service="example", runner="default"
)
assert state.state == ServiceState.DOWN
def test_service_check_status_throws_exception(
self, database, scheduler, mock_submit, mock_check_status
):
mock_submit.side_effect = lambda cmd: Job("0x00", cmd.cwd)
mock_check_status.side_effect = Exception
scheduler.main_loop()
state = ServiceState.find_one(
database, service="example", runner="default"
)
assert state.state == ServiceState.WARNING
| bartongroup/slivka | test/scheduler/test_scheduler.py | test_scheduler.py | py | 8,266 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "slivka.scheduler.Runner",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "slivka.scheduler.runners.RunnerID",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 32,
"usage_type": "call"
},
{
... |
12420365719 | from django.urls import path
from . import views
# register app namespace which is going to be used in URL names
app_name = "my_app"  # URL namespace: reverse as "my_app:<name>"

urlpatterns = [
    path("", views.example_view, name="example"),
    path("variable/", views.variable_view, name="variable")
]
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
}
] |
23702247236 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__all__ = ["IterativeTwoDSearch"]
import os
import h5py
import numpy as np
from .two_d_search import TwoDSearch
from ._grid_search import grid_search
class IterativeTwoDSearch(TwoDSearch):
    """Iterative (period, phase) transit search.

    Repeatedly runs the 2-d grid search on the 1-d search results, records
    the highest signal-to-noise peak, masks that peak's transits out of the
    1-d arrays, and repeats until ``npeaks`` peaks have been found or too
    few unmasked points remain.
    """

    # File extension used for the HDF5 result cache.
    cache_ext = ".h5"

    # Supported query parameters: name -> (default, flag) pairs —
    # presumably (default value, required); see TwoDSearch for the contract.
    query_parameters = dict(
        min_period=(None, True),
        max_period=(None, True),
        delta_log_period=(None, False),
        dt=(None, False),
        alpha=(None, False),
        npeaks=(3, False),
        mask_frac=(2.0, False),
        min_points=(500, False),
        min_transits=(3, False),
    )

    def get_alpha(self, query, parent_response):
        """Return the model-complexity penalty for the 2-d search: the
        user-supplied ``alpha`` if present, otherwise the BIC-style
        ``k * log(n)`` computed from the parent light curves."""
        a = query.get("alpha", None)
        if a is not None:
            return float(a)
        lcs = parent_response.model_light_curves
        # n: total number of data points; k: number of basis vectors.
        n = sum(len(lc.time) for lc in lcs)
        k = parent_response.nbasis
        return k * np.log(n)

    def get_result(self, query, parent_response):
        """Run the iterative peak search and return ``dict(peaks=[...])``,
        one dict of peak statistics per detected peak."""
        periods = self.get_period_grid(query, parent_response)
        dt = self.get_offset_spacing(query, parent_response)
        alpha = self.get_alpha(query, parent_response)

        # Get the parameters of the time grid from the 1-d search.
        # Times are shifted so the grid starts at zero.
        time_spacing = parent_response.time_spacing
        mean_time = parent_response.mean_time_1d
        tmin = parent_response.min_time_1d - mean_time
        tmax = parent_response.max_time_1d - mean_time
        time_grid = np.arange(0, tmax-tmin, time_spacing)

        # Get the results of the 1-d search (copied so masking below does
        # not touch the parent response's arrays).
        depth_1d = np.array(parent_response.depth_1d)
        depth_ivar_1d = np.array(parent_response.depth_ivar_1d)
        dll_1d = np.array(parent_response.dll_1d)

        # Find the peaks.
        peaks = []
        for _ in range(query["npeaks"]):
            # Run a 2D search.
            results = grid_search(query["min_transits"], alpha,
                                  tmin, tmax, time_spacing, depth_1d,
                                  depth_ivar_1d, dll_1d, periods, dt)
            (t0_2d, phic_same, phic_same_2, phic_variable, depth_2d,
             depth_ivar_2d) = results

            # Profile over duration: for each period keep the duration
            # index that maximizes phic_same.
            inds = np.arange(len(phic_same)), np.argmax(phic_same, axis=1)
            t0_2d = t0_2d[inds]
            depth_2d = depth_2d[inds]
            depth_ivar_2d = depth_ivar_2d[inds]
            phic_same = phic_same[inds]
            phic_variable = phic_variable[inds]
            phic_same_2 = phic_same_2[inds]

            # Find the top peak (by depth signal-to-noise).
            s2n = depth_2d * np.sqrt(depth_ivar_2d)
            top_peak = np.argmax(s2n)
            p, t0 = periods[top_peak], t0_2d[top_peak]
            duration = query["durations"][inds[1][top_peak]]

            # Save the peak.  t0 is shifted back to the original time frame
            # and wrapped into [0, period).
            peaks.append(dict(
                period=p, t0=(t0 + tmin + mean_time) % p,
                duration=duration,
                depth=depth_2d[top_peak],
                depth_ivar=depth_ivar_2d[top_peak],
                s2n=s2n[top_peak],
                phic_same=phic_same[top_peak],
                phic_same_second=phic_same_2[top_peak],
                phic_variable=phic_variable[top_peak],
                duty_cycle=np.sum(depth_ivar_1d > 0.0) / len(depth_ivar_1d),
                data_span=tmax - tmin,
            ))

            # Mask out these transits: zero the 1-d results within
            # mask_frac * duration of every predicted transit time.
            m = (np.abs((time_grid-t0+0.5*p) % p-0.5*p)
                 < query["mask_frac"]*duration)
            depth_1d[m] = 0.0
            depth_ivar_1d[m] = 0.0
            dll_1d[m] = 0.0
            # Stop early once too few unmasked time samples remain.
            if (np.sum(np.any(depth_ivar_1d > 0.0, axis=1))
                    < query["min_points"]):
                break

        return dict(
            peaks=peaks,
        )

    def save_to_cache(self, fn, response):
        """Write the peaks from *response* to the HDF5 cache file *fn*."""
        try:
            os.makedirs(os.path.dirname(fn))
        except os.error:
            # Directory already exists (or cannot be created); proceed and
            # let h5py.File raise if the path is truly unusable.
            pass

        # Parse the peaks into a structured array.
        peaks = response["peaks"]
        if len(peaks):
            dtype = [(k, np.float64) for k in sorted(peaks[0].keys())]
            peaks = [tuple(peak[k] for k, _ in dtype) for peak in peaks]
            peaks = np.array(peaks, dtype=dtype)

        with h5py.File(fn, "w") as f:
            f.create_dataset("peaks", data=peaks, compression="gzip")

    def load_from_cache(self, fn):
        """Return ``dict(peaks=[...])`` loaded from cache file *fn*, or
        None when the file is missing or has no usable "peaks" dataset."""
        if os.path.exists(fn):
            with h5py.File(fn, "r") as f:
                try:
                    peaks = [dict((k, peak[k]) for k in peak.dtype.names)
                             for peak in f["peaks"]]
                    return dict(
                        peaks=peaks,
                    )
                except KeyError:
                    pass
        return None
| dfm/ketu | ketu/iterative.py | iterative.py | py | 4,714 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "two_d_search.TwoDSearch",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"lin... |
16178639967 | # -*- coding: utf-8 -*-
from high2low import Changer_low as ch_low
import utils as util
#from low2high import Changer_high as ch_high
import is_horl as is_horl
txt = input("Enter Korean Sentence: ")
ch = ch_low()
#ch_high = ch_high()
hi = is_horl.isHigh()
detect=hi.isThisHigh(txt)  # 1 when the sentence is honorific (formal) speech
# Honorific (formal) speech -> convert down to casual speech
if detect ==1:
    hi.getState(detect)
    output = ch.processText(txt)
    print("Converted Result:", output)
# Casual speech -> convert up to honorific speech
else:
    hi.getState(detect)
    output = util.tohigh(txt)
    print("Converted Result:", output)
| joowhan/Translation_Project | lab/highlow_factory/ver3_chari/src/test.py | test.py | py | 523 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "high2low.Changer_low",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "is_horl.isHigh",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.tohigh",
"line_number": 23,
"usage_type": "call"
}
] |
32274499758 | #!/opt/csw/bin/python
# coding=utf-8
import re
import fileinput
from time import time
from datetime import datetime
urlRe = re.compile('(http://www\.|https://www\.|http://|https://|www\.)(?P<link>\S+)')
youtubeUrlRe = re.compile('(youtube\.com/watch\?v=|youtube\.com/watch\?.*&v=|youtu.be/)(?P<id>[A-Za-z0-9_-]{11})')
def getResponseType():
    # Response-type tag for this interceptor — presumably consumed by the
    # bot's plugin dispatcher ("MSG" = plain chat message).
    return "MSG"
def get(msg, author, folder):
    """Track links seen in chat.

    Extracts URLs from *msg*, compares them against ``<folder>/links.txt``
    (one "url count nick timestamp" record per line), and:
      * for already-seen links, bumps the stored count and collects an
        "old!!!" notice naming the original poster and time;
      * for new links, appends a fresh record attributed to *author*.
    Returns the list of notices (or None when the message has no URLs).
    """
    urls = re.findall(urlRe, msg)
    if (not urls):
        return

    # Normalize, drop 4chan image links, de-duplicate within the message.
    urls = [prepareUrl(url) for url in urls if not is4chan(url)]
    urls = list(set(urls))

    f = open(folder + "/links.txt","r")
    lines = f.readlines()
    f.close()

    response = []
    for index, line in enumerate(lines):
        if not urls:
            break;
        data = line.rstrip().split(" ")  # [url, count, nick, timestamp]
        found = None
        for url in urls:
            if (data[0] != url):
                continue
            count = int(data[1])
            countStr = "(x" + str(count) + ")" if count > 1 else ""
            nick = "<" + data[2] + ">"
            firstTime = datetime.fromtimestamp(int(data[3])).strftime("%d/%m/%Y %H:%M:%S")
            # Estonian: "Algselt linkis" ~ "originally linked by".
            response.append("old!!! " + countStr + " Algselt linkis " + nick + " " + firstTime)
            # Rewrite the record in place with an incremented count.
            lines[index] = buildLine(data[0], count + 1, data[2], data[3])
            found = url
        if found is not None:
            urls.remove(found)

    # Rewrite the whole file: updated records first, then the new links.
    f = open(folder + "/links.txt","w")
    for line in lines:
        f.write(line)
    for url in urls:
        timestamp = str(int(time()))
        line = buildLine(url, 1, author, timestamp)
        f.write(line)
    f.close()
    return response
def buildLine(url, count, nick, timestamp):
    """Serialize one link record as 'url count nick timestamp\\n'."""
    return " ".join((url, str(count), nick, timestamp)) + "\n"
def is4chan(url):
    """Return True when the regex match tuple's link part is a 4cdn.org URL."""
    link = url[1]
    return link.find("4cdn.org") >= 0
def prepareUrl(url):
    """Normalize a matched URL tuple: YouTube links collapse to the 11-char
    video id; any other link is returned with a trailing slash removed."""
    link = url[1]
    youtube_matches = re.findall(youtubeUrlRe, link)
    if youtube_matches:
        return youtube_matches[0][1]
    if link.endswith("/"):
        link = link[:-1]
    return link
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
... |
31524008748 | import fitz
from loguru import logger
def get_page_from_sheet(sheet: str, pdf_fpath=None, doc=None):
    """Locate the page (searching backwards) whose corner region contains
    the given sheet number.

    Exactly one of *pdf_fpath* (path to open) or *doc* (an already-open
    fitz document) must be supplied.

    Returns (page_index, rect) for the first hit, or None when the sheet
    number is not found on any page.
    Raises ValueError when both or neither source arguments are given.
    """
    if (pdf_fpath is not None) and (doc is not None):
        raise ValueError("Only one of pdf_fpath or doc can be specified.")
    if (pdf_fpath is None) and (doc is None):
        # Previously this fell through and crashed on len(None).
        raise ValueError("One of pdf_fpath or doc must be specified.")
    owns_doc = False
    if pdf_fpath:
        doc = fitz.open(pdf_fpath)
        owns_doc = True
    try:
        for i in range(len(doc)-1, -1, -1):  # iterate backwards over all pages
            page = doc[i]
            # Quadrants of the page; sheet numbers live near a corner.
            parts = 4
            corners = [
                fitz.Rect(0, 0, page.rect.width / parts, page.rect.height / parts),  # top left
                fitz.Rect(page.rect.width / parts, 0, page.rect.width, page.rect.height / parts),  # top right
                fitz.Rect(0, page.rect.height / parts, page.rect.width / parts, page.rect.height),  # bottom left
                fitz.Rect(page.rect.width / parts, page.rect.height / parts, page.rect.width, page.rect.height)  # bottom right
            ]
            # NOTE(review): `area=` / `hit_max=` match an older PyMuPDF
            # search_for signature; newer releases use `clip=` — confirm
            # against the pinned PyMuPDF version.
            for corner in corners:
                matches = page.search_for(sheet, hit_max=1, area=corner)
                if matches:  # if the sheet number is found
                    logger.info(f"Sheet number {sheet} found on page {i} at location {matches[0]}")
                    return i, matches[0]  # return the page number (0-indexed)
        return None  # sheet number not found on any page
    finally:
        # Only close documents we opened ourselves; a caller-supplied
        # doc stays owned by the caller.
        if owns_doc:
            doc.close()
{
"api_name": "fitz.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fitz.Rect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "fitz.Rect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "fitz.Rect",
"line_number": 23,
... |
15698017832 | from django.shortcuts import render
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from django.db.models import Q
from .serializer import GetCommentSerializer,AddCommentModelserializer,AddReplyCommentModelSerializer
from .models import Comment
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics
# class GetComments (APIView):
# def get (self,request,id):
# comment = Comment.objects.filter(Q(product__id=id) & Q(reply=None) & Q(status=True))
# serializer = GetCommentSerializer(comment,many=True)
# return Response(serializer.data , status=status.HTTP_200_OK)
class StandardResultsSetPagination(PageNumberPagination):
    """Page-number pagination: 3 items per page by default, client may
    override via ?page_size=... up to a cap of 10."""
    page_size = 3
    page_size_query_param = 'page_size'
    max_page_size = 10
class GetComments (generics.ListAPIView):
    """Paginated list of approved top-level comments for one product.

    The product id comes from the URL kwarg ``id``; only comments with
    ``reply=None`` (not replies) and ``status=True`` (approved) are
    returned, newest first.
    """
    # queryset = Product.objects.filter(category=8)
    serializer_class = GetCommentSerializer
    pagination_class = StandardResultsSetPagination
    lookup_url_kwarg = "id"
    def get_queryset(self):
        # Filter to approved, top-level comments of the requested product.
        id_product = self.kwargs.get(self.lookup_url_kwarg)
        comment = Comment.objects.filter(Q(product__id=id_product) & Q(reply=None) & Q(status=True)).order_by("-id")
        return comment
class AddComment (APIView):
    """Create a new comment owned by the authenticated user."""
    permission_classes=[IsAuthenticated]

    def post(self, request):
        comment_serializer = AddCommentModelserializer(data=request.data)
        comment_serializer.is_valid(raise_exception=True)
        # Stamp the request's user as the comment author before saving.
        comment_serializer.validated_data['user'] = request.user
        comment_serializer.save()
        return Response(status=status.HTTP_200_OK)
class AddReplyComment(APIView):
    """Create a reply to an existing comment, owned by the authenticated user."""
    permission_classes=[IsAuthenticated]

    def post(self, request):
        reply_serializer = AddReplyCommentModelSerializer(data=request.data)
        reply_serializer.is_valid(raise_exception=True)
        # Stamp the request's user as the reply author before saving.
        reply_serializer.validated_data['user'] = request.user
        reply_serializer.save()
        return Response(status=status.HTTP_200_OK)
{
"api_name": "rest_framework.pagination.PageNumberPagination",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.generics.ListAPIView",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 25,
"usa... |
1147807834 | """empty message
Revision ID: b7c0cfa43719
Revises: 25279a0b5c75
Create Date: 2016-11-02 00:02:18.768539
"""
# revision identifiers, used by Alembic.
revision = 'b7c0cfa43719'
down_revision = '25279a0b5c75'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable ``state_number`` varchar(255) column to the
    ``twilio`` and ``user`` tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('twilio', sa.Column('state_number', sa.String(length=255), nullable=True))
    op.add_column('user', sa.Column('state_number', sa.String(length=255), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the ``state_number`` column added by :func:`upgrade`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'state_number')
    op.drop_column('twilio', 'state_number')
    ### end Alembic commands ###
| CodeForProgress/sms-app | src/migrations/versions/b7c0cfa43719_.py | b7c0cfa43719_.py | py | 760 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
8806299537 | from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from lists.views import home_page, view_list
from lists.models import Item, List
from lists.forms import STR_EMPYT_LIST_ERROR, ItemForm, ExistingListItemForm
# Create your tests here.
class SmokeTest(TestCase):
    """Sanity check that the test harness itself runs."""

    def test_bad_maths(self):
        self.assertEqual(1 + 1, 2)
class HomePageTest(TestCase):
    """Tests for the home page view wired at '/'."""
    def test_root_url_resolves_to_home_page_view(self):
        # '/' must dispatch to lists.views.home_page.
        found = resolve("/")
        self.assertEqual(found.func, home_page)
    def test_0001_home_page_returns_correct_html(self):
        request = HttpRequest()
        response = home_page(request)
        # Re-encode via utf-8-sig to strip a possible BOM from the template.
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'{type(content) =}, {content}')
        self.assertTrue(content.startswith(b"<html>"))
        self.assertIn(b'<title>To-Do Lists</title>', content)
        self.assertTrue(content.endswith(b"</html>"))
        # failed for csrf
        # self.assertEqual(response.content.decode(), render_to_string('home.html'))
'''
def test_0004_home_page_displays_all_list_item(self):
list_ = List.objects.create()
Item.objects.create(text='itemey 1', list=list_)
Item.objects.create(text='itemey 2', list=list_)
request = HttpRequest()
response = view_list(request)
self.assertIn('itemey 1', response.content.decode())
self.assertIn('itemey 2', response.content.decode())
'''
class ListViewTest(TestCase):
    """Tests for the per-list view at /lists/<id>/ (rendering, isolation
    between lists, and validation-error handling)."""
    '''
    def test_users_list_template(self):
        response = self.client.get('/lists/all/')
        self.assertTemplateUsed(response, 'list.html')
    '''
    def test_0002_users_list_template(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertTemplateUsed(response, 'list.html')
    def test_0002a_users_list_template(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertIsInstance(response.context['form'], ExistingListItemForm)
        self.assertContains(response, 'name="text"')
    def test_0003_display_only_items_for_that_list(self):
        # Two lists with two items each; the view must show only list1's.
        list1 = List.objects.create()
        Item.objects.create(text='itemey 1.1', list=list1)
        Item.objects.create(text='itemey 1.2', list=list1)
        list2 = List.objects.create()
        Item.objects.create(text='itemey 2.1', list=list2)
        Item.objects.create(text='itemey 2.2', list=list2)
        response = self.client.get(f"/lists/{list1.id}/")
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'{type(content) =}, {content}')
        self.assertContains(response, 'itemey 1.1')
        self.assertContains(response, 'itemey 1.2')
        self.assertNotContains(response, 'itemey 2.1')
        self.assertNotContains(response, 'itemey 2.2')
    """
    def test_display_all_items(self):
        list_ = List.objects.create()
        Item.objects.create(text='itemey 1', list=list_)
        Item.objects.create(text='itemey 2', list=list_)
        response = self.client.get("/lists/all/")
        self.assertContains(response, 'itemey 1')
        self.assertContains(response, 'itemey 2')
    """
    def test_0005_passes_correst_list_to_templeate(self):
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.get(f'/lists/{list1.id}/')
        self.assertEqual(response.context['list'], list1)
    def test_0006_validation_error_end_up_on_list_page(self):
        # Empty item text must re-render the list page with an error.
        list_ = List.objects.create()
        response = self.client.post(f'/lists/{list_.id}/', data={"text": ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')
        content = response.content.decode("utf-8-sig").encode('utf-8')
        # print(f'test_0006_validation_error_end_up_on_list_page {type(content) =}, {content}')
        self.assertContains(response, escape("You can't have an empty list item"))
    def post_invalid_input(self):
        # Helper (not itself a test): POST an empty item to a fresh list.
        list_ = List.objects.create()
        response = self.client.post(f'/lists/{list_.id}/', data={"text": ''})
        return response
    def test_0007_invalid_input_nothing_saved_to(self):
        self.post_invalid_input()
        self.assertEqual(Item.objects.count(), 0)
    def test_0008_invalid_input_renders_list_template(self):
        response = self.post_invalid_input()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')
    def test_0009_invalid_input_passes_form_to_template(self):
        response = self.post_invalid_input()
        self.assertIsInstance(response.context['form'], ExistingListItemForm)
    def test_0010_invalid_input_shows_error_on_page(self):
        response = self.post_invalid_input()
        self.assertContains(response, escape(STR_EMPYT_LIST_ERROR))
class NewListTest(TestCase):
    """Tests for creating a new list via POST /lists/new."""
    def test_0001_saving_a_POST_request(self):
        self.client.post('/lists/new', data={'text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')
    def test_0003_home_page_redirect_after_post(self):
        response = self.client.post('/lists/new', data={'text': 'A new list item'})
        list_ = List.objects.first()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['location'], f'/lists/{list_.id}/')
    def test_0004a_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
    def test_0004b_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertContains(response, escape(STR_EMPYT_LIST_ERROR))
    def test_0004c_validation_errors_are_sent_back_to_home_page_template(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertIsInstance(response.context['form'], ItemForm)
    def test_0005_invalid_list_items_arent_saved(self):
        # Fixed: the response was bound to an unused local before.
        self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)
class NewItemTest(TestCase):
    """Tests for adding an item to an existing list via POST /lists/<id>/."""
    def test_0001_can_save_a_POST_request_to_an_existing_list(self):
        # A decoy list2 exists so the test proves the item lands on list1.
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.post(f'/lists/{list1.id}/',
                                    data={'text': 'A new list item for existing list'})
        # print(f'test_0001_can_save_a_POST_request_to_an_existing_list: {response.status_code}, {response = }')
        # print(f'{list(Item.objects.all()) = }')
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item for existing list')
        self.assertEqual(new_item.list, list1)
    def test_0002_redirect_to_list_view(self):
        # Decoy list2 again; the POST must redirect back to list1's page.
        list2 = List.objects.create()
        list1 = List.objects.create()
        response = self.client.post(f'/lists/{list1.id}/',
                                    data={'text': 'A new list item for existing list'})
        self.assertRedirects(response, f'/lists/{list1.id}/')
| juewuer/python-web-dev | superlists/lists/tests/test_views.py | test_views.py | py | 7,695 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.test.TestCase",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.resolve",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "lists.v... |
42866864407 | """
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections
class Solution:
    def zigzagLevelOrder(self, root: "TreeNode") -> "List[List[int]]":
        """Return node values level by level, alternating left-to-right
        and right-to-left (LeetCode 103).

        BFS with a FIFO queue; each level is collected in a deque so odd
        levels can prepend values in O(1) instead of reversing.

        Fixes: annotations are quoted (TreeNode/List are supplied by the
        judge, not this module), and each level deque is converted to a
        plain list so the declared List[List[int]] return type — and
        ``==`` comparisons against lists — actually hold.
        """
        if not root:
            return []
        queue = collections.deque([root])
        z_traversal = []
        num_level = 0
        while queue:
            level = collections.deque()
            for _ in range(len(queue)):
                head = queue.popleft()
                # Even levels read left-to-right, odd levels right-to-left.
                if num_level % 2 == 0:
                    level.append(head.val)
                else:
                    level.appendleft(head.val)
                if head.left:
                    queue.append(head.left)
                if head.right:
                    queue.append(head.right)
            z_traversal.append(list(level))
            num_level += 1
        return z_traversal
{
"api_name": "collections.deque",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 38,
"usage_type": "call"
}
] |
3237711712 | from django.db import models
from rest_framework import serializers
gender=(
("male","Male"),
("female","Female"),
)
status=(
("Done","Done"),
("Pending","Pending"),
)
Data=(
("share_all_data","Share All Data"),
("share_alerts_only","Share Alerts Only"),
)
communication=(
("email","Email"),
("sms","SMS"),
)
relationship=(
("parent","parent"),
("spouse","spouse"),
("children","children"),
)
Device = (
('ios','ios'),
('android','android'),
('web','web'),
)
# Create your models here.
class Patient_Account(models.Model):
    """A patient's account: identity, contact, OHIP/insurance ids,
    verification state, and the clinic/hospital it is attached to.

    NOTE(review): Password is a plain TextField on this model — confirm
    that hashing happens before rows are written.
    """
    Patient_Account_Id = models.AutoField(primary_key=True)
    Full_Name=models.TextField(default="")
    First_Name=models.CharField(max_length=100, default="")
    Last_Name=models.CharField(max_length=100, default="")
    Email=models.TextField(default="")
    Username=models.TextField(default="")
    Gender=models.CharField(max_length=100, default="")
    Date_of_Birth=models.CharField(max_length=500, default="")
    Password=models.TextField(default="")
    Street_Address=models.CharField(max_length=500, default="")
    City=models.CharField(max_length=500, default="")
    State=models.CharField(max_length=500, default="")
    Country=models.CharField(max_length=500, default="")
    Role=models.CharField(max_length=100,default="patient")
    Patient_Account_Image=models.ImageField(upload_to='Patient/',default="dummyprofile.jpg")
    Mobile_Number = models.CharField(max_length=200, default="")
    Email_Verification_Code = models.CharField(max_length=200, default="")
    # Boolean-like state is stored as strings ("False"/"True") throughout.
    Email_Verification_Timestatus = models.CharField(max_length=200, default="False")
    Email_Verification_usestatus = models.CharField(max_length=200, default="False")
    OTP_Verification = models.CharField(max_length=200, default="12345")
    ohip_number = models.TextField(default="")
    date_of_issue = models.TextField(default="")
    date_of_expiry = models.TextField(default="")
    ohip_Status = models.CharField(max_length=500, default="")
    Email_Verificatication_Status = models.CharField(max_length=500, default="False")
    Sender_ID = models.TextField(default="")
    Device_type = models.CharField(max_length=100,choices=Device,default="android")
    Message_Count = models.CharField(max_length=20,default="0")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey('ear_health_professional.Clinics_Branch' , on_delete=models.CASCADE,blank=True,null=True)
    Clinic_Remove_Status = models.CharField(max_length=500, default="True")
    def __str__(self):
        return self.Full_Name
class Card_detail(models.Model):
    """A payment card stored for a patient.

    NOTE(review): card number and CVC are persisted directly — confirm
    this meets the project's PCI/compliance requirements.
    """
    Card_detail_Id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Card_number=models.CharField(max_length=100,default="0")
    Cvc=models.IntegerField(default="12345")
    expiration_date=models.DateField(blank=True, null=True)
    created_at=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    Charge_Day=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Card_number
class Billing_Details(models.Model):
    """Billing address and contact email for a patient."""
    Billing_Details_id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Street_Address=models.TextField(default="")
    Country=models.TextField(default="")
    State=models.TextField(default="")
    City=models.TextField(default="")
    Postal_Code=models.TextField(default="")
    Email=models.TextField(default="")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Country
class Insurance(models.Model):
    """An insurance policy attached to a patient (dates stored as text)."""
    Insurance_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    insuarance_number =models.TextField(default="")
    date_of_issue = models.TextField(default="")
    date_of_expiry = models.TextField(default="")
    insurance_company_name = models.TextField(default="")
class Book_Appointment(models.Model):
    """A booked appointment: patient, doctor, scheduling, payment flags,
    chat/video channel, and clinical notes.

    NOTE(review): several text defaults look like pre-encrypted ciphertext
    blobs — confirm the at-rest encryption scheme before changing them.
    """
    Book_Appointment_id= models.AutoField(primary_key=True)
    Problem=models.TextField(default="+4lISovpyV6DwPqRNcKmFvtDUyL3LLzPP9wCR3oIKMbT44gGXvC2F3EL1IvyY9MP3SmuuP5L69iN0ZJ8dJXEAQ==")
    Completion=models.TextField(default="akjMaPmdwYqc2btwftgMOLe5H1/7BQpJUJMTLVdnVZbfcEVgXZvf8W8njyEEotQF8Q1hq850qnBFDLA/FZ9c6Q==")
    Billing_Details_id=models.ForeignKey(Billing_Details,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
    Date=models.CharField(max_length=500,default="")
    Time=models.CharField(max_length=500,default="")
    Date_of_origin=models.DateTimeField(auto_now_add=True,blank=True, null=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Status=models.CharField(default="Pending",max_length=20)
    Doctor_Online_Status=models.CharField(default="False",max_length=20)
    Hospital_id=models.ForeignKey('ear_health_professional.Hospital',on_delete=models.CASCADE,blank=True,null=True)
    Channel_id = models.CharField(max_length=500,default="")
    Appointment_Rating = models.IntegerField(default=0)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    # Mutually-exclusive payment-method flags, stored as string booleans.
    Cash_on_Arrival = models.CharField(max_length=500,default="False")
    Online_Payment = models.CharField(max_length=500,default="False")
    is_Paid = models.CharField(max_length=8,default="False")
    Paypal_Payment = models.CharField(max_length=500,default="False")
    Ohipe_Payment = models.CharField(max_length=500,default="False")
    Insurance_Payment = models.CharField(max_length=500,default="False")
    Accept_Reject_Status = models.CharField(max_length=500,default="Pending")
    Doctor_Slot_Timing = models.CharField(max_length=500,default="")
    Doctor_Notes = models.TextField(default="wt1lvNv9BdDP4iPKwsHoJwlWUg65Z3kIEGdEn4AZbEU/mRiiiz3TLZE5HZMCx7qWt8uJvAsH7WufJRhc+0OeeA==")
    Doctor_Prescription = models.TextField(default="wt1lvNv9BdDP4iPKwsHoJwlWUg65Z3kIEGdEn4AZbEU/mRiiiz3TLZE5HZMCx7qWt8uJvAsH7WufJRhc+0OeeA==")
    Medical_Diagnosis = models.TextField(default="yqrxWDqA9m4g/fkhdmp1jkBC1pXHyh60EwwBzdCLGGM=")
    Doctor_Read_Message = models.CharField(max_length=20,default="0")
    Patient_Read_Message = models.CharField(max_length=20,default="0")
    Patient_rating_Status = models.CharField(max_length=20,default="False")
    PDF_data = models.TextField(default="")
class General_Patient_Information(models.Model):
    """Intake questionnaire captured per appointment: demographics,
    medical history, and lifestyle habits.

    NOTE(review): the repeated defaults look like an encrypted empty
    value — confirm against the encryption layer.
    """
    General_Patient_Information_id = models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment,on_delete=models.CASCADE,blank=True,null=True)
    Patient_Gender = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Name = models.TextField(default="")
    Patient_First_Name = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Last_Name = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_DOB = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Height = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Weight = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_Email = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_reason = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    # Patient Medical History
    Patient_drug_allergies = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_disease_list = models.TextField(default="")
    Patient_other_illness = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_List_any_operations = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Patient_List_of_Current_Medications = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    # Healthy & Unhealthy Habits
    Exercise = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Eating_following_a_diet =models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Alcohol_Consumption = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Caffeine_Consumption = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Do_you_smoke = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
    Medical_History = models.TextField(default="gjPDPWj20N5YisvSFYRVnzZm77L0i9YxFpf/TfngMNI=")
class Ser_Appointment(serializers.ModelSerializer):
    """Read serializer for Book_Appointment with related patient, doctor,
    and hospital fields flattened in via ReadOnlyField sources."""
    Patient_Name=serializers.ReadOnlyField(source="Patient_id.Username")
    Patient_Username=serializers.ReadOnlyField(source="Patient_id.Username")
    Patient_Gender=serializers.ReadOnlyField(source="Patient_id.Gender")
    Patient_Country=serializers.ReadOnlyField(source="Patient_id.Country")
    Date_of_Birth=serializers.ReadOnlyField(source="Patient_id.Date_of_Birth")
    Health_Professional_id=serializers.ReadOnlyField(source="Health_Professional_id.Health_Professional_Id")
    Health_Professional_Username=serializers.ReadOnlyField(source="Health_Professional_id.Username")
    Health_Professional_Full_Name=serializers.ReadOnlyField(source="Health_Professional_id.Full_Name")
    Hospital_id = serializers.ReadOnlyField(source="Hospital_id.Hospital_id")
    Hospital_Name = serializers.ReadOnlyField(source="Hospital_id.Hospital_Name")
    About = serializers.ReadOnlyField(source="Hospital_id.About")
    Status = serializers.ReadOnlyField(source="Hospital_id.Status")
    More_Mapinfo = serializers.ReadOnlyField(source="Hospital_id.More_Mapinfo")
    # NOTE(review): model ForeignKey fields declared on a serializer are
    # not serializer fields and have no effect here — likely copy-paste
    # from a model; confirm and consider removing.
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    Appointment_Status = serializers.ReadOnlyField(source = "Status")
    class Meta:
        model = Book_Appointment
        fields = ('Patient_id','Patient_Name','Patient_Username','Patient_Gender','Patient_Country','Problem','Completion','Date','Time','Date_of_origin','Status','Book_Appointment_id','Date_of_Birth','Doctor_Notes','Doctor_Prescription','Health_Professional_id','Health_Professional_Username','Health_Professional_Full_Name','Hospital_id','Hospital_Name','About','Status','More_Mapinfo','Doctor_Online_Status','Channel_id','Accept_Reject_Status','Cash_on_Arrival','Online_Payment','Ohipe_Payment','Insurance_Payment','Appointment_Status')
class Messages(models.Model):
    """A chat message within an appointment, sent by either the patient
    or the doctor (distinguished by Role); read receipts stored as
    string booleans."""
    Messages_id = models.AutoField(primary_key=True)
    Message = models.TextField(default="")
    Book_Appointment_id=models.ForeignKey(Book_Appointment , on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Role = models.CharField(max_length=20,default="")
    Status = models.CharField(max_length=20,default="False")
    Doctor_Read_Status = models.CharField(max_length=20,default="False")
    Patient_Read_Status = models.CharField(max_length=20,default="False")
    Date = models.CharField(max_length=20,default="False")
    Time = models.CharField(max_length=20,default="False")
class SerMessage(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every Messages field."""
    class Meta:
        model = Messages
        fields = '__all__'
class Doctor_Image(models.Model):
    """An image attached to an appointment by the doctor."""
    Doctor_Image_id=models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment, on_delete=models.CASCADE)
    img=models.ImageField(upload_to='Appointment/',default="dummy.jpg")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return str(self.Doctor_Image_id)
class MultipleImages(models.Model):
    """One of possibly many images uploaded against an appointment."""
    MultipleImages_id=models.AutoField(primary_key=True)
    Book_Appointment_id=models.ForeignKey(Book_Appointment, on_delete=models.CASCADE)
    img=models.ImageField(upload_to='Appointment/',default="dummy.jpg")
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return str(self.MultipleImages_id)
class Add_Caregiver(models.Model):
    """A caregiver contact linked to a patient, with data-sharing scope
    and preferred communication channel."""
    Add_Caregiver_id=models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Name=models.CharField(max_length=500, default="")
    Email=models.EmailField(max_length=500, default="")
    Mobile_Number=models.CharField(max_length=500, default="")
    Relationship=models.CharField(max_length=500, default="",choices=relationship)
    Data=models.CharField(max_length=500, default="",choices=Data)
    Communication=models.CharField(max_length=500, default="",choices=communication)
    HospitalAccount_Id=models.ForeignKey('ear_health_professional.HospitalAccount' , on_delete=models.CASCADE,blank=True,null=True)
    Clinics_BranchId=models.ForeignKey( 'ear_health_professional.Clinics_Branch', on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return self.Name
class Patient_Recent_visit(models.Model):
    """Join row recording that a patient recently visited a doctor's profile."""
    Patient_Recent_visit_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
class Patient_Favorited(models.Model):
    """Join row recording that a patient favorited a doctor."""
    Patient_Favorited_id = models.AutoField(primary_key=True)
    Patient_id=models.ForeignKey(Patient_Account,on_delete=models.CASCADE,blank=True,null=True)
    Health_Professional_id=models.ForeignKey('ear_health_professional.Health_Professional_Account',on_delete=models.CASCADE,blank=True,null=True)
| AdnanSiddiqui96/Projects-Backup | TestProject/Patient/models.py | models.py | py | 15,374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name"... |
5663610357 | import methods
import matplotlib.pyplot as plt
def func(x, y):
    """Right-hand side of the ODE  y' = 1 + 1.8*y*sin(x) - y^2."""
    return 1 + 1.8*y*methods.np.sin(x) - y**2
# Initial condition y(0) = 0, integrate to x = 6 with step h = 0.1.
x0 = 0
y0 = 0
x_end = 6
h = 0.1
# Runge-Kutta at step h and h/2 (the halved run feeds the Runge error rule).
x1, y1, h = methods.runge_kutta(x0, y0, x_end, func, h, True)
x1_halved, y1_halved = methods.runge_kutta(x0, y0, x_end, func, h/2, False)
# Adams method bootstrapped from the first four Runge-Kutta points.
x2, y2 = methods.adams(x1[:4], y1[:4], x_end, func, h)
x2_halved, y2_halved = methods.adams(x1_halved[:4], y1_halved[:4], x_end, func, h/2)
# Runge-rule error estimates (order 4) for both methods.
e1 = methods.evaluate_error_runge(y1, y1_halved, 4)
e2 = methods.evaluate_error_runge(y2, y2_halved, 4)
print(f"  N      x      Runge-Kutta     e1        Adams      e2")
for i in range(len(x1)):
    print(f" {i:>3}  {round(x1[i], 5):<6}   {y1[i]:8.6f}   {e1[i]:8.1e}   {y2[i]:8.6f}   {e2[i]:8.1e}")
# Plot both solutions. (Window titles/labels are Ukrainian: "Розв'язок" =
# "Solution", "Похибка" = "Error".)
# NOTE(review): fig.canvas.set_window_title is deprecated in newer
# matplotlib (use fig.canvas.manager.set_window_title) — confirm version.
fig = plt.gcf() # to be able to change window title
fig.canvas.set_window_title("Розв'язок")
plt.plot(x1, y1, 'b', label = "Метод Рунге-Кутта")
plt.plot(x2, y2, 'y', label = "Метод Адамса")
plt.legend(loc="best")
plt.show()
fig = plt.gcf() # to be able to change window title
fig.canvas.set_window_title("Похибка")
plt.plot(x1, e1, 'b', label = "Похибка методу Рунге-Кутта")
plt.plot(x2, e2, 'y', label = "Похибка методу Адамса")
plt.legend(loc="best")
plt.show()
{
"api_name": "methods.np.sin",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "methods.np",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "methods.runge_kutta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "methods.runge_kutta"... |
25917900708 | from __future__ import print_function
import argparse
import random
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from os import listdir
from os.path import join
from moviepy.editor import *
# Load the pretrained DeepLabV3/ResNet-101 segmentation model from torch hub.
model = torch.hub.load('pytorch/vision', 'deeplabv3_resnet101', pretrained=True)
# Index of the "person" class in the model's output channels.
people_class = 15
model.eval()
print ("Model Loaded")
# 3x3 Gaussian-style kernel (normalized to sum 1) used to soften mask edges.
blur = torch.FloatTensor([[[[1.0, 2.0, 1.0],[2.0, 4.0, 2.0],[1.0, 2.0, 1.0]]]]) / 16.0
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
    model.to('cuda')
    blur = blur.to('cuda')
import urllib
from torchvision import transforms
# ImageNet normalization expected by torchvision segmentation models.
preprocess = transforms.Compose([
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def makeSegMask(img):
    """Compute a soft person-segmentation mask for one video frame.

    Assumes *img* is an HxWx3 uint8 RGB frame (moviepy's fl_image
    contract) — TODO confirm. Returns an HxWx3 uint8 array where white
    marks "person" pixels.
    """
    frame_data = torch.FloatTensor( img ) / 255.0
    input_tensor = preprocess(frame_data.permute(2, 0, 1))
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
    # move the input and model to GPU for speed if available
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
    with torch.no_grad():
        output = model(input_batch)['out'][0]
    segmentation = output.argmax(0)
    # Channel 0 is the background logit; turn it into a soft "not background"
    # weight in [0, 2] (empirically tuned constants).
    bgOut = output[0:1][:][:]
    a = (1.0 - F.relu(torch.tanh(bgOut * 0.30 - 1.0))).pow(0.5) * 2.0
    # Hard 0/1 person mask from the argmax labels, then blurred 3x to
    # feather the edges.
    people = segmentation.eq( torch.ones_like(segmentation).long().fill_(people_class) ).float()
    people.unsqueeze_(0).unsqueeze_(0)
    for i in range(3):
        people = F.conv2d(people, blur, stride=1, padding=1)
    # combined_mask = F.hardtanh(a * b)
    # Combine the soft background weight with the feathered person mask,
    # clamped to [0, 1], then broadcast to 3 channels.
    combined_mask = F.relu(F.hardtanh(a * (people.squeeze().pow(1.5)) ))
    combined_mask = combined_mask.expand(1, 3, -1, -1)
    res = (combined_mask * 255.0).cpu().squeeze().byte().permute(1, 2, 0).numpy()
    return res
def processMovie(args):
    """Run the segmentation mask over every frame of args.input and write
    the resulting mask movie to args.output.

    args.width == 0 means "keep the source resolution"; otherwise the clip
    is rescaled to that width (height inferred by moviepy).
    """
    print("Processing {}... This will take some time.".format(args.input))
    if args.width != 0:
        target=[args.width, None]
    else:
        target=None
    realityClip = VideoFileClip(args.input, target_resolution=target)
    realityMask = realityClip.fl_image(makeSegMask)
    realityMask.write_videofile(args.output)
def main():
parser = argparse.ArgumentParser(description='BGRemove')
parser.add_argument('--input', metavar='N', required=True,
help='input movie path')
parser.add_argument('--output', metavar='N', required=True,
help='output movie path')
parser.add_argument('--width', metavar='N', type=int, default=0,
help='target width (optional, omit for full width)')
args = parser.parse_args()
processMovie(args)
if __name__ == '__main__':
main()
| WhiteNoise/deep-bgremove | createmask.py | createmask.py | py | 2,592 | python | en | code | 61 | github-code | 36 | [
{
"api_name": "torch.hub.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.hub",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_availab... |
22420951914 | from django.shortcuts import render,redirect
from django.contrib import messages
from .models import Courses
def new(request):
context = {
'course': Courses.objects.all()
}
return render(request, 'new.html', context)
def create(request):
errors = Courses.objects.basic_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
else:
Courses.objects.create(
name=request.POST['name'],
description=request.POST['description'],
)
return redirect('/')
def destroy(request, course_id):
one_course = Courses.objects.get(id=course_id)
context = {
'course': one_course
}
return render(request, 'destroy.html', context)
def delete(request, course_id):
to_delete =Courses.objects.get(id=course_id)
to_delete.delete()
return redirect('/')
| Wendy-Wu-Chiang/Python_stack | django/full_stack_django/courses_proj/courses_app/views.py | views.py | py | 952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Courses.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Courses.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Courses",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "dj... |
8195166573 | from PIL import Image, ImageOps, ImageDraw, ImageFont
from bot.config import PICS_DIRECTORY, QUOTATION_DATABASE
import textwrap
import sqlite3
import random
import os
import io
def get_random_quote():
conn = sqlite3.connect(QUOTATION_DATABASE)
cursor = conn.cursor()
count = cursor.execute('SELECT COUNT(*) FROM quotes;').fetchone()[0]
random_id = random.randint(1, count)
return cursor.execute('SELECT author, quote FROM quotes WHERE id = ?', (random_id, )).fetchone()[1]
def create_quote_photo():
img_quote = Image.new(mode="RGB", size=(850, 400))
img_quote = ImageOps.expand(img_quote, border=2, fill='white')
img_komaru = Image.open(os.path.join(PICS_DIRECTORY, random.choice(os.listdir(PICS_DIRECTORY))))
img_komaru = img_komaru.resize((int(img_komaru.size[0] * (350 / img_komaru.size[1])), 350))
img_quote.paste(img_komaru, (25, 25))
quote = get_random_quote()
font1 = ImageFont.truetype('times.ttf', size=20)
font2 = ImageFont.truetype('times.ttf', size=24)
draw_text = ImageDraw.Draw(img_quote)
margin = 420
offset = 25
for line in textwrap.wrap(quote, width=45):
draw_text.text((margin, offset), line, font=font1, fill="white")
offset += font1.getsize(line)[1]
author = '- Комару -'
draw_text.text((790 - font2.getsize(author)[0], 310), author, font=font2, fill="white")
byte_arr = io.BytesIO()
img_quote.save(byte_arr, format='PNG')
byte_arr.seek(0)
return byte_arr
| Ku6iKRu6Ika/quote-bot | bot/utils.py | utils.py | py | 1,504 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bot.config.QUOTATION_DATABASE",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.... |
26634573192 | import requests
def request_demo():
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
param = {
"corpid":"ww93348658d7c66ef4",
"corpsecret":"T0TFrXmGYel167lnkzEydsjl6bcDDeXVmkUnEYugKIw"
}
proxy = {
"http": "http://127.0.0.1:8080",
"https": "http://127.0.0.1:8080"
}
res = requests.get(url=url, params=param, proxies =proxy, verify = False)
if __name__ == '__main__':
request_demo() | ceshiren/HogwartsSDET17 | test_mock/requests_demo.py | requests_demo.py | py | 447 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
19651817490 | from binance.client import Client
import pandas as pd
import matplotlib.pyplot as plt
import ta
data = Client().get_historical_klines("BTCUSDT", Client.KLINE_INTERVAL_1DAY, "01 JANUARY 2018")
df = pd.DataFrame(data, columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])
somme_investi = 0
benef = 0
del df['ignore']
del df['close_time']
del df['quote_av']
del df['trades']
del df['tb_base_av']
del df['tb_quote_av']
df['close'] = pd.to_numeric(df['close'])
df['high'] = pd.to_numeric(df['high'])
df['low'] = pd.to_numeric(df['low'])
df['open'] = pd.to_numeric(df['open'])
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
df['SMA50'] = ta.trend.sma_indicator(df['close'], 50)
df['SMA200'] = ta.trend.sma_indicator(df['close'], 200)
for i in range(len(df['SMA200']) - 1):
if df['SMA200'][i] > df['SMA50'][i] and df['SMA200'][i+1] < df['SMA50'][i+1]:
plt.annotate('BUY',
ha = 'center', va = 'bottom',
xytext = (df['timestamp'][i+1], df['SMA200'][i+1] + 5000),xy = (df['timestamp'][i+1], df['SMA200'][i+1]),arrowprops = {'facecolor' : 'green'})
benef -= df['open'][i+1]
somme_investi += df['open'][i+1]
print("ACHAT: " + str(df['open'][i+1]) + " USDT")
elif df['SMA200'][i] < df['SMA50'][i] and df['SMA200'][i+1] > df['SMA50'][i+1]:
plt.annotate('SELL',
ha = 'center', va = 'bottom',
xytext = (df['timestamp'][i+1], df['SMA200'][i+1] + 5000),xy = (df['timestamp'][i+1], df['SMA200'][i+1]),arrowprops = {'facecolor' : 'red'})
benef += df['open'][i+1]
print("VENTE: " + str(df['open'][i+1]) + " USDT")
print("SOMME INVESTIE: " + str(somme_investi - benef))
print("BENEFICE TOTAL: " + str(benef))
plt.plot(df['timestamp'], df['open'])
plt.plot(df['timestamp'], df['SMA50'], color='r')
plt.plot(df['timestamp'], df['SMA200'], color='g')
plt.show() | RaphaelFontaine/Trading | src/moving_average_crossing.py | moving_average_crossing.py | py | 1,984 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "binance.client.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "binance.client.Client.KLINE_INTERVAL_1DAY",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "call"
},
{
... |
10073007087 | import numpy as np
import json
import timeit
import os
import argparse
from pathlib import Path
import sys
from shapely.geometry import Polygon
import numpy as np
import numba
from inspect import getmembers
sys.path.append(os.path.realpath('hausdorff'))
from hausdorff_dist import hausdorff_distance
sys.path.append(os.path.realpath('yolov4'))
from tool.utils import *
from config import config
from utils import *
def parse_args():
argparser = argparse.ArgumentParser(
description='Data preparation for vehicle counting')
argparser.add_argument('-j', '--json_dir', type=str,
default='../data/json/', help='Json directory')
argparser.add_argument('-v', '--video_dir', type=str,
default='../data/video/', help='Video directory')
argparser.add_argument('-t', '--track_dir', type=str,
default='data/track', help='Detection result directory')
argparser.add_argument('-s', '--save_dir', type=str,
default='data/count', help='Save result')
args = vars(argparser.parse_args())
return args
def load_zone_anno(json_filename):
with open(json_filename) as jsonfile:
dd = json.load(jsonfile)
polygon = [(int(x), int(y)) for x, y in dd['shapes'][0]['points']]
paths = {}
for it in dd['shapes'][1:]:
kk = str(int(it['label'][-2:]))
paths[kk] = [(int(x), int(y)) for x, y in it['points']]
return polygon, paths
def check_bbox_overlap_with_roi(box, roi):
roi_poly = Polygon(roi)
x1, y1 = box[0], box[1]
x2, y2 = box[2], box[3]
box_poly = Polygon([(x1,y1), (x2, y1), (x2, y2), (x1, y2)])
return box_poly.intersects(roi_poly)
def is_same_direction(traj1, traj2, angle_thr):
vec1 = np.array([traj1[-1][0] - traj1[0][0], traj1[-1][1] - traj1[0][1]])
vec2 = np.array([traj2[-1][0] - traj2[0][0], traj2[-1][1] - traj2[0][1]])
L1 = np.sqrt(vec1.dot(vec1))
L2 = np.sqrt(vec2.dot(vec2))
if L1 == 0 or L2 == 0:
return False
cos = vec1.dot(vec2)/(L1*L2)
angle = np.arccos(cos) * 360/(2*np.pi)
return angle < angle_thr
def count(json_dir, video_dir, track_dir, save_dir):
starttime = timeit.default_timer()
remove_wrong_classes = config['remove_wrong_classes']
min_track_len = config['tracker']['min_len']
Path(save_dir).mkdir(parents=True, exist_ok=True)
cam_datas = get_list_data(json_dir)
results = []
for cam_data in cam_datas:
cam_name = cam_data['camName']
width = int(cam_data['imageWidth'])
height = int(cam_data['imageHeight'])
track_res_path = os.path.join(track_dir, cam_name + '.npy')
tracks = np.load(track_res_path, allow_pickle=True)
mm_track = {}
tipical_trajs = {}
for mm_id, mm in enumerate(cam_data['shapes'][1:]):
if 'tracklets' in mm.keys():
tipical_trajs[mm_id] = [mm['tracklets']]
else:
tipical_trajs[mm_id] = [mm['points']]
track_dict = []
for class_id, class_tracks in enumerate(tracks):
track_dict.append({})
for frame_id, vehicle_tracks in enumerate(class_tracks):
for track in vehicle_tracks:
x1 = track[0]
y1 = track[1]
x2 = track[2]
y2 = track[3]
cx = int((x1 + x2) / 2)
cy = int((y1 + y2) / 2)
track_id = int(track[5])
if track_id in track_dict[class_id]:
track_dict[class_id][track_id]['endframe'] = frame_id
track_dict[class_id][track_id]['bbox'].append([frame_id, x1, y1, x2, y2, class_id])
track_dict[class_id][track_id]['tracklet'].append([cx, cy])
else:
track_dict[class_id][track_id] = {'startframe' : frame_id,
'endframe' : frame_id,
'bbox' : [[frame_id, x1, y1, x2, y2, class_id]],
'tracklet' : [[cx, cy]]}
for class_id, _ in enumerate(track_dict):
mm_track[class_id] = {}
track_ids = sorted([k for k in track_dict[class_id].keys()])
for track_id in track_ids:
if len(track_dict[class_id][track_id]['tracklet']) < config['tracker']['min_len']:
continue
track_traj = track_dict[class_id][track_id]['tracklet']
# calc hausdorff dist with tipical trajs, assign the movement with the min dist
all_dists_dict = {k: float('inf') for k in tipical_trajs}
for m_id, m_t in tipical_trajs.items():
for t in m_t:
tmp_dist = hausdorff_distance(np.array(track_traj), np.array(t), distance='euclidean')
if tmp_dist < all_dists_dict[m_id]:
all_dists_dict[m_id] = tmp_dist
# check direction
all_dists = sorted(all_dists_dict.items(), key=lambda k: k[1])
min_idx, min_dist = None, config['counter']['dist_thr']
for i in range(0, len(all_dists)):
m_id = all_dists[i][0]
m_dist = all_dists[i][1]
if m_dist >= config['counter']['dist_thr']: #if min dist > dist_thr, will not assign to any movement
break
else:
if is_same_direction(track_traj, tipical_trajs[m_id][0], config['counter']['angle_thr']): #check direction
min_idx = m_id
min_dist = m_dist
break # if match, end
else:
continue # direction not matched, find next m_id
if min_idx == None and min_dist >= config['counter']['dist_thr']:
continue
#save counting results
mv_idx = min_idx
#get last frameid in roi
bboxes = track_dict[class_id][track_id]['bbox']
bboxes.sort(key=lambda x: x[0])
dst_frame = bboxes[0][0]
last_bbox = bboxes[-1]
roi = cam_data['shapes'][0]['points']
if check_bbox_overlap_with_roi(last_bbox, roi) == True:
dst_frame = last_bbox[0]
else:
for i in range(len(bboxes) - 2, 0, -1):
bbox = bboxes[i]
if check_bbox_overlap_with_roi(bbox, roi) == True:
dst_frame = bbox[0]
break
else:
continue
track_types = [k[5] for k in bboxes]
track_type = max(track_types, key=track_types.count)
mm_track[class_id][track_id] = mv_idx
results.append([cam_name, dst_frame, mv_idx, class_id])
filepath = os.path.join(save_dir, cam_name + '.json')
with open(filepath, 'w') as f:
json.dump(mm_track, f)
results.sort(key=lambda x: ([x[0], x[1], x[2], x[3]]))
result_filename = os.path.join(save_dir, 'result.txt')
with open(result_filename, 'w') as result_file:
for result in results:
result_file.write('{} {} {} {}\n'.format(result[0], result[1] + 1, result[2] + 1, result[3] + 1))
endtime = timeit.default_timer()
print('Count time: {} seconds'.format(endtime - starttime))
if __name__=='__main__':
args = parse_args()
json_dir = args['json_dir']
video_dir = args['video_dir']
track_dir = args['track_dir']
save_dir = args['save_dir']
count(json_dir, video_dir, track_dir, save_dir) | PhanVinhLong/vehicle-counting-aichcmc2020 | count2.py | count2.py | py | 6,482 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
72311226344 | import os
import subprocess
from src.manager.manager.launcher.launcher_interface import ILauncher, LauncherException
from src.manager.manager.docker_thread.docker_thread import DockerThread
from src.manager.libs.process_utils import wait_for_xserver
from typing import List, Any
import time
class LauncherDronesRos2(ILauncher):
exercise_id: str
type: str
module: str
resource_folders: List[str]
model_folders: List[str]
plugin_folders: List[str]
world_file: str
running = False
threads: List[Any] = []
def run(self, callback):
# Start X server in display
xserver_cmd = f"/usr/bin/Xorg -quiet -noreset +extension GLX +extension RANDR +extension RENDER -logfile ./xdummy.log -config ./xorg.conf :0"
xserver_thread = DockerThread(xserver_cmd)
xserver_thread.start()
wait_for_xserver(":0")
self.threads.append(xserver_thread)
# expand variables in configuration paths
self._set_environment()
world_file = os.path.expandvars(self.world_file)
# Launching MicroXRCE and Aerostack2 nodes
as2_launch_cmd = f"ros2 launch jderobot_drones as2_default_classic_gazebo.launch.py world_file:={world_file}"
as2_launch_thread = DockerThread(as2_launch_cmd)
as2_launch_thread.start()
self.threads.append(as2_launch_thread)
# Launching gzserver and PX4
px4_launch_cmd = f"$AS2_GZ_ASSETS_SCRIPT_PATH/default_run.sh {world_file}"
px4_launch_thread = DockerThread(px4_launch_cmd)
px4_launch_thread.start()
self.threads.append(px4_launch_thread)
self.running = True
def is_running(self):
return True
def terminate(self):
if self.is_running():
for thread in self.threads:
thread.terminate()
thread.join()
self.running = False
def _set_environment(self):
resource_folders = [os.path.expandvars(path) for path in self.resource_folders]
model_folders = [os.path.expandvars(path) for path in self.model_folders]
plugin_folders = [os.path.expandvars(path) for path in self.plugin_folders]
os.environ["GAZEBO_RESOURCE_PATH"] = f"{os.environ.get('GAZEBO_RESOURCE_PATH', '')}:{':'.join(resource_folders)}"
os.environ["GAZEBO_MODEL_PATH"] = f"{os.environ.get('GAZEBO_MODEL_PATH', '')}:{':'.join(model_folders)}"
os.environ["GAZEBO_PLUGIN_PATH"] = f"{os.environ.get('GAZEBO_PLUGIN_PATH', '')}:{':'.join(plugin_folders)}"
| JdeRobot/RoboticsApplicationManager | manager/manager/launcher/launcher_drones_ros2.py | launcher_drones_ros2.py | py | 2,530 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "src.manager.manager.launcher.launcher_interface.ILauncher",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"... |
11883990410 | from lcp.core.interfaces.module import Module
from lcp.modules.camerafeed.camera_feed import CameraFeed
import cv2 as cv
import _thread
class FaceDetector(Module):
__name = "Face Detector"
__version = "1.0"
__dependencies = [CameraFeed]
def __init__(self, config):
super().__init__(self.__name, self.__version, self.__dependencies)
self.__face_classifier_file = config.get('face_classifier', fallback='classifier.xml')
self.__face_classifier = []
self.__absolute_face_size = 0
self.__tracked_faces = []
self.__frame_width = 0
self.__frame_height = 0
self.__camera_feed = []
self.__detector_thread = []
def install(self, modules):
modules = super().install(modules)
self.__camera_feed = modules['CameraFeed']
self.__face_classifier = cv.CascadeClassifier('..\\modules\\facedetector\\classifiers\\' + self.__face_classifier_file)
def start(self):
self.__detector_thread = _thread.start_new_thread(self.__detect_faces, ())
def get_detected_faces(self):
return self.__tracked_faces
def get_frame_dimensions(self):
return self.__frame_width, self.__frame_height
def __detect_faces(self):
while True:
frame = self.__camera_feed.get_frame()
gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
gray_frame = cv.equalizeHist(gray_frame)
self.__frame_height, self.__frame_width, _ = frame.shape
if self.__absolute_face_size == 0:
height, width = gray_frame.shape[:2]
if float(height) * 0.2 > 0:
self.__absolute_face_size = int(height * 0.2)
self.__tracked_faces = self.__face_classifier.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=2, minSize=(self.__absolute_face_size, self.__absolute_face_size))
| huybthomas/LCP-Core-Old | src/lcp/modules/facedetector/face_detector.py | face_detector.py | py | 1,900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lcp.core.interfaces.module.Module",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "lcp.modules.camerafeed.camera_feed.CameraFeed",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 26,
"usage_type": "... |
70666328104 | import xml.etree.ElementTree as ET
bd=ET.Element("base")
ventana=ET.SubElement(bd,"ventana", name="ventana-consultas")
ventana_hide=ET.SubElement(ventana,"ventana-hide",)
ventana_hide.set("option-hide","false")
ET.dump(bd)
tree = ET.ElementTree(bd)
tree.write("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
estructura_xml = ET.parse("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
# Obtiene el elemento raíz:
raiz = estructura_xml.getroot()
'''for ventana in raiz.findall('ventana'):
print(ventana)
print("espacio1")
print(ventana.get("option-hide"))
print("nada")
'''
for ventana in raiz.iter('ventana'):
print("get: "+str(ventana.get("option-hide")))
ventana.set("option-hide","0")
print(ventana.get("option-hide"))
estructura_xml.write("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
| ColqueRicardo/v-version | pruebas/pruebas aisladas/archivos xml.py | archivos xml.py | py | 829 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 4,
"usage_type": "call"
},
{
... |
42360576812 | """
__title__ = ''
__author__ = 'Thompson'
__mtime__ = '2018/5/23'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import requests
# 根据协议类型,选择不同的代理
proxies = {
"http": "http://118.190.95.35:9001",
}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get("http://www.baidu.com", proxies = proxies,headers=headers)
print(response.content.decode()) | hwzHw/python37 | day0109/requests_04_代理IP.py | requests_04_代理IP.py | py | 1,014 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
}
] |
2080670146 | # -*- coding : utf-8 -*-
import numpy as np
import torch
from torch import nn
class DNN(nn.Module):
def __init__(self,args):
super().__init__()
self.outDim = args.outDim
self.seqLen = args.seqLen
self.hiddenDim1 = args.hiddenDim1
self.hiddenDim2 = args.hiddenDim2
self.hiddenDim3 = args.hiddenDim3
self.fc1 = nn.Linear(self.seqLen,self.hiddenDim1)
self.bn1 = nn.BatchNorm1d(self.hiddenDim1)
# self.relu = nn.RReLU()
self.relu = nn.RReLU()
self.fc2 = nn.Linear(self.hiddenDim1,self.hiddenDim2)
self.bn2 = nn.BatchNorm1d(self.hiddenDim2)
self.fc3 = nn.Linear(self.hiddenDim2,self.hiddenDim3)
self.bn3 = nn.BatchNorm1d(self.hiddenDim3)
self.out = nn.Linear(self.hiddenDim3,self.outDim)
self.dnn = nn.Sequential(
self.fc1,
self.bn1,
self.relu,
self.fc2,
self.bn2,
self.relu,
self.fc3,
self.bn3,
self.relu,
self.out,
)
def forward(self,seq):
#assure seq is 1 dim
seq.view(-1)
out = self.dnn(seq)
return out
| Ylizin/RWSim | ylSim/DNN.py | DNN.py | py | 1,213 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
5049970089 | import copy
import json
import math
import struct
from functools import partial
from pathlib import Path, PosixPath
import numpy as np
# isort: off
import torch
import tensorrt as trt
# isort: on
# numpy doesn't know bfloat16, define abstract binary type instead
np_bfloat16 = np.dtype('V2', metadata={"dtype": "bfloat16"})
def torch_to_numpy(x: torch.Tensor):
assert isinstance(x, torch.Tensor), \
f'x must be a torch.Tensor object, but got {type(x)}.'
if x.dtype != torch.bfloat16:
return x.detach().cpu().numpy()
return x.view(torch.int16).detach().cpu().numpy().view(np_bfloat16)
def numpy_to_torch(x):
if x.dtype != np_bfloat16:
return torch.tensor(x)
return torch.tensor(x.view(np.int16)).view(torch.bfloat16)
def numpy_to_dtype(x, dtype: str):
if x.dtype == np_bfloat16:
# BF16 --> non-BF16 or BF16
if dtype != 'bfloat16':
torch_to_numpy(numpy_to_torch(x).to(str_dtype_to_torch(dtype)))
else:
return x
else:
# non-BF16 types --> non-BF16 or BF16
if dtype != 'bfloat16':
return x.astype(str_dtype_to_np(dtype))
else:
return torch_to_numpy(torch.from_numpy(x).to(torch.bfloat16))
fp32_array = partial(np.array, dtype=np.float32)
fp16_array = partial(np.array, dtype=np.float16)
int32_array = partial(np.array, dtype=np.int32)
def bf16_array(x):
x = torch.tensor(x, dtype=torch.bfloat16)
x = torch_to_numpy(x)
return x
def trt_version():
return trt.__version__
def torch_version():
return torch.__version__
_str_to_np_dict = dict(
float16=np.float16,
float32=np.float32,
int32=np.int32,
bfloat16=np_bfloat16,
)
def str_dtype_to_np(dtype):
ret = _str_to_np_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_str_to_torch_dtype_dict = dict(
bfloat16=torch.bfloat16,
float16=torch.float16,
float32=torch.float32,
int32=torch.int32,
int8=torch.int8,
)
def str_dtype_to_torch(dtype):
ret = _str_to_torch_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_str_to_trt_dtype_dict = dict(float16=trt.float16,
float32=trt.float32,
int64=trt.int64,
int32=trt.int32,
int8=trt.int8,
bool=trt.bool,
bfloat16=trt.bfloat16,
fp8=trt.fp8)
def str_dtype_to_trt(dtype):
ret = _str_to_trt_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_np_to_trt_dtype_dict = {
np.int8: trt.int8,
np.int32: trt.int32,
np.float16: trt.float16,
np.float32: trt.float32,
# hash of np.dtype('int32') != np.int32
np.dtype('int8'): trt.int8,
np.dtype('int32'): trt.int32,
np.dtype('float16'): trt.float16,
np.dtype('float32'): trt.float32,
np_bfloat16: trt.bfloat16,
np.bool_: trt.bool,
}
def np_dtype_to_trt(dtype):
ret = _np_to_trt_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_trt_to_np_dtype_dict = {
trt.int8: np.int8,
trt.int32: np.int32,
trt.float16: np.float16,
trt.float32: np.float32,
trt.bool: np.bool_,
trt.bfloat16: np_bfloat16,
}
def trt_dtype_to_np(dtype):
ret = _trt_to_np_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_torch_to_np_dtype_dict = {
torch.float16: np.float16,
torch.float32: np.float32,
}
def torch_dtype_to_np(dtype):
ret = _torch_to_np_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
_trt_to_torch_dtype_dict = {
trt.float16: torch.float16,
trt.float32: torch.float32,
trt.int32: torch.int32,
trt.int8: torch.int8,
trt.bfloat16: torch.bfloat16
}
def trt_dtype_to_torch(dtype):
ret = _trt_to_torch_dtype_dict.get(dtype)
assert ret is not None, f'Unsupported dtype: {dtype}'
return ret
def dim_to_trt_axes(dim):
"""Converts torch dim, or tuple of dims to a tensorrt axes bitmask"""
if not isinstance(dim, tuple):
dim = (dim, )
# create axes bitmask for reduce layer
axes = 0
for d in dim:
axes |= 1 << d
return axes
def dim_resolve_negative(dim, ndim):
if not isinstance(dim, tuple):
dim = (dim, )
pos = []
for d in dim:
if d < 0:
d = ndim + d
pos.append(d)
return tuple(pos)
def mpi_comm():
from mpi4py import MPI
return MPI.COMM_WORLD
def mpi_rank():
return mpi_comm().Get_rank()
def mpi_world_size():
return mpi_comm().Get_size()
def pad_vocab_size(vocab_size, tp_size):
return int(math.ceil(vocab_size / tp_size) * tp_size)
def to_dict(obj):
return copy.deepcopy(obj.__dict__)
def to_json_string(obj):
if not isinstance(obj, dict):
obj = to_dict(obj)
return json.dumps(obj, indent=2, sort_keys=True) + "\n"
def to_json_file(obj, json_file_path):
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(to_json_string(obj))
def numpy_fp32_to_bf16(src):
# Numpy doesn't support bfloat16 type
# Convert float32 to bfloat16 manually and assign with bf16 abstract type
original_shape = src.shape
src = src.flatten()
src = np.ascontiguousarray(src)
assert src.dtype == np.float32
dst = np.empty_like(src, dtype=np.uint16)
for i in range(len(dst)):
bytes = struct.pack('<f', src[i])
dst[i] = struct.unpack('<H', struct.pack('BB', bytes[2], bytes[3]))[0]
return dst.reshape(original_shape).view(np_bfloat16)
def fromfile(dir_path, name, shape=None, dtype=None):
dtype = np_dtype if dtype is None else dtype
p = dir_path
if not isinstance(p, PosixPath):
p = Path(p)
p = p / name
if Path(p).exists():
t = np.fromfile(p, dtype=dtype)
if shape is not None:
t = t.reshape(shape)
return t
return None
| NVIDIA/TensorRT-LLM | tensorrt_llm/_utils.py | _utils.py | py | 6,159 | python | en | code | 3,328 | github-code | 36 | [
{
"api_name": "numpy.dtype",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.bfloat16",
... |
28121817798 | import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
import settings
from handlers import *
def make_app():
db = None
handlers = [
(r"/", MainHandler),
(r"/covert", CovertHandler)
]
config = {"template_path":settings.TEMPLATE_PATH, "static_path":settings.ASSETS_PATH, "cookie_secret":settings.COOKIE_SECRET, "debug":True}
return tornado.web.Application(handlers, **config)
if __name__ == '__main__':
tornado.options.parse_command_line()
app = make_app()
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(settings.SERVER_PORT)
tornado.ioloop.IOLoop.instance().start()
| caroltc/lrc2srt | app.py | app.py | py | 672 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "settings.TEMPLATE_PATH",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "settings.ASSETS_PATH",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "settings.COOKIE_SECRET",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
19782973821 | from datetime import datetime
from scipy import misc
import tensorflow as tf
import os
import src.facenet.detect_face
import cv2
import matplotlib.pyplot as plt
from helper import get_images_from_file_list, get_box_from_ellipse
import math
import pickle
import dlib
# ============================================
# Global variables
# ============================================
AVG_FACE_HEIGHT = 142.58539351061276
AVG_FACE_WIDTH = 94.11600875170973
# CNN global vars
gpu_memory_fraction = 1.0
minsize = 50 # minimum size of face
threshold = [0.5, 0.6, 0.7] # three steps's threshold
factor = 0.800 # scale factor
# Haar and Dlib global vars
face_cascade = cv2.CascadeClassifier('src/haarcascades/haarcascade_frontalface_default.xml')
dlib_face_detector = dlib.get_frontal_face_detector()
# ============================================
# Face detection methods
# ============================================
# For a given image, uses the dlib face detection algorithm to predict
# all of the faces present in the image. The algorithm used is based on
# a 29-layer ResNet network architecture. Returns a list of dlib.rectangle
# objects
def dlib_face_detect(image, upscale=1):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = dlib_face_detector(gray, upscale)
return rects
# For a given image, uses the FaceNet CNN detector to predict all of the faces
# present in the given image. Returns a list of bounding boxes (x,y,w,h) of the
# faces. This code was largely borrowed from the blog of Charles Jekel, found here:
# http://jekel.me/2017/How-to-detect-faces-using-facenet/
def cnn_face_detect(image):
# Configuring facenet in facenet/src/compare.py
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = src.facenet.detect_face.create_mtcnn(sess, None)
# run detect_face from the facenet library
bounding_boxes, _ = src.facenet.detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
# for each face detection, compute bounding box and add as tuple
face_detections = []
for (x1, y1, x2, y2, acc) in bounding_boxes:
# skip detections with < 60% confidence
if acc < .6:
continue
w = x2 - x1
h = y2 - y1
face_detections.append((x1, y1, w, h))
return face_detections
# For a given image, use the Haar Cascade detector provided by OpenCV to detect
# all of the faces present in the given image. Uses the parameters scale_factor and
# min_neighbors. Returns a list of bounding boxes (x,y,w,h) of the faces
def haar_face_detect(image, scale_factor, min_neighbors, use_grayscale=True, cascade=None):
if use_grayscale:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Can provide a different cascade type if desired. Cascades found in src/haarcascades
if not cascade:
return face_cascade.detectMultiScale(image, scale_factor, min_neighbors)
else:
return cascade.detectMultiScale(image, scale_factor, min_neighbors)
# ============================================
# Helper functions
# ============================================
# For a given fold number [1-10], retrieve a nested list of bounding boxes for faces for each image
# in the fold. Ex data: [[img1_face1, img1_face2], [img2_face1], ...] where each face bounding box
# is a tuple of (x, y, width, height)
def retrieve_face_list(fold_num):
assert fold_num > 0 and fold_num <= 10
fold_file = 'img/FDDB-folds/FDDB-fold-{:02}-ellipseList.txt'.format(fold_num)
rectangle_file = 'img/FDDB-folds/FDDB-fold-{:02}-rectangleList.pkl'.format(fold_num)
# If this list has already been created, can load it from a pickle file
if os.path.exists(rectangle_file):
with open(rectangle_file, 'rb') as f:
face_list = pickle.load(f)
else:
face_list = []
count, face_count = 0, 0
with open(fold_file, 'r') as f:
file_name = f.readline().rstrip()
while file_name:
num_faces = int(f.readline().rstrip())
count += 1
face_count += num_faces
# iterates over each of the faces in image
faces = []
for i in range(num_faces):
major, minor, angle, h, k, _ = map(float, f.readline().rstrip().split())
faces.append(get_box_from_ellipse(major, minor, angle, h, k))
face_list.append(faces)
# go to next file
file_name = f.readline().rstrip()
print('num images: {}, total num faces: {}'.format(count, face_count))
with open(rectangle_file, 'wb') as w:
pickle.dump(face_list, w)
return face_list
def retrieve_manual_face_labels(fold_num, file_names):
    """Return face boxes for ``file_names``, ordered to match that list.

    Looks each name up in the fold's image listing and pulls the matching
    entry from retrieve_face_list(). The result is cached under
    img/manual/face_labels.pkl and reloaded on later calls.
    NOTE(review): the cache is keyed only by file path, not by ``fold_num``
    or ``file_names`` — stale results if either changes; confirm intended.
    """
    file_list = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
    rectangle_file = 'img/manual/face_labels.pkl'
    if os.path.exists(rectangle_file):
        print("loading from pickle")
        with open(rectangle_file, 'rb') as f:
            face_list = pickle.load(f)
        return face_list
    with open(file_list, 'r') as f:
        file_list = [x.rstrip() for x in f.readlines()]
    rectangles = retrieve_face_list(fold_num)
    face_list = []
    for f in file_names:
        for i, file in enumerate(file_list):
            if f == file:
                face_list.append(rectangles[i])
                break
    with open(rectangle_file, 'wb') as f:
        pickle.dump(face_list, f)
    return face_list
# ============================================
# Testing methods
# ============================================
def compute_accuracy(labels, predictions):
    """Match predicted face boxes against ground-truth boxes.

    A prediction matches a label when its center lies within 40% of the
    label's width/height of the label's center and its size is within
    [0.5x, 1.5x] of the label's size. Each label may be matched at most once
    (the original greedy code could match one label repeatedly and then
    patched the totals after the fact).

    labels      -- iterable of (x, y, w, h) ground-truth boxes
    predictions -- iterable of (x, y, w, h) tuples or dlib rectangles
    Returns (faces_found, num_labels, false_positives).
    """
    labels = list(labels)
    matched = [False] * len(labels)
    faces_found, false_pos = 0, 0
    for prediction in predictions:
        # Duck-type dlib rectangles instead of comparing concrete types.
        if hasattr(prediction, 'left'):
            x_p, y_p = prediction.left(), prediction.top()
            w_p = prediction.right() - prediction.left()
            h_p = prediction.bottom() - prediction.top()
        else:
            x_p, y_p, w_p, h_p = prediction
        center_px, center_py = x_p + w_p / 2, y_p + h_p / 2
        found_one = False
        for i, label in enumerate(labels):
            if matched[i]:
                continue  # one prediction per ground-truth face
            x_l, y_l, w_l, h_l = label
            center_lx, center_ly = x_l + w_l / 2, y_l + h_l / 2
            if (abs(center_lx - center_px) < .4 * w_l and abs(center_ly - center_py) < .4 * h_l
                    and .5 * w_l < w_p < 1.5 * w_l and .5 * h_l < h_p < 1.5 * h_l):
                matched[i] = True
                faces_found += 1
                found_one = True
                break
        if not found_one:
            false_pos += 1
    return faces_found, len(labels), false_pos
def write_detections(fold_num, file_names, face_images, face_labels):
    """Run the CNN detector over a fold and pickle all predictions to disk.

    Output path encodes the detector settings; skips work if the pickle
    already exists. ``file_names`` and ``face_labels`` are unused here.
    NOTE(review): ``factor`` and ``threshold`` are module-level globals not
    visible in this section — presumably the MTCNN scale factor and its three
    stage thresholds; confirm where they are defined.
    """
    directory = 'pred/facenet/{:03}-{}{}{}'.format(int(factor*1000), int(threshold[0]*10), int(threshold[1]*10), int(threshold[2]*10))
    file = directory + '/fold-{}.pkl'.format(fold_num)
    print(file)
    # return
    if os.path.exists(file):
        print('file {} already exists'.format(file))
        return
    if not os.path.exists(directory):
        os.makedirs(directory)
    all_predictions = []
    for image in face_images:
        predictions = cnn_face_detect(image)
        all_predictions.append(predictions)
    with open(file, 'wb') as f:
        pickle.dump(all_predictions, f)
def test_detection(fold_num, file_names, face_images, face_labels):
    """Evaluate the CNN detector on one fold.

    ``fold_num`` and ``file_names`` are kept for signature parity with the
    other test_* helpers but are not used (the original also tracked an
    unused ``file``/``count`` pair, removed here).
    Returns (num_correct, num_faces, num_false_positives) summed over the fold.
    """
    total_faces, total_num_correct, total_false_pos = 0, 0, 0
    for image, label_set in zip(face_images, face_labels):
        # choose detector
        # predictions = haar_face_detect(image, 1.25, 5)
        predictions = cnn_face_detect(image)
        # predictions = dlib_face_detect(image)
        num_correct, num_faces, false_pos = compute_accuracy(label_set, predictions)
        total_num_correct += num_correct
        total_faces += num_faces
        total_false_pos += false_pos
    return total_num_correct, total_faces, total_false_pos
def test_dlib_detection(fold_num, file_names, face_images, face_labels, upscale):
    """Evaluate the dlib detector on one fold; returns (correct, faces, false_pos)."""
    correct = faces = false_pos = 0
    for image, labels in zip(face_images, face_labels):
        detections = dlib_face_detect(image, upscale=upscale)
        c, n, fp = compute_accuracy(labels, detections)
        correct += c
        faces += n
        false_pos += fp
    return correct, faces, false_pos
def test_haar_detection(fold_num, file_names, face_images, face_labels, scale_factor, min_neighbors):
    """Evaluate the Haar cascade on one fold; returns (correct, faces, false_pos)."""
    correct = faces = false_pos = 0
    for image, labels in zip(face_images, face_labels):
        detections = haar_face_detect(image, scale_factor, min_neighbors)
        c, n, fp = compute_accuracy(labels, detections)
        correct += c
        faces += n
        false_pos += fp
    return correct, faces, false_pos
def test_cnn_detection(fold_num, file_names, face_images, face_labels):
    """Evaluate the CNN detector on one fold, caching predictions to a pickle.

    If the prediction pickle already exists, it is scored directly; otherwise
    predictions are computed, scored, and written out.
    Returns (num_correct, num_faces, num_false_positives).
    NOTE(review): ``factor``/``threshold`` are module globals defined outside
    this section, ``file`` below is computed but unused, and the write path
    assumes ``directory`` already exists — confirm all three.
    """
    directory = 'predictions/facenet/{:03}-{}{}{}'.format(int(factor*1000), int(threshold[0]*10), int(threshold[1]*10), int(threshold[2]*10))
    pkl_file = directory + '/fold-{}.pkl'.format(fold_num)
    total_faces, total_num_correct, total_false_pos = 0, 0, 0
    if os.path.exists(pkl_file):
        print('found file, loading')
        with open(pkl_file, 'rb') as f:
            fold_predictions = pickle.load(f)
        # iterates over each image in the fold
        for face_detections, labels in zip(fold_predictions, face_labels):
            num_correct, num_faces, false_pos = compute_accuracy(labels, face_detections)
            total_num_correct += num_correct
            total_faces += num_faces
            total_false_pos += false_pos
        return total_num_correct, total_faces, total_false_pos
    # predictions do not already exist for the fold, so make them and then write them
    count = 0
    fold_predictions = []
    for image, label_set in zip(face_images, face_labels):
        file = file_names[count]
        count += 1
        predictions = cnn_face_detect(image)
        fold_predictions.append(predictions)
        num_correct, num_faces, false_pos = compute_accuracy(label_set, predictions)
        total_num_correct += num_correct
        total_faces += num_faces
        total_false_pos += false_pos
    with open(pkl_file, 'wb') as f:
        pickle.dump(fold_predictions, f)
    return total_num_correct, total_faces, total_false_pos
def test_on_one_image(file_names, face_labels):
    """Debug helper: run a detector on one hard-coded image and print the
    per-prediction matching checks, then display the boxes over the image.

    Fixes: the original used the stale loop variable ``i`` (shadowed by two
    later loops) instead of the found ``index`` when looking up labels, and
    did not handle the image being absent from ``file_names``.
    """
    name = '2002/08/05/big/img_3688'
    img = cv2.imread('img/FDDB-pics/{}.jpg'.format(name))
    # Locate the image in the fold listing so its labels can be looked up.
    index = -1
    for i, file in enumerate(file_names):
        if name in file:
            index = i
            break
    if index == -1:
        print('file {} not found in fold listing'.format(name))
        return
    print('found file at index {}'.format(index))
    # faces = cnn_face_detect(img)
    faces = haar_face_detect(img, 1.3, 4)
    label_set = face_labels[index]
    print("detections: (x,y,w,h)")
    for pred_num, prediction in enumerate(faces):
        print("*************** prediction {} *************".format(pred_num))
        x_p, y_p, w_p, h_p = prediction
        print(x_p, y_p, w_p, h_p)
        cv2.rectangle(img, (int(x_p), int(y_p)), (int(x_p+w_p), int(y_p+h_p)), (255, 0, 0), 2)
        center_px, center_py = x_p + w_p/2, y_p + h_p/2
        found_one = False
        for label in label_set:
            x_l, y_l, w_l, h_l = label
            print(x_l, y_l, w_l, h_l)
            center_lx, center_ly = x_l + w_l/2, y_l + h_l/2
            # Print each matching criterion separately for debugging.
            print(abs(center_lx - center_px) < .3*w_l)
            print(abs(center_ly - center_py) < .3*h_l)
            print(.5*w_l < w_p and w_p < 1.5*w_l)
            print(.5*h_l < h_p and h_p < 1.5*h_l)
            print("//////////////////")
            if (abs(center_lx - center_px) < .3*w_l and abs(center_ly - center_py) < .3*h_l
                    and .5*w_l < w_p and w_p < 1.5*w_l and .5*h_l < h_p and h_p < 1.5*h_l):
                found_one = True
                break
        if found_one is False:
            print('false pos found for prediction {}'.format(pred_num))
    print('labels:')
    print(face_labels[index])
    plt.figure()
    plt.imshow(img)
    plt.show()
# Compares the accuracy of the CNN (FaceNet) detector against ground truth
# over folds 2-5, printing aggregate totals and wall-clock time.
def test_accuracy():
    """Evaluate the CNN detector over folds 2-5 and print aggregate
    accuracy, false-positive count and elapsed time.

    Fix: the original read each fold's image listing twice back-to-back;
    one read is sufficient.
    """
    total_correct, total_faces, total_false_pos = 0, 0, 0
    start_time = datetime.now()
    for fold_num in [2, 3, 4, 5]:
        img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
        with open(img_list_file, 'r') as f:
            file_names = [x.rstrip() for x in f.readlines()]
        face_images = get_images_from_file_list(file_names)
        face_labels = retrieve_face_list(fold_num)
        # num_correct, num_faces, false_pos = test_detection(fold_num, file_names, face_images, face_labels)
        num_correct, num_faces, false_pos = test_cnn_detection(fold_num, file_names, face_images, face_labels)
        total_correct += num_correct
        total_faces += num_faces
        total_false_pos += false_pos
    delta = datetime.now() - start_time
    print('******** TOTALS ***********')
    print('found {}/{} faces'.format(total_correct, total_faces))
    print('total false pos: {}'.format(total_false_pos))
    print('accuracy: {}'.format(total_correct/total_faces))
    print('Time elapsed (hh:mm:ss.ms) {}'.format(delta))
def test_one_image():
    """Load fold 5's listing, images and labels, then run the
    single-image debug helper."""
    fold = 5
    listing = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold)
    with open(listing, 'r') as fh:
        names = [line.rstrip() for line in fh.readlines()]
    # Images are loaded for parity with the other drivers even though only
    # the names and labels are passed on.
    face_images = get_images_from_file_list(names)
    labels = retrieve_face_list(fold)
    test_on_one_image(names, labels)
def test_on_manual_labels():
    """Evaluate the CNN detector against the hand-labelled image set and
    print accuracy, false positives and elapsed time."""
    with open('img/manual/image_list.txt', 'r') as fh:
        names = [line.rstrip() for line in fh.readlines()]
    images = get_images_from_file_list(names)
    t0 = datetime.now()
    labels = retrieve_manual_face_labels(1, names)
    # num_correct, num_faces, false_pos = test_detection(1, names, images, labels)
    correct, faces, fp = test_cnn_detection(1, names, images, labels)
    elapsed = datetime.now() - t0
    print('found {}/{} faces'.format(correct, faces))
    print('total false pos: {}'.format(fp))
    print('accuracy: {}'.format(correct/faces))
    print('Time elapsed (hh:mm:ss.ms) {}'.format(elapsed))
def test_haar():
    """Grid-search the Haar cascade's (min_neighbors, scale_factor) over
    folds 2-5, printing accuracy/false-positive stats per setting."""
    folds = [2,3,4,5]
    # prepare fold info
    fold_to_info_dict = {}
    for fold_num in folds:
        img_list_file = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold_num)
        with open(img_list_file, 'r') as f:
            file_names = [x.rstrip() for x in f.readlines()]
        face_images = get_images_from_file_list(file_names)
        face_labels = retrieve_face_list(fold_num)
        fold_to_info_dict[fold_num] = (file_names, face_images, face_labels)
    for min_neighbors in [0,1,2,3,4,5]:
        scale = 1.05
        # NOTE(review): repeated float += 0.05 accumulates rounding error, so
        # the scales tested are approximately 1.05..1.45 — intentional?
        while scale < 1.5:
            start = datetime.now()
            total_correct, total_faces, total_false_pos = 0, 0, 0
            for fold_num in folds:
                file_names, face_images, face_labels = fold_to_info_dict[fold_num]
                num_correct, num_faces, false_pos = test_haar_detection(fold_num, file_names, face_images, face_labels, scale, min_neighbors)
                total_correct += num_correct
                total_faces += num_faces
                total_false_pos += false_pos
            delta = datetime.now() - start
            print('minNeighbors={}, scale={}: accuracy={}, avgFalsePos={}, ttlFP={}, timing={}'.format(min_neighbors, scale, total_correct/total_faces, total_false_pos/len(folds), total_false_pos, delta))
            scale += .05
def test_dlib():
    """Grid-search the dlib detector's upscale parameter over folds 2-5."""
    folds = [2, 3, 4, 5]
    # Cache (names, images, labels) per fold so every upscale setting reuses
    # the same data.
    fold_info = {}
    for fold in folds:
        listing = 'img/FDDB-folds/FDDB-fold-{:02}.txt'.format(fold)
        with open(listing, 'r') as fh:
            names = [line.rstrip() for line in fh.readlines()]
        fold_info[fold] = (names, get_images_from_file_list(names), retrieve_face_list(fold))
    for upscale in (0, 1, 2, 3):
        t0 = datetime.now()
        correct = faces = false_pos = 0
        for fold in folds:
            names, images, labels = fold_info[fold]
            c, n, fp = test_dlib_detection(fold, names, images, labels, upscale)
            correct += c
            faces += n
            false_pos += fp
        delta = datetime.now() - t0
        print('upscale={}: accuracy={}, avgFalsePos={}, ttlFP={}, time: {}'.format(upscale, correct/faces, false_pos/len(folds), false_pos, delta))
if __name__ == "__main__":
# main()
test_haar()
# test_dlib()
# test_one_image()
# test_on_manual_labels()
| ryan-mccaffrey/glasses-for-everyone | detect_face.py | detect_face.py | py | 18,364 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2... |
23077134371 | from spa.clientside.asyncdbhandler import CAsyncDBHandler
from spa import BaseServiceID, tagBaseRequestID
class CPostgres(CAsyncDBHandler):
    """Client-side handler for SocketPro's asynchronous, SQL-streaming
    PostgreSQL service. Adds the service id, PostgreSQL-specific Open flag
    options and plugin error codes on top of CAsyncDBHandler."""
    # Asynchronous and SQL streaming postgreSQL service id
    sidPostgres = BaseServiceID.sidReserved + 0x6FFFFFF4
    """
    An Open flag option, which is specific to PostgreSQL plugin.
    It is noted that this flag option is not implemented within SocketPro plugin yet.
    """
    ROWSET_META_FLAGS_REQUIRED = 0x40000000
    """
    An Open flag option, which is specific to PostgreSQL plugin.
    When the flag option is used with the method Open or open,
    it forces fetching data from remote PostgreSQL server to SocketPro plugin row-by-row instead of all.
    The flag option should be used if there is a large number of data within a rowset.
    """
    USE_SINGLE_ROW_MODE = 0x20000000
    # error codes for unexpected programming errors
    ER_NO_DB_OPENED_YET = -1981
    ER_BAD_END_TRANSTACTION_PLAN = -1982
    ER_NO_PARAMETER_SPECIFIED = -1983
    ER_BAD_PARAMETER_COLUMN_SIZE = -1984
    ER_BAD_PARAMETER_DATA_ARRAY_SIZE = -1985
    ER_DATA_TYPE_NOT_SUPPORTED = -1986
    ER_BAD_TRANSTACTION_STAGE = -1987
    def __init__(self, sid=sidPostgres):
        # sid defaults to the Postgres service id; callers may pass a custom one.
        super(CPostgres, self).__init__(sid)
| udaparts/socketpro | bin/spa/clientside/upostgres.py | upostgres.py | py | 1,257 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "spa.clientside.asyncdbhandler.CAsyncDBHandler",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "spa.BaseServiceID.sidReserved",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "spa.BaseServiceID",
"line_number": 6,
"usage_type": "name"... |
29647725517 | import sqlite3
import pandas as pd
import time
import sys
from drugbank.drugbank_index_query import drugbank_search
from hpo.hpo_index_query import hpo_search
from omim.omim_index_query import omim_search
from stitch.stitch_chemical_sources_index_query import stitch_chemical_sources_search
from stitch.stitch_br08303_index_query import stitch_br08303_search
from python_requests import *
from usefull_temp import *
from link import *
'''
GLOBAL LISTS :
disease_list = {disease_name : [occurrence, source]}
curing_drug_list = {drugname : [occurrence, description, indication, toxicity, sources]}
side_effects_from_drug_list = {drugname : [occurrence, description, indication, toxicity, sources]}
'''
## SEARCH FROM HPO
def correction_hpo_disease_label(label):
    """Normalize a raw HPO/OMIM disease label.

    Drops a leading '#<id> ' prefix, then truncates at the first comma and
    at the first semicolon. Returns the cleaned label.
    """
    if label.startswith('#'):
        label = label.split(" ", 1)[1]
    if ',' in label:
        label = label.split(",", 1)[0]
    if ';' in label:
        label = label.split(";", 1)[0]
    return label
def get_diseases_from_hpo(hpo_id):
    """Return normalized disease labels annotated with the HPO sign ``hpo_id``.

    ``hpo_id`` may use '_' instead of ':' (e.g. "HP_0000001"); it is
    converted before querying. Fixes: the original interpolated the id into
    the SQL string (injection-prone, and double quotes are identifier quotes
    in SQLite) and never closed the connection.
    """
    DATABASE = "../data/HPO/hpo_annotations.sqlite"
    hpo_id = hpo_id.replace('_', ':')
    disease_list = []
    conn = sqlite3.connect(DATABASE)
    try:
        curs = conn.cursor()
        # Parameterized query: never build SQL from the raw id.
        curs.execute(
            "SELECT disease_label FROM phenotype_annotation WHERE sign_id = ?",
            (hpo_id,),
        )
        for (disease,) in curs.fetchall():
            disease = disease.lower()
            disease_list.append(correction_hpo_disease_label(disease))
        curs.close()
    finally:
        conn.close()
    return disease_list
## SEARCH FROM SIDER
SIDER_FILE = "../data/MEDDRAS/meddra_all_se.csv"

def get_sider_id(symptom):
    """Return (stitch_compound_id1, stitch_compound_id2) pairs for every
    SIDER row whose side-effect name contains ``symptom`` (case-insensitive
    on the name; callers lower-case ``symptom`` beforehand).

    Fix: replaces the per-row Python loop with chained indexing by a single
    vectorized substring filter.
    """
    df = pd.read_csv(SIDER_FILE, sep=',')
    mask = df['side_effect_name'].str.lower().str.contains(symptom, regex=False)
    hits = df.loc[mask, ['stitch_compound_id1', 'stitch_compound_id2']]
    return list(hits.itertuples(index=False, name=None))
## SEARCH FROM DRUGBANK
## GLOBAL SEARCH FUNCTION
def search_disease_from_symptom(symptom, disease_list):
    """Search HPO for ``symptom`` and merge the linked diseases into
    ``disease_list`` ({name: [occurrence_count, source]}), returned sorted
    by value (count first) in descending order.
    NOTE(review): ``Total_hpo_count`` and ``count`` are computed but never
    used (the lost-item count is not reported) — confirm intent.
    """
    ## get symptoms
    hpo_query = create_hpo_query(symptom)
    content_hpo = hpo_search(hpo_query)
    ## complete symptoms
    ## Count lost items
    Total_hpo_count = len(content_hpo)
    count = 0
    for elem in content_hpo:
        hpo_id = elem[0]
        disease_list_from_hpo = get_diseases_from_hpo(hpo_id)
        if disease_list_from_hpo == []:
            count += 1
        else:
            for disease in disease_list_from_hpo:
                if disease in disease_list:
                    disease_list[disease][0] += 1
                else:
                    disease_list[disease] = [1, "hpo"]
    disease_list = dict(sorted(disease_list.items(), key=lambda item: item[1], reverse=True))
    return disease_list
def search_side_effects_drug_from_content_sider_id(content_sider_id, side_effects_from_drug_list):
    """Chain SIDER compound ids -> STITCH ATC codes -> DrugBank entries and
    merge the resulting drugs into ``side_effects_from_drug_list``
    ({name: [count, description, indication, toxicity, sources]}).
    Returns the dict re-sorted by value (count first) descending.
    NOTE(review): the ATC list is truncated to 500 entries to bound the
    DrugBank lookups — presumably a performance cap; confirm.
    """
    ## link with stitch
    content_stitch_atc = []
    for elem in content_sider_id:
        id1 = elem[0]
        id2 = elem[1]
        content_stitch_atc += sider_to_stitch_compoundid1(id1, id2)
    if len(content_stitch_atc) > 500:
        content_stitch_atc = content_stitch_atc[:500]
    ## link with drugbank
    content_drugbank = []
    for atc_code in content_stitch_atc:
        content_drugbank += stitch_atc_code_to_drugbank(atc_code)
    for item in content_drugbank:
        name = item[0]
        if name in side_effects_from_drug_list:
            side_effects_from_drug_list[name][0] += 1
        else:
            description = item[1]
            indication = item[2]
            toxicity = item[3]
            bloc = [1, description, indication, toxicity, 'sider / stitch / drugbank']
            side_effects_from_drug_list[name] = bloc
    side_effects_from_drug_list = dict(sorted(side_effects_from_drug_list.items(), key=lambda item: item[1], reverse=True))
    return side_effects_from_drug_list
def search_side_effects_drug_from_drugbank(symptom, side_effects_from_drug_list):
    """Query DrugBank for drugs with ``symptom`` as a side effect and merge
    them into ``side_effects_from_drug_list``
    ({name: [count, description, indication, toxicity, sources]}).
    Returns the dict re-sorted by value (count first) descending.
    """
    for item in drugbank_search(create_drugbank_query_side_effect(symptom)):
        name = item[1]
        if name in side_effects_from_drug_list:
            side_effects_from_drug_list[name][0] += 1
        else:
            side_effects_from_drug_list[name] = [1, item[2], item[3], item[4],
                                                 'sider / stitch / drugbank']
    return dict(sorted(side_effects_from_drug_list.items(),
                       key=lambda kv: kv[1], reverse=True))
def search_curing_drug_from_symtom(symptom, curing_drug_list):
    """Query DrugBank for drugs indicated for ``symptom`` and merge them into
    ``curing_drug_list`` ({name: [count, description, indication, toxicity,
    sources]}). Returns the dict re-sorted by value (count first) descending.
    (Name keeps the historical 'symtom' spelling — it is part of the public
    interface.)
    """
    for item in drugbank_search(create_drugbank_query(symptom)):
        name = item[1]
        if name in curing_drug_list:
            curing_drug_list[name][0] += 1
        else:
            curing_drug_list[name] = [1, item[2], item[3], item[4], "drugbank"]
    return dict(sorted(curing_drug_list.items(), key=lambda kv: kv[1], reverse=True))
def main():
    """CLI entry point: look up diseases, curing drugs and side-effect drugs
    for a symptom. Flags: -s <symptom>, -p <0|1|2|3> (0 diseases, 1 curing
    drugs, 2 side-effect drugs, 3 all)."""
    symptom = "abdominal"
    # correction of the input
    symptom = symptom.lower()
    ## THINGS TO PRINT : {0: disease_list, 1: curing_drug_list, 2: side_effects_from_drug_list, 3: All}
    print_value = 3
    ## CHECK ARGS
    args = sys.argv
    if "-s" in args:
        pos = args.index("-s")
        symptom = args[pos+1]
    if "-p" in args:
        pos = args.index("-p")
        print_value = int(args[pos+1])
    # initiation of global lists
    disease_list = {}
    curing_drug_list = {}
    content_sider_id = []
    side_effects_from_drug_list = {}
    # NOTE(review): the reassignments inside this closure rebind locals only;
    # the outer dicts are updated solely through in-place mutation performed
    # by the search_* helpers — confirm that is the intended flow.
    def print_function(print_value, disease_list, curing_drug_list, side_effects_from_drug_list):
        if print_value==0:
            disease_list = search_disease_from_symptom(symptom, disease_list)
            print(len(disease_list))
            printlist(disease_list)
        elif print_value==1:
            curing_drug_list = search_curing_drug_from_symtom(symptom, curing_drug_list)
            print(len(curing_drug_list))
            printlist(curing_drug_list)
        elif print_value==2:
            content_sider_id = get_sider_id(symptom)
            content_sider_id = content_sider_id[:5]
            side_effects_from_drug_list = search_side_effects_drug_from_content_sider_id(content_sider_id, side_effects_from_drug_list)
            print(len(side_effects_from_drug_list))
            printlist(side_effects_from_drug_list)
    start = time.time()
    if print_value in [0, 1, 2]:
        print_function(print_value, disease_list, curing_drug_list, side_effects_from_drug_list)
    if print_value == 3:
        print_function(0, disease_list, curing_drug_list, side_effects_from_drug_list)
        print_function(1, disease_list, curing_drug_list, side_effects_from_drug_list)
        print_function(2, disease_list, curing_drug_list, side_effects_from_drug_list)
    end = time.time()
    print("#####")
    print()
    print(f"time : {end - start}")
if __name__ == '__main__':
    main()
{
"api_name": "sqlite3.connect",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "hpo.hpo_index_query.hpo_search",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "drugba... |
70489044264 | import pytest
from unittest.mock import AsyncMock, patch
from api.exceptions import InvalidParameterError
from crawler.default.instances.second_instance import SecondInstance
# Mock for the ClientSession HTTP response; .text() resolves to a fixed body.
mock_response = AsyncMock()
mock_response.text.return_value = 'Sample Text'
@pytest.mark.asyncio
async def test_capturar_numero_processo_codigo_invalid():
    """An invalid process number must raise InvalidParameterError."""
    instance = SecondInstance("TJ", "http://example.com")
    with pytest.raises(InvalidParameterError):
        await instance._capturar_numero_processo_codigo("123456")
@pytest.mark.asyncio
@patch('crawler.default.instances.second_instance.ClientSession')
async def test_consultar_processo(mock_session):
    """_consultar_processo should return the response body text."""
    # Route the session's async context manager to the module-level mock_response.
    mock_session.return_value.__aenter__.return_value.get.return_value = mock_response
    instance = SecondInstance("TJ", "http://example.com")
    result = await instance._consultar_processo("789")
    assert result == "Sample Text"
| BrunoPisaneschi/JusBrasil | tests/unit/crawler/default/instances/test_second_instance.py | test_second_instance.py | py | 921 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "crawler.default.instances.second_instance.SecondInstance",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 15,
"usage_type": "call"
... |
31618419583 | import torch
from pathlib import Path
import copy
import time
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import pdb
import skimage
from distutils.version import LooseVersion
from skimage.transform import resize as sk_resize
# Default compute device: first CUDA GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """A wrapper for Scikit-Image resize().
    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The right parameters depend on the version
    of skimage. This solves the problem by using different parameters per
    version. And it provides a central place to control resizing defaults.
    """
    # NOTE(review): distutils.version.LooseVersion is deprecated and distutils
    # is removed in Python 3.12 — consider packaging.version.parse instead.
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        # New in 0.14: anti_aliasing. Default it to False for backward
        # compatibility with skimage 0.13.
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range, anti_aliasing=anti_aliasing,
            anti_aliasing_sigma=anti_aliasing_sigma)
    else:
        return skimage.transform.resize(
            image, output_shape,
            order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range)
def minimize_mask(bbox, mask, mini_shape):
    """Resize masks to a smaller version to reduce memory load.

    bbox: per-instance [y1, x1, y2, x2] boxes; mask: [H, W, num_instances].
    Mini-masks can be resized back to image scale using expand_mask().
    Fix: ``np.bool`` was removed in NumPy 1.24 — use the builtin ``bool``.
    """
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        # Pick slice and cast to bool in case load_mask() returned wrong dtype
        m = mask[:, :, i].astype(bool)
        y1, x1, y2, x2 = bbox[i][:4]
        m = m[y1:y2, x1:x2]
        if m.size == 0:
            raise Exception("Invalid bounding box with area of zero")
        # Resize with bilinear interpolation, then re-binarize.
        m = resize(m, mini_shape)
        mini_mask[:, :, i] = np.around(m).astype(bool)
    return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
    """Resizes mini masks back to image size. Reverses the change
    of minimize_mask().

    mini_mask: [num_instances, h, w]; returns [num_instances, H, W] bools.
    Fix: ``np.bool`` was removed in NumPy 1.24 — use the builtin ``bool``.
    """
    mask = np.zeros((mini_mask.shape[0],) + image_shape[:2], dtype=bool)
    for i in range(mask.shape[0]):
        m = mini_mask[i, :, :]
        y1, x1, y2, x2 = bbox[i][:4]
        h = y2 - y1
        w = x2 - x1
        # Resize with bilinear interpolation and paste into the box region.
        m = resize(m, (h, w))
        mask[i, y1:y2, x1:x2] = np.around(m).astype(bool)
    return mask
def unmold_mask(mask, bbox, image_shape):
    """Converts a mask generated by the neural network to a format similar
    to its original shape.

    mask: [height, width] of type float. A small, typically 28x28 mask.
    bbox: [y1, x1, y2, x2]. The box to fit the mask in.
    Returns a binary mask with the same size as the original image.
    Fix: ``np.bool`` was removed in NumPy 1.24 — use the builtin ``bool``.
    """
    threshold = 0.5
    y1, x1, y2, x2 = bbox
    mask = resize(mask, (y2 - y1, x2 - x1))
    mask = np.where(mask >= threshold, 1, 0).astype(bool)
    # Put the mask in the right location.
    full_mask = np.zeros(image_shape, dtype=bool)
    full_mask[y1:y2, x1:x2] = mask
    return full_mask
def model_out_to_unmold(outputs28):
    """Upscale a batch of (batch, 1, 28, 28) network masks to full 224x224
    resolution and return them as a float32 tensor of shape (batch, 1, 224, 224)."""
    n = outputs28.size(0)
    # (batch, 1, 28, 28) -> (28, 28, batch) for unmold_mask.
    masks = outputs28.detach().cpu().numpy()[:, 0, :, :].transpose(1, 2, 0)
    full = unmold_mask(masks, [0, 0, 223, 223], (224, 224, n))
    # (224, 224, batch) -> (batch, 1, 224, 224)
    full = full[np.newaxis, ...].transpose(3, 0, 1, 2).astype(np.float32)
    return torch.from_numpy(full)
def viz_prediction(track_sample, pred, epoch):
    """Plot the four scan channels (dwi/flair/t1/t2), the ground-truth label
    and the predicted mask for one tracking sample, saving the figure to
    sample_tracking/<epoch>.jpg.
    NOTE(review): assumes the sample_tracking/ directory already exists.
    """
    scans, label = track_sample
    scans, label = scans.numpy().transpose((1, 2, 0)), label.numpy()[0][..., np.newaxis]
    pred = pred[0].numpy()[..., np.newaxis]
    # Stack everything channel-last so each panel is one slice.
    scans_stack = np.concatenate([scans, label, pred], axis=-1)
    fig = plt.figure(figsize=(20, 6))
    fig.suptitle('TRACKING Sample')
    for slice_, scan in enumerate(['dwi', 'flair', 't1', 't2', 'label', 'predicted']):
        ax = plt.subplot(1, 6, slice_ + 1)
        show_single_img(scans_stack[:, :, slice_], (scan == 'label' or scan == 'predicted'))
        plt.tight_layout()
        ax.set_title(scan)
        ax.axis('off')
    # plt.show()
    plt.savefig('sample_tracking/'+ str(epoch)+ '.jpg')
def actual_predicted(actual, predicted, save_path):
    """Save a side-by-side figure comparing the ground-truth mask and the
    predicted mask to ``save_path``."""
    fig = plt.figure(figsize=(10, 5))
    fig.suptitle('Actual-Predicted')
    for position, (img, title) in enumerate([(actual, 'Actual'), (predicted, 'Predicted')], start=1):
        ax = plt.subplot(1, 2, position)
        show_single_img(img)
        plt.tight_layout()
        ax.set_title(title)
        ax.axis('off')
    # plt.show()
    plt.savefig(save_path)
def show_single_img(image, label=False):
    """Render ``image`` on the current axes — binary colormap for
    label/prediction masks, grayscale otherwise."""
    plt.imshow(image, cmap='binary' if label else 'gray')
def get_prob_map28(outputs28):
    """Collapse a (batch, 2, H, W) two-class probability map into a
    (batch, 1, H, W) map holding, per pixel, the value of the argmax class
    (with channel 0 replaced by its complement, as in the original code).

    Fixes: the original wrote through ``outputs28.data`` which aliases the
    caller's tensor and mutated it in place; ``squeeze()`` collapsed a
    batch of size 1; the one-hot width was inferred and broke when only one
    class appeared in the argmax.
    """
    # Confidence of the argmax class at each pixel.
    max_prob, pred28_argmax = torch.max(outputs28, dim=1, keepdim=True)  # (b, 1, H, W)
    # Work on a copy so the caller's tensor is left untouched.
    pred28 = outputs28.detach().clone()
    pred28[:, 0, :, :] = 1 - outputs28[:, 0, :, :]
    # squeeze(1) (not squeeze()) keeps batch size 1 intact; pinning
    # num_classes keeps the one-hot width correct when a class is absent.
    one_hot = F.one_hot(pred28_argmax.squeeze(1),
                        num_classes=outputs28.size(1)).permute(0, 3, 1, 2).bool()
    pred28_prob = torch.sum(pred28 * one_hot, dim=1, keepdim=True)  # (b, 1, H, W)
    return pred28_prob
def dice_loss(input, target):
    """Soft Dice loss between two tensors, smoothed with 1 to avoid
    division by zero; 0 for identical masks, approaching 1 for disjoint ones."""
    smooth = 1.
    p = input.view(-1)
    t = target.view(-1)
    overlap = (p * t).sum()
    return 1 - (2. * overlap + smooth) / (p.sum() + t.sum() + smooth)
# borrow functions and modify it from https://github.com/Kaixhin/FCN-semantic-segmentation/blob/master/main.py
# Calculates class intersections over unions
def iou(pred, target):
    """Per-class IoU for a 2-class segmentation; a class absent from both
    pred and target yields NaN so it can be excluded from averages."""
    ious = []
    for cls in range(2):  # n_class = 2 (background, lesion)
        pred_inds = pred == cls
        target_inds = target == cls
        intersection = pred_inds[target_inds].sum()
        union = pred_inds.sum() + target_inds.sum() - intersection
        if union == 0:
            ious.append(float('nan'))  # no ground truth: skip in evaluation
        else:
            ious.append(float(intersection) / max(union, 1))
    return ious
def pixel_acc(pred, target):
    """Pixel accuracy: the fraction of positions where pred equals target."""
    hits = (pred == target).sum()
    total = (target == target).sum()  # element count for integer masks
    return hits / total
def iou_boxes(box1, box2):
    """IoU of two boxes given as inclusive pixel coordinates (x1, y1, x2, y2).

    Fix: the original measured box areas without the +1 pixel-inclusive
    correction while the intersection area used it, so identical boxes
    could produce IoU > 1 or even negative values (e.g. -2 for two equal
    2x2 boxes). Areas now use the same inclusive convention as the
    intersection.
    """
    xa1, ya1, xa2, ya2 = box1
    xb1, yb1, xb2, yb2 = box2
    # A box spanning x1..x2 inclusively covers (x2 - x1 + 1) pixels.
    anchor_area = (ya2 - ya1 + 1) * (xa2 - xa1 + 1)
    box_area = (yb2 - yb1 + 1) * (xb2 - xb1 + 1)
    inter_x1, inter_y1 = max(xb1, xa1), max(yb1, ya1)
    inter_x2, inter_y2 = min(xb2, xa2), min(yb2, ya2)
    # Original strict-overlap test kept: edge-touching boxes count as disjoint.
    if inter_x1 < inter_x2 and inter_y1 < inter_y2:
        inter_area = (inter_y2 - inter_y1 + 1) * (inter_x2 - inter_x1 + 1)
        return inter_area / (anchor_area + box_area - inter_area)
    return 0.
def get_max_ious_boxes_labels(scans, label224, valid_boxes):
    """Select up to 10 anchor boxes from ``valid_boxes`` for one sample.

    If ``label224`` contains lesions, anchors with the highest IoU against
    each lesion's bounding box are selected (10 // num_lesions per lesion)
    and labelled 1.0. Otherwise the 10 anchors best overlapping the brain
    region of the first scan channel are selected and labelled 0.0; returns
    None when the scan is entirely background.
    NOTE(review): class_labels always has length 10 even if fewer anchors
    were selected (e.g. 3 lesions -> 9 anchors) — confirm downstream use.
    """
    max_boxes = 10
    mask = label224
    # If there is some lesion on the mask, that is, if
    if len(np.unique(mask)) != 1:
        masked_labels = skimage.measure.label(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(masked_labels)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]
        # split the color-encoded mask into a set
        # of binary masks
        masks = masked_labels == obj_ids[:, None, None]
        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[0])
            xmax = np.max(pos[0])
            ymin = np.min(pos[1])
            ymax = np.max(pos[1])
            boxes.append([xmin, ymin, xmax, ymax])
        # only choose the top 10 boxes from this.
        ious = np.empty((len(valid_boxes), len(boxes)), dtype=np.float32)
        ious.fill(0)
        for num1, i in enumerate(valid_boxes):
            for num2, j in enumerate(boxes):
                ious[num1, num2] = iou_boxes(i, j)
        # choose the highest valued bounding boxes
        patches_for_objs = max_boxes // num_objs
        maxarg_ious = np.argsort(ious, axis=0)[::-1]
        selected_ious_args = []
        for obj in range(num_objs):
            # Take this object's best rows, then drop those rows so the same
            # anchor index is not reused for the next object.
            obj_max_indices = maxarg_ious[:patches_for_objs, obj].tolist()
            maxarg_ious = np.delete(maxarg_ious, obj_max_indices, axis=0)
            selected_ious_args.extend(obj_max_indices)
        # Return, the selected anchor boxes coords and the class_labels
        sel_anchors = valid_boxes[selected_ious_args]
        # and the all ones class labels
        class_labels = [1.0] * max_boxes
        return sel_anchors, class_labels
    # so there's no lesion at all in any part of the mask
    else:
        # box_for_scan_area: bound the non-background region of channel 0,
        # where "background" is the corner pixel's value.
        cornerVal = scans[0, 0, 0]
        pos = np.where(scans[0, :, :] != cornerVal)
        if len(pos[0]):
            x1_scan = np.min(pos[0])
            x2_scan = np.max(pos[0])
            y1_scan = np.min(pos[1])
            y2_scan = np.max(pos[1])
        else:
            return None
        box = (x1_scan, y1_scan, x2_scan, y2_scan)
        iou_vals = np.empty((len(valid_boxes)), dtype=np.float32)
        for index, anchor_box in enumerate(valid_boxes):
            iou_vals[index] = iou_boxes(anchor_box, box)
        maxarg_ious = np.argsort(iou_vals, axis=0)[::-1][:max_boxes]
        # Wont work as there s no way an entire anchor box in filled in this brain region
        # filter valid bounding boxes
        # valid_anchor_boxes_indices = np.where(
        #     (self.valid_anchor_boxes[:, 0] >= x1_scan) &
        #     (self.valid_anchor_boxes[:, 1] >= y1_scan) &
        #     (self.valid_anchor_boxes[:, 2] <= x2_scan) &
        #     (self.valid_anchor_boxes[:, 3] <= y2_scan)
        # )[0]
        sel_anchors = valid_boxes[maxarg_ious]
        class_labels = [0.0] * max_boxes
        return sel_anchors, class_labels
| hariharan98m/ischemic-stroke-detection | fcn_roialign/master2/utils.py | utils.py | py | 10,688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "distutils.versio... |
12486515502 | import cv2
from datetime import datetime, timedelta
import geojson
from geotiff import GeoTiff
from models import Model0
import netCDF4
import numpy as np
import pandas as pd
import os
from os import path
from scipy import interpolate
from scipy.io import loadmat, savemat
import torch
import wget
import tarfile
import data
def cosd(x):
    """Cosine of an angle given in degrees (lambda assignment replaced
    with a def per PEP 8 / E731)."""
    return np.cos(np.radians(x))

jdays = list(range(0,183+21,7))+list(range(329-21,364,7)) #load Julian days
def getconstfeatures(workdir, uregions, awsurl, print=print):
    """Build constant (time-invariant) features for ground stations and grid cells.

    Reads grid-cell geometry and station metadata from ``workdir``; reuses the
    cached ``stmeta.csv`` / ``grid.csv`` when present and complete, otherwise
    derives DEM, GLOBCOVER land-cover and SOIL features from raster sources
    (downloading archives from ``awsurl`` when missing locally).

    :param workdir: directory containing grid_cells.geojson and
        ground_measures_metadata.csv (and the cached feature tables)
    :param uregions: region labels to one-hot encode on grid cells / stations
    :param awsurl: base URL for downloading land-cover / soil archives
    :param print: logging callable (injectable by the caller; shadows builtin)
    :return: tuple ``(stmeta, grid, constfeatures)`` where ``constfeatures`` is
        the list of constant feature column names.

    NOTE(review): indentation of this function was reconstructed from a
    whitespace-stripped copy; confirm branch nesting against the original.
    """
    datadir = path.join(workdir,'..')
    print(f"getconstfeatures: datadir={datadir} list={os.listdir(datadir)}")
    file = path.join(workdir,'grid_cells.geojson')
    print(f"Loading {file}")
    with open(file) as f:
        grid0 = geojson.load(f)
    grid0 = pd.DataFrame([{'cell_id':g['properties']['cell_id'], 'region':g['properties']['region'],
                           'corners': np.array(g['geometry']['coordinates'])} for g in grid0['features']]).set_index('cell_id')
    file = path.join(workdir,'ground_measures_metadata.csv')
    print(f"Loading {file}")
    stmeta0 = pd.read_csv(file).set_index('station_id')
    stmetafile = path.join(workdir,'stmeta.csv')
    gridfile = path.join(workdir,'grid.csv')
    # Use the cached feature tables only when both files exist.
    read = path.isfile(stmetafile) and path.isfile(gridfile)
    if read:
        print(f'Loading stmeta from {stmetafile} and grid from {gridfile}')
        stmeta = pd.read_csv(stmetafile).set_index('station_id')
        grid = pd.read_csv(gridfile).set_index('cell_id')
        # Stations / cells present in the raw metadata but missing from the cache.
        noex = set(stmeta0.index).difference(set(stmeta.index)).union(set(grid0.index).difference(set(grid.index)))
        if len(noex) > 0:
            # NOTE(review): missing f-prefix — {noex} is printed literally.
            print('unvalid stmeta / grid for {noex}')
            read = False
        else:
            # Bounding box (with margin), rounded to the lon/lat grid step.
            lonr = 1.5
            lon1 = np.floor(min(grid['longitude'].values.min(),stmeta['longitude'].values.min())/lonr-1.)*lonr
            lon2 = np.ceil(max(grid['longitude'].values.max(),stmeta['longitude'].values.max())/lonr+1.)*lonr
            latr = 1.
            lat1 = np.floor(min(grid['latitude'].values.min(),stmeta['latitude'].values.min())/latr-1.)*latr
            lat2 = np.ceil(max(grid['latitude'].values.max(),stmeta['latitude'].values.max())/latr+1.)*latr
    if not read:
        print('Creating stmeta and grid')
        grid = grid0
        stmeta = stmeta0
        gll = np.vstack(grid['corners'].values)
        # Cell centroid coordinates from the corner polygons.
        grid['latitude'] = gll[:,:,1].mean(1)
        grid['longitude'] = gll[:,:,0].mean(1)
        lonr = 1.5; latr = 1.
        lon1 = np.floor(min(gll[:,:,0].min(),stmeta['longitude'].values.min())/lonr-1.)*lonr
        lon2 = np.ceil(max(gll[:,:,0].max(),stmeta['longitude'].values.max())/lonr+1.)*lonr
        lat1 = np.floor(min(gll[:,:,1].min(),stmeta['latitude'].values.min())/latr-1.)*latr
        lat2 = np.ceil(max(gll[:,:,1].max(),stmeta['latitude'].values.max())/latr+1.)*latr
        # One-hot region membership for grid cells; zeros for stations.
        for lab in uregions:
            grid[lab] = np.array([grid['region'][k]==lab for k in range(grid.shape[0])]).astype(np.float32)
            stmeta[lab] = np.zeros(stmeta.shape[0])
        # One-hot station-network membership (from station-id prefix); zeros for cells.
        for lab in ['CDEC', 'SNOTEL']:
            stmeta[lab] = np.array([stmeta.index[k][:len(lab)]==lab for k in range(stmeta.shape[0])]).astype(np.float32)
            grid[lab] = np.zeros(grid.shape[0])
        rgauss = 2.0
        def getaver (lon,lat,elev,r):
            # Gaussian-smooth ``elev`` with radius ``r`` (km, converted to grid
            # steps) and return an interpolator over point lists.
            ry = r/(111.*(lat[1]-lat[0]))
            rx = r/(111.*(lon[1]-lon[0])*cosd((lat1+lat2)*0.5))
            av = elev.copy()
            cv2.GaussianBlur(elev, (2*int(rgauss*rx)+1, 2*int(rgauss*ry)+1), rx, av, ry)
            f = interpolate.interp2d(lon, lat, av, kind='linear')
            return lambda lons, lats: np.array([f(lons[k], lats[k])[0] for k in range(lons.shape[0])])
        demfile = f"dem_N{lat1}_{lat2}_W{-lon1}_{-lon2}.mat"
        fname = path.join(datadir, demfile)
        if not path.isfile(fname):
            print('Creating DEM features')
            dem = data.getdem(lat1,lat2,lon1,lon2,dir=path.join(datadir,'dem'), matfile=fname)
        else:
            print(f'Loading {demfile}')
            dem = loadmat(fname)
        demlon = dem.pop('lon').squeeze()
        demlat = dem.pop('lat').squeeze()
        print('Calculation DEM features')
        for key in dem:
            if key[:2] != '__':
                elev = dem[key]
                if key == 'elev':
                    rads = [3, 10, 30, 100]
                    f = getaver(demlon,demlat,elev,1.)
                    grid['elevation_m'] = f(grid['longitude'], grid['latitude'])
                    # Smoothed elevation at several radii, relative to local elevation.
                    for r in rads:
                        f_av = getaver(demlon,demlat,elev,r)
                        name = 'elevation_'+str(r)
                        for d in [stmeta, grid]:
                            d[name] = f_av(d['longitude'], d['latitude']) - d['elevation_m']
                else:
                    rads1 = [1, 3, 10, 30]
                    for r in rads1:
                        f_av = getaver(demlon,demlat,elev,r)
                        name = key+str(r)
                        for d in [stmeta, grid]:
                            d[name] = f_av(d['longitude'], d['latitude'])
        # Sanity check: DEM-derived elevation vs the metadata's elevation.
        ev = getaver(demlon,demlat,dem['elev'],1.)(stmeta['longitude'], stmeta['latitude'])
        print(f"dem elevation/stmeta elevation = {ev/stmeta['elevation_m']}")
        del demlon,demlat,dem
        print('Loading GLOBCOVER')
        # Drop any stale GLOBCOVER columns before recomputing.
        for d in [stmeta, grid]:
            for key in [key for key in d.keys() if key[:9]=='GLOBCOVER']:
                d.pop(key)
        ncname = path.join(datadir,'C3S-LC-L4-LCCS-Map-300m-P1Y-2020-v2.1.1.nc')
        if not path.isfile(ncname):
            arch = 'land_cover_map.tar.gz'
            fname = path.join(datadir,arch)
            if not path.isfile(fname):
                print('Downloading '+arch)
                wget.download(awsurl+arch, out=fname)
            tar = tarfile.open(fname, "r:gz").extractall(datadir)
            # ncname = path.join(datadir, tar.getmembers()[0].get_info()['name'])
            os.remove(fname)
        print(f'Loading GLOBCOVER from {ncname}')
        nc = netCDF4.Dataset(ncname)
        lon = np.array(nc.variables['lon'][:])
        lat = np.array(nc.variables['lat'][:])
        # Crop the raster to the bounding box computed above.
        ok = ((lat>=lat1)&(lat<=lat2)).nonzero()[0]
        ilat0 = ok[0]; ilat1 = ok[-1]+1
        ok = ((lon>=lon1)&(lon<=lon2)).nonzero()[0]
        ilon0 = ok[0]; ilon1 = ok[-1]+1
        arr = np.array(nc.variables['lccs_class'][0,ilat0:ilat1,ilon0:ilon1])
        lon = lon[ilon0:ilon1]
        lat = lat[ilat0:ilat1]
        nc.close()
        # Print the percentage of pixels per class value.
        printvalstat = lambda arr: print ({t: (arr==t).sum()/arr.size*100. for t in np.unique(arr.reshape(-1))})
        printvalstat (arr)
        # Merge rare land-cover class codes into coarser categories.
        arr[(arr>=10) & (arr<30)] = 30
        arr[arr==110] = 100; arr[arr==120] = 100
        arr[(arr>130)&(arr<160)] = 130
        arr[arr==72] = 70; arr[arr==71] = 70
        arr[arr==201] = 200
        types = [30,70,90,100,130,200,210,220]
        printvalstat (arr)
        gstep=1./360.
        # rads = [1, 3, 10, 30]
        rads = [3]
        print('Calculation GLOBCOVER features')
        def calcfeatures(arr,types,gstep,prefix):
            # For each class value: smooth its indicator mask and sample the
            # smoothed class fraction at station / grid-cell locations.
            for t in types:
                eq = (arr==t).astype(np.float32)
                for r in rads:
                    ry = r/(111.*gstep)
                    rx = r/(111.*gstep*cosd((lat1+lat2)*0.5))
                    av = eq.copy()
                    cv2.GaussianBlur(eq, (2*int(rgauss*rx)+1, 2*int(rgauss*ry)+1), rx, av, ry)
                    for d in [stmeta, grid]:
                        ilon = ((d['longitude'].values-lon1)/(lon2-lon1)*arr.shape[1]).astype(np.int64)
                        ilat = ((lat2-d['latitude'].values)/(lat2-lat1)*arr.shape[0]).astype(np.int64)
                        d[prefix+str(t)+'_'+str(r)] = np.array([av[ilat[i]:ilat[i]+2,ilon[i]:ilon[i]+2].mean() for i in range(ilon.shape[0])])
            del eq,av
        calcfeatures(arr,types,gstep,'GLOBCOVER')
        del arr
        print('Loading SOIL')
        for d in [stmeta, grid]:
            for key in [key for key in d.keys() if key[:4]=='SOIL']:
                d.pop(key)
        tiffile = 'global_soil_regions_geoTIFF/so2015v2.tif'
        tifname = path.join(datadir,tiffile)
        if not path.isfile(tifname):
            arch = 'soil_regions_map.tar.gz'
            fname = path.join(datadir,arch)
            if not path.isfile(fname):
                print('Downloading '+arch)
                wget.download(awsurl+arch, out=fname)
            tar = tarfile.open(fname, "r:gz").extract('./'+tiffile, datadir)
            os.remove(fname)
        print(f'Loading SOIL from {tifname}')
        arr = np.array(GeoTiff(tifname).read_box([(lon1,lat1),(lon2,lat2)]))
        printvalstat (arr)
        # types = [7,21,50,54,64,74,75,81,83,92]
        # Coarsen soil class codes to their leading decade.
        arr[arr>10] = np.floor(arr[arr>10]/10)*10
        arr[arr==5] = 7; arr[arr==6] = 7
        printvalstat (arr)
        types = [7,20,50,60,70,80,90]
        # types = np.unique(arr.reshape(-1))
        gstep = 1./30.
        # rads = [3, 10, 30]
        rads = [10]
        print('Calculation SOIL features')
        calcfeatures(arr,types,gstep,'SOIL')
        del arr
        # clm = 'ba'
        # print('Loading '+clm)
        # badir = path.join(datadir, clm+'-nc')
        # if not path.isdir(badir):
        # arch = 'burned_areas_occurrence_map.tar.gz'
        # fname = path.join(datadir,arch)
        # if not path.isfile(fname):
        # print('Downloading '+arch)
        # wget.download(awsurl+arch, out=fname)
        # tar = tarfile.open(fname, "r:gz").extractall(datadir)
        # os.remove(fname)
        # rads = [10, 30]
        # for jd in jdays:
        # if all([clm+str(r)+'_'+str(jd) in grid for r in rads]):
        # continue
        # tday = (datetime(2001,1,1)+timedelta(days=jd)).strftime('%m%d')
        # file = path.join(badir,'ESACCI-LC-L4-'+clm+'-Cond-500m-P13Y7D-2000'+tday+'-v2.0.nc')
        # print(f'Loading {clm} {tday} from {file}')
        # nc = netCDF4.Dataset(file)
        # lon = np.array(nc.variables['lon'][:])
        # lat = np.array(nc.variables['lat'][:])
        # ok = ((lat>=lat1)&(lat<=lat2)).nonzero()[0]
        # ilat0 = ok[0]; ilat1 = ok[-1]+1
        # ok = ((lon>=lon1)&(lon<=lon2)).nonzero()[0]
        # ilon0 = ok[0]; ilon1 = ok[-1]+1
        # v = np.array(nc.variables[clm.lower()+'_occ'][ilat0:ilat1,ilon0:ilon1]).astype(np.float32)
        # lon = lon[ilon0:ilon1]
        # lat = lat[ilat0:ilat1]
        # for r in rads:
        # f = getaver(lon, lat, v, r)
        # for d in [stmeta, grid]:
        # d[clm+str(r)+'_'+str(jd)] = f (d['longitude'], d['latitude'])
        # nc.close()
        stmeta = stmeta.copy()
        grid = grid.copy()
        # NOTE(review): missing f-prefix — file names are printed literally.
        print('Saving stmeta to {stmetafile} and grid to {gridfile}')
        stmeta.to_csv(stmetafile)
        grid.to_csv(gridfile)
        print({key: grid[key].mean() for key in grid.keys() if key not in ['region', 'corners']})
        print({key: stmeta[key].mean() for key in stmeta.keys() if key not in ['name','state']})
    # Runs on both the cached and the freshly-built path: spatially
    # interpolate network / region tags between stations and grid cells.
    print('Interpolate regions tags')
    dtype = torch.float32
    x = {'xlo': stmeta['longitude'].values, 'xla': stmeta['latitude'].values,
         'ylo': grid['longitude'].values, 'yla': grid['latitude'].values}
    x = {key: torch.tensor(x[key], dtype=dtype)[None] for key in x}
    # Station -> grid: spread network tags onto grid cells.
    for lab in ['CDEC', 'SNOTEL']:
        x['xval'] = torch.tensor(stmeta[lab].values, dtype=dtype)[None,:,None]
        grid[lab] = Model0(x)[0,:,0].detach().numpy()
    # Swap source/target coordinates, then grid -> station for region tags.
    x = {key: x[('y' if key[0]=='x' else 'x')+key[1:]] for key in x if key[1:] in ['lo','la']}
    for lab in uregions:
        x['xval'] = torch.tensor(grid[lab].values, dtype=dtype)[None,:,None]
        stmeta[lab] = Model0(x)[0,:,0].detach().numpy()
    constfeatures = ['CDEC', 'elevation_m']
    rads = [100, 30, 10, 3]
    # rads = [100, 10]
    # rads = [30, 10, 3]
    constfeatures += ['elevation_'+str(r) for r in rads]
    # Convert absolute smoothed elevations into band-pass differences.
    for d in [stmeta, grid]:
        for r,r2 in zip(rads[1:],rads[:-1]):
            d['elevation_'+str(r2)] -= d['elevation_'+str(r)]
    # rads = [1, 3, 10, 30]
    rads = [1, 3, 30]
    for key in ['south', 'east']:
        constfeatures += [key+str(r) for r in rads]
        for r,r2 in zip(rads[1:],rads[:-1]):
            for d in [stmeta, grid]:
                # print([key,r2,np.abs(d[key+str(r2)]).mean(), r,np.abs(d[key+str(r)]).mean(),np.abs(d[key+str(r2)] - d[key+str(r)]).mean()])
                d[key+str(r2)] -= d[key+str(r)]
    rads = [1, 3, 10, 30]
    for key in ['aspect']:
        constfeatures += [key+str(r) for r in rads]
        for r,r2 in zip(rads[1:],rads[:-1]):
            for d in [stmeta, grid]:
                d[key+str(r2)] -= d[key+str(r)]
    # constfeatures += [key for key in grid if key[:9]=='GLOBCOVER' and key[-2:] in ['_1','10']] # and key[9:12] != '220'
    # constfeatures += [key for key in grid if key[:4]=='SOIL' and key[-2:] in ['_3','30']]
    constfeatures += [key for key in grid if key[:9]=='GLOBCOVER' and key[-2:] in ['_3']]
    constfeatures += [key for key in grid if key[:4]=='SOIL' and key[-2:] in ['10']]
    # constfeatures += [key for key in grid if (key[:9]=='GLOBCOVER') or (key[:4]=='SOIL')]
    print(f"constfeatures : {constfeatures}")
return stmeta,grid,constfeatures | drivendataorg/snowcast-showdown | 1st Place/src/features/constfeatures.py | constfeatures.py | py | 13,858 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "numpy.cos",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,... |
74949364585 | import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from RNN_torch.model import RNN
# Hyper parameters
BATCH_SIZE = 64  # NOTE(review): defined but unused in this script
EPOCH = 1  # NOTE(review): defined but unused in this script
TIME_STEP = 28  # how many time points of data the RNN considers per sequence
INPUT_SIZE = 1  # how many data points are fed to the RNN at each time step
LR = 0.01  # learning rate

rnn = RNN(INPUT_SIZE)
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all RNN parameters
loss_func = nn.MSELoss()

h_state = None  # initial hidden state; None lets the RNN zero-initialize it

plt.figure(1, figsize=(12, 5))
plt.ion()  # interactive mode so the plot updates inside the loop

for step in range(50):
    start, end = step * np.pi, (step + 1) * np.pi
    # use sin to predict cos
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])  # shape(batch, time_step, input_size)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    prediction, h_state = rnn(x, h_state)
    # Detach the hidden state from the graph so gradients do not flow
    # across iterations. !!! this step is important
    h_state = h_state.data
    loss = loss_func(prediction, y)
    optimizer.zero_grad()  # clear gradient for next train
    loss.backward()  # back propagation, compute gradient
    optimizer.step()
    # plot ground truth (red) vs prediction (blue)
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.5)

plt.ioff()
plt.show()
| xjtulyc/PKU_Weekly_Summary_repo | 20220719/cs231n assignment/assignment_3.py | assignment_3.py | py | 1,352 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "RNN_torch.model.RNN",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.MSELos... |
19509126438 | import pandas as pd
import requests
from datetime import datetime
# Discord invite endpoint; with_counts=true includes member counts in the payload.
DISCORD_URL = "https://discord.com/api/v9/invites/UQZpTQbCT4?with_counts=true"
STARTED_AT = datetime.now()  # NOTE(review): naive local time, formatted with a 'Z' suffix below — confirm UTC is not required

request = requests.get(DISCORD_URL)
data = request.json()

# Flatten the nested JSON payload into tabular columns (up to two levels deep)
# and stamp the snapshot with the collection time.
new_dataframe = pd.json_normalize(data, max_level=2)
new_dataframe["_started_at"] = STARTED_AT.strftime("%Y-%m-%dT%H:%M:%S.%fZ")

# Prepend the fresh snapshot to the historical records and persist.
old_dataframe = pd.read_parquet("../data/discord.parquet")
current_dataframe = pd.concat([new_dataframe, old_dataframe])
current_dataframe.to_parquet("../data/discord.parquet", compression="gzip")
| ndrluis/soberana-data-poc | extract/scripts/discord.py | discord.py | py | 566 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.json_normali... |
70955291624 | """add admin flag to user
Revision ID: dd535b1f37a1
Revises: 4519159d3019
Create Date: 2019-01-06 13:39:21.042745
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd535b1f37a1'  # this migration's id
down_revision = '4519159d3019'  # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable ``is_admin`` boolean to ``users``."""
    # batch_alter_table keeps the ALTER working on SQLite as well.
    with op.batch_alter_table('users', schema=None) as users_table:
        users_table.add_column(sa.Column('is_admin', sa.Boolean(), nullable=True))
def downgrade():
    """Revert the migration: drop the ``is_admin`` column from ``users``."""
    with op.batch_alter_table('users', schema=None) as users_table:
        users_table.drop_column('is_admin')
| euphwes/cubers.io | migrations/versions/014_dd535b1f37a1_add_admin_flag_to_user.py | 014_dd535b1f37a1_add_admin_flag_to_user.py | py | 797 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.... |
5987566968 | from django.urls import path
from .views import ListingsView, ListingView, SearchView
# Declare the URLs for the listings app here.
urlpatterns = [
    path('', ListingsView.as_view(), name="ListALL"),  # list all listings
    path('search', SearchView.as_view()),  # filtered search endpoint
    path('<slug>', ListingView.as_view()),  # Used for listing a particular view, not by PK(id) but by Slug field.
] | testusername190/Realest_Estate_Backend | backend/listings/urls.py | urls.py | py | 374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.ListingsView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.ListingsView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.ur... |
4084609951 | import customtkinter as ctk
class ConfirmDeleteOldestBackupDialog(ctk.CTkToplevel):
    """Confirmation dialog shown when saving a new backup would delete the oldest backup file."""

    def __init__(self, parent, controller, *args, **kwargs):
        """Build the dialog.

        :param parent: parent window the dialog is attached to
        :param controller: object exposing save_backup_dialog_event(input, dialog),
            invoked with input=True on "Yes" and input=False on "Cancel"
        """
        super().__init__(parent, *args, **kwargs)
        # Configure variables
        self.controller = controller
        self.label_text = "You are only allowed 10 backup files. If you save\nthis backup the oldest backup file will be deleted.\n\nAre you sure you want to continue with the backup?"
        # Configure window
        self.geometry("400x180")
        # BUG FIX: the original did ``self.title = f"..."``, which shadowed the
        # Toplevel.title() method with a string instead of setting the window
        # title (and would break any later self.title() call).
        self.title("Confirm delete last backup.")
        # Configure grid layout
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure((0, 1), weight=1)
        # Create label
        self.label = ctk.CTkLabel(self, text=self.label_text)
        self.label.grid(row=0, column=0, columnspan=2, padx=20, pady=20,
                        sticky="nsew")
        # Create button YES -> confirm and continue with the backup
        self.yes_button = ctk.CTkButton(self, text="Yes", command=lambda: self.controller.save_backup_dialog_event(input=True, dialog=self))
        self.yes_button.grid(row=1, column=0, padx=20, pady=20, sticky="nsew")
        # Create button NO -> cancel the backup
        self.no_button = ctk.CTkButton(self, text="Cancel", command=lambda: self.controller.save_backup_dialog_event(input=False, dialog=self))
        self.no_button.grid(row=1, column=1, padx=20, pady=20, sticky="nsew")
| berndklare/flashcards | dialogs/confirm_delete_oldest_backup_dialog.py | confirm_delete_oldest_backup_dialog.py | py | 1,389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "customtkinter.CTkToplevel",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "customtkinter.CTkLabel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkButton",
"line_number": 26,
"usage_type": "call"
},
{
"api_... |
11917002254 | from django.db import models
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
import uuid
from users.models import Profile
from ckeditor.fields import RichTextField
# Create your models here.
def user_directory_path(instance, filename):
    """Return the upload path 'blogs/<instance id>/<filename>' for a blog image."""
    return f'blogs/{instance.id}/{filename}'
class Category(models.Model):
    """Named category a Blog post belongs to (see Blog.category)."""
    # Human-readable category name, also used as the string representation.
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
class Blog(models.Model):
    """A blog post owned by a Profile, with likes, favorites and comments."""

    owner = models.ForeignKey(
        Profile, null=True, blank=True, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    content = models.TextField()
    # NOTE: the original declared ``likes`` twice; only the second declaration
    # (related_name='like') ever took effect, so the dead duplicate
    # (related_name="blogs") has been removed.
    category = models.ForeignKey(Category, on_delete=models.PROTECT, default=1)
    favorites = models.ManyToManyField(Profile, related_name='favorite', default=None, blank=True)
    likes = models.ManyToManyField(Profile, related_name='like', default=None, blank=True)
    featured_image = models.ImageField(null=True, blank=True, upload_to=user_directory_path, default="default.jpg")
    created = models.DateTimeField(auto_now_add=True)
    id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)

    def __str__(self):
        return self.title

    class Meta:
        # Oldest posts first.
        ordering = ['created']

    @property
    def imageURL(self):
        """URL of the featured image, or '' when it cannot be resolved."""
        try:
            url = self.featured_image.url
        except Exception:  # narrowed from a bare ``except:`` (kept broad on purpose)
            url = ''
        return url

    @property
    def total_likes(self):
        """Number of profiles that liked this post."""
        return self.likes.count()

    @property
    def total_comments(self):
        """Number of comments attached to this post."""
        return self.comments.count()

    @property
    def reviewers(self):
        """IDs of the profiles that reviewed this post."""
        queryset = self.review_set.all().values_list('owner__id', flat=True)
        return queryset

    @property
    def getVoteCount(self):
        """Recompute and persist vote_total / vote_ratio from related reviews.

        NOTE(review): vote_total / vote_ratio are not declared as fields on
        this model — confirm they exist on the table / a parent class.
        """
        reviews = self.review_set.all()
        upVotes = reviews.filter(value='up').count()
        totalVotes = reviews.count()
        # Guard against ZeroDivisionError when the post has no reviews yet.
        ratio = (upVotes / totalVotes) * 100 if totalVotes else 0
        self.vote_total = totalVotes
        self.vote_ratio = ratio
        self.save()
class Comment(models.Model):
    """A comment left by a Profile on a Blog post (reverse accessor: blog.comments)."""
    owner = models.ForeignKey(Profile, null=True, blank=True, on_delete=models.CASCADE)
    blog = models.ForeignKey(Blog, on_delete=models.CASCADE, related_name="comments")
    content = models.TextField(null=True, blank=True)
    created = models.DateTimeField(default=timezone.now)
    id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)

    class Meta:
        # Newest comments first.
        ordering = ['-created']

    def __str__(self):
        return f"comment by {self.owner}"
| minarefaat1002/blog_website | blogs project/blog/models.py | models.py | py | 2,654 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
4863814184 | import numpy as np
import pandas as pd
import itertools
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
# models that are being considered
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
"""
def grid_search_cross_validation(x_train, y_train, grid, model):
gridCV = GridSearchCV(model, grid, cv=10)
gridCV.fit(x_train, y_train.T.squeeze())
return gridCV.best_params_
def get_svc_best_params(x_train, y_train):
kernel = ['poly', 'sigmoid']
degree = [3, 4, 5]
tol = [ 10**(-3)]
grid = {
'kernel' : kernel,
'degree' : degree,
}
res = grid_search_cross_validation(x_train, y_train, grid, SVC())
print(res)
"""
def print_accuracy_scores(performance_data):
    """Print the accuracy of each model.

    :param performance_data: iterable of (model_name, predictions, ground_truth)
        tuples, as assembled in ``__main__``.
    """
    print('Accuracy scores:')
    for model_name, pred, test in performance_data:
        # BUG FIX: the original passed the predictions as ``y_true`` and the
        # ground truth as ``y_pred``. Accuracy is symmetric so the value was
        # unchanged, but the arguments now match their documented meaning.
        acc = metrics.accuracy_score(y_true=test, y_pred=pred, normalize=True)
        print(model_name + ' accuracy: ', acc)
def print_f1_score(performance_data):
    """Print the macro-averaged F1 score of each model.

    :param performance_data: iterable of (model_name, predictions, ground_truth)
        tuples, as assembled in ``__main__``.
    """
    print('f1 scores:')
    for model_name, pred, test in performance_data:
        # BUG FIX: the original swapped ``y_true`` and ``y_pred`` (the value is
        # the same because per-class F1 is symmetric in precision/recall, but
        # the arguments now match their documented meaning).
        acc = metrics.f1_score(y_true=test, y_pred=pred, average='macro')
        print(model_name + ' f1 score: ', acc)
def cross_validation_acc_score(x, y, clf):
    """Print the mean accuracy of ``clf`` under 10-fold stratified cross-validation."""
    folds = StratifiedKFold(n_splits=10).split(x, y)
    scores = cross_val_score(clf, x, y, cv=folds)
    print('Accuracy {}%'.format(scores.mean() * 100))
"""
def find_model():
label='Vote'
x_train = pd.read_csv("x_train.csv", header=0)
y_train = pd.read_csv("y_train.csv", squeeze=True, header=None)
x_valid = pd.read_csv("x_valid.csv", header=0)
y_valid = pd.read_csv("y_valid.csv", squeeze=True, header=None)
x_test = pd.read_csv("x_test.csv", header=0)
y_test = pd.read_csv("y_test.csv", squeeze=True, header=None)
#get_random_forest_best_params(x_train, y_train)
x = x_train
y = y_train
# Best parameters for Random Tree Forest: {'criterion': 'gini', 'max_depth': 30, 'max_features': 'auto', 'min_samples_split': 2, 'n_estimators': 50}
rand_forest_clf = RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)
cross_validation_acc_score(x, y, rand_forest_clf)
rand_forest_clf.fit(x, y)
prediction_rand_forest = rand_forest_clf.predict(x_valid)
# Best parameters for SVC {'degree': 4, 'kernel': 'poly'}
svm_poly_clf = SVC(kernel='poly', degree=4, probability=True)
svm_poly_clf.fit(x, y)
prediction_svm_poly = svm_poly_clf.predict(x_valid)
# Multi-layer perceptron classifier
perceptron_clf = MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
learning_rate="constant", max_iter=2000)
perceptron_clf.fit(x, y)
prediction_perceptron = perceptron_clf.predict(x_valid)
estimators = [
('Random Forest', RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)),
('SVC', SVC(kernel='poly', degree=4, probability=True)),
('Percepton', MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
learning_rate="constant", max_iter=2000))
]
blend_clf = StackingClassifier(estimators)
blend_clf.fit(x, y)
prediction_blend = blend_clf.predict(x_valid)
# evaluate and plot confusion matrices
performance_data = [('Random Forest', prediction_rand_forest, y_valid),
('SVM Polinomial Kernel', prediction_svm_poly, y_valid),
('Perceptron', prediction_perceptron, y_valid),
('Blending ', prediction_blend, y_valid)
]
print_accuracy_scores(performance_data)
print_f1_score(performance_data)
prediction = prediction_blend
parties = np.unique(prediction)
num_votes_for_party = lambda party: len([vote for vote in prediction if vote == party])
list_of_parties = [(party, num_votes_for_party(party)) for party in parties]
num_votes = len(y_test.index)
winner = max(list_of_parties, key=lambda item: item[1])
print('Party with most probable majority of votes')
print(winner[0], ':', winner[1], ',', winner[1] * 100 / num_votes, '%')
# 2. Division of voters between the parties
print('Amount of votes per party')
for party_votes in sorted(list_of_parties, key=lambda votes: votes[1], reverse=True):
print(party_votes[0], ':', party_votes[1], ',', party_votes[1] * 100 / num_votes, '%')
"""
if __name__ == '__main__':
    label = 'Vote'  # NOTE(review): unused in this branch
    # Pre-split feature/label sets produced by an earlier preparation step.
    x_train = pd.read_csv("x_train.csv", header=0)
    y_train = pd.read_csv("y_train.csv", squeeze=True, header=None)
    x_valid = pd.read_csv("x_valid.csv", header=0)
    y_valid = pd.read_csv("y_valid.csv", squeeze=True, header=None)
    x_test = pd.read_csv("x_test.csv", header=0)
    y_test = pd.read_csv("y_test.csv", squeeze=True, header=None)
    # get_random_forest_best_params(x_train, y_train)
    x = x_train
    y = y_train
    # Stacking ensemble over the three pre-tuned base models.
    estimators = [
        ('Random Forest', RandomForestClassifier(criterion='gini', max_depth=50, min_samples_split=5, n_estimators=50)),
        ('SVC', SVC(kernel='poly', degree=4, probability=True)),
        ('Percepton', MLPClassifier(activation="relu", alpha=0.1, hidden_layer_sizes=(10, 10, 10),
                                    learning_rate="constant", max_iter=2000))
    ]
    blend_clf = StackingClassifier(estimators)
    blend_clf.fit(x, y)
    prediction = blend_clf.predict(x_test)
    # evaluate and plot confusion matrices
    # 1. Party with the plurality of predicted votes.
    parties = np.unique(prediction)
    num_votes_for_party = lambda party: len([vote for vote in prediction if vote == party])
    list_of_parties = [(party, num_votes_for_party(party)) for party in parties]
    num_votes = len(y_test.index)
    winner = max(list_of_parties, key=lambda item: item[1])
    print('Party with most probable majority of votes')
    print(winner[0], ':', winner[1], ',', winner[1] * 100 / num_votes, '%')
    # 2. Division of voters between the parties
    print('Amount of votes per party')
    for party_votes in sorted(list_of_parties, key=lambda votes: votes[1], reverse=True):
        print(party_votes[0], ':', party_votes[1], ',', party_votes[1] * 100 / num_votes, '%')
    performance_data = [('Blending ', prediction, y_test)]
    print_accuracy_scores(performance_data)
    print_f1_score(performance_data)
| grikkaq/ml_hw5 | elections_results.py | elections_results.py | py | 7,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 51,
"usage_type": "call"
},
{
"api_name"... |
28512647903 | # Import libraries
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from sqlalchemy import create_engine
from googlesearch import search
from tqdm import tqdm
tqdm.pandas()
# Read data
df = pd.read_csv('data/user-item-interactions.csv')
df_content = pd.read_csv('data/articles.csv')
# Drop the index columns pandas read from the CSVs.
del df['Unnamed: 0']
del df_content['Unnamed: 0']

# <----- CLEAN DATA [start] ----->
# Remove duplicate articles
df_content = df_content.drop_duplicates(keep='first').reset_index(drop=True)
df_content = df_content.drop_duplicates(subset='article_id', keep='first').reset_index(drop=True)
# Format matching columns to same type
df = df.astype({'article_id': int})
# Make User-id column in df to identify users: assign a sequential integer
# id to each distinct email, then drop the email column.
user_id_dict = dict()
i=0
for email in df.email:
    if email not in user_id_dict:
        user_id_dict[email] = i
        i+=1
df['user_id'] = df.email.apply(lambda x: user_id_dict[x])
df.drop('email', axis=1, inplace=True)
# Fill in missing document descriptions with empty strings
df_content.doc_description[df_content.doc_description.isnull()] = ''
# <----- CLEAN DATA [finished] ----->

# Merge data-sets on article id (outer join keeps articles seen only in
# interactions OR only in the content table).
df_merged = df.drop('title', axis=1).merge(df_content[['article_id', 'doc_full_name', 'doc_description']], on='article_id', how='outer')
# Fill in missing document titles using the interactions table.
no_title_ids = df_merged.article_id[df_merged.doc_full_name.isnull()].unique().tolist()
for id in no_title_ids:
    title = df.title[df.article_id == id].tolist()[0]
    df_merged.doc_full_name[df_merged.article_id == id] = title
# Fill in missing descriptions with empty string
df_merged.doc_description[df_merged.doc_description.isnull()] = ''
# Make subset of merged dataframe and drop all duplicates
df_subset = df_merged[['article_id', 'doc_full_name', 'doc_description']].drop_duplicates(keep='first').reset_index(drop=True)
# Extract article links through google searches for all articles in the subset dataframe
doc_identifier = df_subset.doc_full_name + ' ' + df_subset.doc_description
def extract_link(text):
    """Return the first Google search result URL for ``text``.

    Falls back to ``https://www.google.com`` when the search fails for any
    reason (network error, rate limiting, empty result set).

    :param text: search query, typically "<title> <description>"
    :return: a URL string
    """
    try:
        link = list(search(text, tld="com", num=1, stop=1))[0]
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt / SystemExit
        # are no longer swallowed; any search failure still yields the fallback.
        link = "https://www.google.com"
    return link
# Look up one link per unique article (tqdm progress bar via progress_apply).
df_subset['link'] = doc_identifier.progress_apply(extract_link)

# Distribute links to all rows of the merged dataframe
df_merged['link'] = df_merged.article_id.apply(lambda x: df_subset.link[df_subset.article_id==x].tolist()[0])

# Save data to database
engine = create_engine('sqlite:///data/data.db')
df_merged.to_sql('user-article-interactions', engine, index=False, if_exists='replace') | sameedakber-ai/ibm-recommendations-2 | data/process_data.py | process_data.py | py | 2,578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm.pandas",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"li... |
26166080106 | import argparse
import os
import cv2
import matplotlib.pyplot as plt
import maxflow
import networkx as nx
import numpy as np
class GraphCuts:
def __init__(self, src, target, mask, save_graph=False):
"""
Initialize the graph and computes the min-cut.
:param src: image to be blended
:param target: background image
:param mask: manual mask with constrained pixels
:param save_graph: if true, graph is saved
"""
assert (src.shape == target.shape), \
f"Source and target dimensions must be same: {str(src.shape)} != {str(target.shape)}"
# Creating the graph and adding nodes
graph = maxflow.Graph[float]()
node_ids = graph.add_grid_nodes((src.shape[0], src.shape[1]))
self.compute_edge_weights(src, target) # self.edge_weights is inside func(compute_edge_weights)
# Adding non-terminal edges
patch_height = src.shape[0]
patch_width = src.shape[1]
for row_idx in range(patch_height):
for col_idx in range(patch_width):
# Horizontal edge
if col_idx + 1 < patch_width:
weight = self.edge_weights[row_idx, col_idx, 0]
graph.add_edge(node_ids[row_idx][col_idx],
node_ids[row_idx][col_idx + 1],
weight,
weight)
# Vertical edge
if row_idx + 1 < patch_height:
weight = self.edge_weights[row_idx, col_idx, 1]
graph.add_edge(node_ids[row_idx][col_idx],
node_ids[row_idx + 1][col_idx],
weight,
weight)
# Adding terminal edge capacities for the pixels constrained to belong to the source/sink.
# http://pmneila.github.io/PyMaxflow/maxflow.html
# 검토) add_tedge 대신 다른 api 쓸 순 없을까? np.inf 넣기 싫은데.
if np.array_equal(mask[row_idx, col_idx, :], [0, 255, 255]):
graph.add_tedge(node_ids[row_idx][col_idx], 0, np.inf)
elif np.array_equal(mask[row_idx, col_idx, :], [255, 128, 0]):
graph.add_tedge(node_ids[row_idx][col_idx], np.inf, 0)
# Plot graph
if save_graph:
nxg = graph.get_nx_graph()
self.plot_graph_2d(nxg, (patch_height, patch_width))
# 디버깅
# print('nxg {}'.format(nxg)) # nxg
# print('type of nxg {}'.format(type(nxg))) # type of nxg <class 'networkx.classes.digraph.DiGraph'>
# Computing maxflow / mincut
flow = graph.maxflow()
self.sgm = graph.get_grid_segments(node_ids)
def compute_edge_weights(self, src, target):
"""
Compute edge weights based on matching quality cost.
:param src: image to be blended (foreground)
:param target: background image
"""
self.edge_weights = np.zeros((src.shape[0], src.shape[1], 2))
# Create shifted versions of the matrics for vectorized operations.
src_left_shifted = np.roll(src, -1, axis=1)
target_left_shifted = np.roll(target, -1, axis=1)
src_up_shifted = np.roll(src, -1, axis=0)
target_up_shifted = np.roll(target, -1, axis=0)
eps = 1e-10 # Numerical stability
# Horizontal weights
horizontal_weight = np.sum(np.square(src - target, dtype=np.float) +
np.square(src_left_shifted - target_left_shifted, dtype=np.float),
axis=2)
horizontal_norm_factor = np.sum(np.square(src - src_left_shifted, dtype=np.float) +
np.square(target - target_left_shifted, dtype=np.float),
axis=2)
self.edge_weights[:, :, 0] = horizontal_weight / (horizontal_norm_factor + eps)
# Vertical weights
vertical_weight = np.sum(np.square(src - target, dtype=np.float) +
np.square(src_up_shifted - target_up_shifted, dtype=np.float),
axis=2)
vertical_norm_factor = np.sum(np.square(src - src_up_shifted, dtype=np.float) +
np.square(target - target_up_shifted, dtype=np.float),
axis=2)
self.edge_weights[:, :, 1] = vertical_weight / (vertical_norm_factor + eps)
def plot_graph_2d(self, graph, nodes_shape,
                  plot_weights=True,
                  plot_terminals=True,
                  font_size=7):
    """
    Plot the graph used in graph cuts as a 2D grid.

    :param graph: Maxflow graph, or a networkx graph already produced by
        ``get_nx_graph()`` (the construction code passes the latter)
    :param nodes_shape: patch shape (rows, cols)
    :param plot_weights: if true, edge weights are shown
    :param plot_terminals: if true, the terminal nodes are shown
    :param font_size: text font size
    """
    X, Y = np.mgrid[:nodes_shape[0], :nodes_shape[1]]
    aux = np.array([Y.ravel(), X[::-1].ravel()]).T
    positions = {i: v for i, v in enumerate(aux)}
    # Terminals are drawn to the left/right of the grid, vertically centred.
    positions['s'] = (-1, nodes_shape[0] / 2.0 - 0.5)
    positions['t'] = (nodes_shape[1], nodes_shape[0] / 2.0 - 0.5)
    # Bug fix: the caller already converts the maxflow graph with
    # get_nx_graph() before calling this method, so unconditionally calling
    # it again raised AttributeError on a networkx DiGraph.  Accept both.
    if hasattr(graph, 'get_nx_graph'):
        nxgraph = graph.get_nx_graph()
    else:
        nxgraph = graph
    print("nxgraph created")
    if not plot_terminals:
        nxgraph.remove_nodes_from(['s', 't'])
    # Bug fix: the stray plt.show() that used to sit before any drawing
    # popped an empty window and blocked; only the final show() remains.
    plt.clf()
    nx.draw(nxgraph, pos=positions)
    if plot_weights:
        edge_labels = {}
        for u, v, d in nxgraph.edges(data=True):
            edge_labels[(u, v)] = d['weight']
        nx.draw_networkx_edge_labels(nxgraph,
                                     pos=positions,
                                     edge_labels=edge_labels,
                                     label_pos=0.3,
                                     font_size=font_size)
    plt.axis('equal')
    plt.show()
def blend(self, src, target):
    """
    Overwrite the target image with source pixels wherever the min-cut
    segmentation (``self.sgm``) selected the source side.

    :param src: source (foreground) image
    :param target: background image, modified in place
    :return: the blended ``target`` array
    """
    cut_mask = self.sgm
    target[cut_mask] = src[cut_mask]
    return target
if __name__ == '__main__':
    # CLI entry point: blends src.jpg into target.jpg guided by mask.png
    # and writes result.png back into the same directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', dest='image_dir', required=True, help='Saved Path of Source & Target Images.')
    args = parser.parse_args()
    # Read the images and the mask.
    image_dir = args.image_dir
    src = cv2.imread(os.path.join(image_dir, 'src.jpg'))
    target = cv2.imread(os.path.join(image_dir, 'target.jpg'))
    mask = cv2.imread(os.path.join(image_dir, 'mask.png'))
    # Compute the min-cut.
    # NOTE(review): cv2.imread returns None for a missing file and no check
    # is done here, so a wrong -i path surfaces later as an opaque error.
    graphcuts = GraphCuts(src, target, mask)
    # Save the output.
    target = graphcuts.blend(src, target)
    cv2.imwrite(os.path.join(image_dir, "result.png"), target)
| c1a1o1/graphcut-textures | src/graphcut_textures.py | graphcut_textures.py | py | 7,197 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "maxflow.Graph",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.array_equal",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "numpy.array_equal... |
import json
# NOTE(review): this import-time block reads the whole dataset into a list
# named `leer`, but that name is immediately shadowed by the function
# `leer()` defined below, so the list is never used and the file is read
# twice.  This block looks like dead code left from development.
leer = []
for linea in open('202006_movements.json','r'):
    leer.append(json.loads(linea))
    #print (linea)
# Parsed movement records (one dict per JSON line), filled by leer().
datos = []
def leer():
    """Load every JSON line of 202006_movements.json into the module-level
    `datos` list (one dict per movement record)."""
    # Bug fix: `with` guarantees the file handle is closed; the original
    # left it open until garbage collection.
    with open('202006_movements.json', 'r') as fichero:
        for linea in fichero:
            datos.append(json.loads(linea))
def recogidasPorPunto():
    """Count bike pick-ups per unplug station and print the tally, then
    print the counts looked up for station codes 0..N-1 (entries are None
    where no such station id occurs in the data)."""
    conteo = {}
    for registro in datos:
        etiqueta = "Punto " + str(registro['idunplug_station'])
        conteo[etiqueta] = conteo.get(etiqueta, 0) + 1
    print(conteo)
    ordenado = [conteo.get("Punto " + str(indice)) for indice in range(len(conteo))]
    print(ordenado)
    return
def recogidasPorEdad():
    """Count pick-ups per rider age range and print the tally, then print
    the counts ordered by numeric age-range code (entries are None where a
    code is absent from the data)."""
    conteo = {}
    for registro in datos:
        etiqueta = "ageRange " + str(registro['ageRange'])
        conteo[etiqueta] = conteo.get(etiqueta, 0) + 1
    print(conteo)
    ordenado = [conteo.get("ageRange " + str(indice)) for indice in range(len(conteo))]
    print(ordenado)
    return
def puntoRecYDev():
    """Print the sorted list (and count) of stations where at least one
    trip both started and ended at that same station."""
    estaciones = []
    for registro in datos:
        estacion = registro["idplug_station"]
        if estacion == registro["idunplug_station"] and estacion not in estaciones:
            estaciones.append(estacion)
    estaciones.sort()
    print(estaciones)
    print(len(estaciones))
    return
# Load the dataset, then run only the age-range report; the other two
# reports are defined above but not invoked here.
leer()
recogidasPorEdad()
{
"api_name": "json.loads",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
}
] |
22161452898 | #!/usr/bin/env python3
import glob
import os.path
import re
import statistics
import sys
from collections import defaultdict
from typing import List, Dict
"""
USAGE:
./simple_spec_summary.py # all files in /spec/result/
./simple_spec_summary.py 1 10 # result 1-10 from /spec/result/
./simple_Spec_summary.py <list> <of> <csv> <files>
"""
def draw_table(table: List[List[str]], hline_after=()):
    """Render `table` as aligned text columns separated by ' | '.

    Every cell is right-padded to its column's widest entry.  After each
    row whose index appears in `hline_after`, a dashed horizontal rule
    (with '-|-' at the column separators) is inserted.  Returns the whole
    rendered table as a single string.
    """
    widths: Dict[int, int] = defaultdict(int)
    for row in table:
        for col_idx, cell in enumerate(row):
            widths[col_idx] = max(widths[col_idx], len(cell))
    rendered = []
    for row_idx, row in enumerate(table):
        padded = [cell + ' ' * (widths[col_idx] - len(cell)) for col_idx, cell in enumerate(row)]
        rendered.append(' | '.join(padded))
        rendered.append('\n')
        if row_idx in hline_after:
            rule = '-|-'.join('-' * width for _, width in sorted(widths.items()))
            rendered.append(rule + '\n')
    return ''.join(rendered)
def load_spec_files(files: List[str]) -> Dict[str, Dict[str, List[float]]]:
    """
    Parse SPEC CPU2006 result CSV files into nested runtime lists.

    :param files: paths to SPEC ``*.ref.csv`` result files; missing paths
        are reported and skipped
    :return: {benchmark type (test name): {benchmark name: [list, of, results]}}
    """
    results = {}
    for fname in files:
        if not os.path.exists(fname):
            print('MISSING FILE', fname)
            continue
        with open(fname, 'r') as f:
            text = f.read()
        # Strip the '"test name: ' prefix and the trailing quote.
        name = [l[12:-1] for l in text.split('\n') if l.startswith('"test name: ')][0]
        # Normalise the legacy name used by older runs.
        if name == 'llvm-o3-typegraph':
            name = 'llvm-o3-typro'
        if name not in results:
            results[name] = {}
        # The per-benchmark rows live between these two CSV section markers.
        table = text.split('"Selected Results Table"')[1].split('"Run number:"')[0]
        for l in table.split('\n'):
            # SPEC CPU2006 benchmark ids start with '4' (e.g. 400.perlbench).
            if l.startswith('4'):
                elements = l.split(',')
                # elements[2] holds the result value; empty means no result
                # for this row (assumption from how it is consumed below —
                # TODO confirm against the CSV layout).
                if elements[2]:
                    bench_name = elements[0]
                    # Drop the numeric '400.' style prefix if present.
                    if re.match(r'\d{3}\.\w+', bench_name):
                        bench_name = bench_name.split('.', 1)[1]
                    if bench_name not in results[name]:
                        results[name][bench_name] = []
                    results[name][bench_name].append(float(elements[2]))
    return results
def summarize_spec_files(files: List[str]):
    """Load SPEC result CSVs and print a per-benchmark comparison table of
    Typro vs. reference runtimes (mean, relative stdev, overhead)."""
    results = load_spec_files(files)
    assert 'llvm-o3-typro' in results, 'No typro runs!'
    assert 'llvm-o3-ref' in results, 'No reference runs!'
    benchmarks = list(sorted(results['llvm-o3-typro']))
    table = [['Benchmark', 'Typro runtime (stdev)', 'Ref runtime (stdev)', 'Overhead']]
    for bench in benchmarks:
        runtime_typro = sum(results['llvm-o3-typro'][bench]) / len(results['llvm-o3-typro'][bench])
        runtime_ref = sum(results['llvm-o3-ref'][bench]) / len(results['llvm-o3-ref'][bench])
        # Relative standard deviation (fraction of the mean).
        # NOTE(review): statistics.stdev raises StatisticsError when a
        # benchmark has fewer than two runs.
        stdev_typro = statistics.stdev(results['llvm-o3-typro'][bench]) / runtime_typro
        stdev_ref = statistics.stdev(results['llvm-o3-ref'][bench]) / runtime_ref
        overhead = runtime_typro / runtime_ref - 1
        table.append([
            bench,
            f'{runtime_typro:5.1f} s (+-{stdev_typro*100:4.1f}%)',
            f'{runtime_ref:5.1f} s (+-{stdev_ref*100:4.1f}%)',
            f'{overhead * 100:5.2f}%'])
    print(draw_table(table, (0,)))
if __name__ == '__main__':
    # Usage (see module docstring): two numeric args select a result-id
    # range under /spec/result/, any other args are explicit CSV paths,
    # and no args summarises every *.ref.csv found there.
    if len(sys.argv) == 3 and re.match(r'\d+', sys.argv[1]) and re.match(r'\d+', sys.argv[2]):
        files = []
        for i in range(int(sys.argv[1]), int(sys.argv[2]) + 1):
            files.append(f'/spec/result/CINT2006.{i:03d}.ref.csv')
            files.append(f'/spec/result/CFP2006.{i:03d}.ref.csv')
        summarize_spec_files(files)
    elif len(sys.argv) > 1:
        summarize_spec_files(sys.argv[1:])
    else:
        summarize_spec_files(glob.glob('/spec/result/*.ref.csv'))
| typro-type-propagation/TyPro-CFI | scripts/simple_spec_summary.py | simple_spec_summary.py | py | 3,896 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.path.path.exists",... |
25714644305 | import numpy as np
import matplotlib.pyplot as plt
def plot_with_exponential_averaging(x, y, label, alpha):
y_ema = [y[0],]
for y_i in y[1:]:
y_ema.append(y_ema[-1] * alpha + y_i * (1 - alpha))
p = plt.plot(x, y_ema, label=label)
plt.plot(x, y, color=p[0].get_color(), alpha=0.2)
def plot_train_result(result, label="", alpha=0.95, save_path="./", threshold=None):
    """Plot smoothed episode rewards against cumulative training steps and
    save the figure to `save_path`.

    `result` is a sequence of episode dicts with 'r' (reward) and 'l'
    (length) keys.  `threshold` places a horizontal reference line; when
    not given, 110% of the best observed reward is used instead.
    """
    episode_rewards = [episode['r'] for episode in result]
    episode_lengths = [episode['l'] for episode in result]
    steps = np.cumsum(episode_lengths)
    plot_with_exponential_averaging(steps, episode_rewards, label, alpha)
    if threshold:
        reference = threshold
    else:
        reference = int(max(episode_rewards) * 1.1)
    plt.axhline(y=reference, color='grey', linestyle='-')
    plt.xlabel("Training Steps")
    plt.ylabel("Episode Reward")
    plt.legend()
    plt.title(label)
    plt.savefig(save_path)
    plt.cla()
| olenmg/dopamine-rl | utils/plot.py | plot.py | py | 804 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplot... |
72287095464 | #!/usr/local/python3/bin/python3
import sys
sys.path.append("..")
import tushare as ts
import re
import datetime
import basicdata.basic_mgr as sk
import time
import os
import pandas as pd
g_update_newest=False #True|False
#是否下载最新的概念,一般不需要
g_ctcode_name=None
#g_ctcode_name['TS56']='电改'
g_tscode_concept=None
#g_tscode_concept['000008.SZ']={'id':['TS56','TS59'],'name':['电改','特斯拉']}
def get_ctname_by_tscode(pro, tscode):
    """Return the list of concept names attached to one TS stock code,
    lazily building the stock -> concept cache on first use."""
    global g_tscode_concept
    if g_tscode_concept is None:
        init_tscode_concept(pro)
    entry = g_tscode_concept[tscode]
    return entry['name']
def get_ctcode_by_tscode(pro, tscode):
    """Return the list of concept ids attached to one TS stock code,
    lazily building the stock -> concept cache on first use."""
    global g_tscode_concept
    if g_tscode_concept is None:
        init_tscode_concept(pro)
    entry = g_tscode_concept[tscode]
    return entry['id']
def init_tscode_concept(pro):
    """Build the global g_tscode_concept cache mapping each TS stock code
    to its concept ids and names.  Per-stock CSVs under ./concept-data/
    are reused when present; otherwise the tushare API is queried and the
    result cached to disk.  No-op if the cache is already populated."""
    global g_tscode_concept
    global g_update_newest
    if g_tscode_concept is None:
        g_tscode_concept = {}
        ts_codes=sk.get_tscodes(pro)
        for i in range(len(ts_codes)):
            ts_code=ts_codes[i]
            path='./concept-data/'+ts_code+'.concept.csv'
            # Reuse the cached CSV unless a fresh download was requested
            # via the g_update_newest flag.
            if g_update_newest == False and os.path.exists(path) == True:
                conceptdf=pd.read_csv(path)
            else:
                conceptdf=pro.concept_detail(ts_code=ts_code)
                if conceptdf is not None:
                    conceptdf.to_csv(path)
                # Throttle API calls to roughly one per second.
                time.sleep(1)
                print("download", path)
            if conceptdf is not None:
                conceptids=conceptdf['id'].values.tolist()
                conceptnames=conceptdf['concept_name'].values.tolist()
                g_tscode_concept[ts_code]={'id':conceptids, 'name':conceptnames}
def get_concept_map(pro):
    """Return the full concept-code -> concept-name mapping, building the
    global cache on first access."""
    global g_ctcode_name
    if g_ctcode_name is None:
        init_ctcode_name(pro)
    mapping = g_ctcode_name
    return mapping
def get_name(pro, code):
    """Return the concept name for a single concept code, building the
    global code -> name cache on first access."""
    global g_ctcode_name
    if g_ctcode_name is None:
        init_ctcode_name(pro)
    name = g_ctcode_name[code]
    return name
def init_ctcode_name(pro):
    """Populate the global g_ctcode_name cache (concept code -> concept
    name) from the tushare concept list; no-op if already populated."""
    global g_ctcode_name
    if g_ctcode_name is None:
        g_ctcode_name = {}
        frame = pro.concept(src='ts')
        codes = frame['code'].values.tolist()
        names = frame['name'].values.tolist()
        g_ctcode_name.update(zip(codes, names))
if __name__== '__main__':
    # Smoke test: build the concept caches and print a few lookups.
    # NOTE(review): a tushare API token is hard-coded and committed here;
    # it should be moved to an environment variable or config file, and
    # the exposed token revoked.
    pro = ts.pro_api('08aedc1cc54171e54a64bbe834ec1cb45026fa2ab39e9e4cb8208cad')
    init_ctcode_name(pro)
    print(g_ctcode_name)
    print(get_name(pro, 'TS2'))
    print(get_ctcode_by_tscode(pro, '600848.SH'))
    #conceptdf.to_csv('./concept.csv')
| haianhua/stock | stock/conceptdata/concept_mgr.py | concept_mgr.py | py | 2,624 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "basicdata.basic_mgr.get_tscodes",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "basicdata... |
43109381353 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 18 21:28:29 2021
@author: apolloseeds
"""
from dataset import *
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
from sklearn import model_selection
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
import torch
from scipy import stats
from toolbox_02450 import feature_selector_lr, bmplot, rlr_validate, mcnemar
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
N2, M2 = contX.shape
def trainANN(X,y,h, K = 10): #returns the optimal h (number of hidden units)
    """K-fold cross-validation intended to pick the best hidden-layer size
    from the candidate list `h` for a 1-output network on (X, y).

    NOTE(review): `h` is used both as the candidate list (len(h), h[i])
    and passed directly into torch.nn.Linear(M2, h) inside the model
    lambda; the lambda never reads `testedH`, so every candidate trains
    the same (invalid) architecture and `testedH` is dead code.
    NOTE(review): `errors` accumulates K * len(h) entries, but its argmin
    index is used to index `h` directly below — the fold/candidate mapping
    is wrong whenever K > 1.  Both issues need fixing upstream.
    """
    CV = model_selection.KFold(K,shuffle=True)
    n_replicates = 1 # number of networks trained in each k-fold
    max_iter = 10000
    # Define the model structure
    # The lambda-syntax defines an anonymous function, which is used here to
    # make it easy to make new networks within each cross validation fold
    model = lambda: torch.nn.Sequential(
        torch.nn.Linear(M2, h), #M features to H hiden units
        # 1st transfer function, either Tanh or ReLU:
        torch.nn.Tanh(), #torch.nn.ReLU(),
        torch.nn.Linear(h, 1) # H hidden units to 1 output neuron
    )
    loss_fn = torch.nn.MSELoss() # notice how this is now a mean-squared-error loss
    print('Training model of type:\n\n{}\n'.format(str(model())))
    errors = [] # make a list for storing generalizaition error in each loop
    for (k, (train_index, test_index)) in enumerate(CV.split(X,y)):
        print('\nCrossvalidation fold: {0}/{1}'.format(k+1,K))
        # Extract training and test set for current CV fold, convert to tensors
        X_train = torch.Tensor(X[train_index,:])
        y_train = torch.Tensor(y[train_index])
        X_test = torch.Tensor(X[test_index,:])
        y_test = torch.Tensor(y[test_index])
        for i in range(0, len(h)):
            #Iterate over every h
            testedH = h[i]
            # Train the net on training data
            net, final_loss, learning_curve = train_neural_net(model,
                                                               loss_fn,
                                                               X=X_train,
                                                               y=y_train,
                                                               n_replicates=n_replicates,
                                                               max_iter=max_iter)
            print('\n\tBest loss: {}\n'.format(final_loss))
            # Determine estimated class labels for test set
            y_sigmoid = net(X_test)
            y_test_est = (y_sigmoid>.5).type(dtype=torch.uint8)
            # Determine errors and errors
            y_test = y_test.type(dtype=torch.uint8)
            e = y_test_est != y_test
            error_rate = (sum(e).type(torch.float)/len(y_test)).data.numpy()
            errors.append(error_rate) # store error rate for current CV fold
    optimalHIndex = errors.index(min(errors))
    optimalH = h[optimalHIndex]
    # Print the average classification error rate
    print('\nEstimated generalization error, RMSE: {0}'.format(round(np.sqrt(np.mean(errors)), 4)))
    return optimalH
def annRegression(X_train, X_test, y_train, y_test, hRange, K = 10):
    """Select a hidden-unit count via trainANN's inner CV, train a final
    1-output ANN on the training split, and return
    (optimal hidden units, test MSE, test predictions).

    NOTE(review): the model uses the module-level `M` as the input width
    while trainANN uses `M2` (= contX.shape[1]); only M2 is visibly
    defined in this file — confirm `M` exists and matches the feature
    count of X_train.
    """
    # Parameters for neural network classifier
    n_replicates = 1 # number of networks trained in each k-fold
    max_iter = 10000 # stop criterion 2 (max epochs in training)
    loss_fn = torch.nn.MSELoss() # notice how this is now a mean-squared-error loss
    opt_hidden_unit = trainANN(X_train, y_train, hRange, K)
    model = lambda: torch.nn.Sequential(
        torch.nn.Linear(M, opt_hidden_unit), #M features to H hiden units
        torch.nn.Tanh(), # 1st transfer function,
        torch.nn.Linear(opt_hidden_unit, 1), # H hidden units to 1 output neuron
    )
    # print('Training model of type:\n\n{}\n'.format(str(model())))
    X_train = torch.Tensor(X_train)
    y_train = torch.Tensor(y_train)
    X_test = torch.Tensor(X_test)
    y_test = torch.Tensor(y_test)
    # Train the net on training data
    net, final_loss, learning_curve = train_neural_net(model,
                                                       loss_fn,
                                                       X=X_train,
                                                       y=y_train,
                                                       n_replicates=n_replicates,
                                                       max_iter=max_iter)
    print('\n\tBest loss: {}\n'.format(final_loss))
    # Determine estimated class labels for test set
    y_test_est = net(X_test)
    # Determine errors and errors
    se = (y_test_est.float()-y_test.float())**2 # squared error
    mse = (sum(se).type(torch.float)/len(y_test)).data.numpy() #mean
    return opt_hidden_unit, mse, y_test_est
C = 2
# Normalize data
annX = stats.zscore(contX)
# Parameters for neural network classifier
h = 1 # number of hidden units, !!!!SELECT A RANGE BY TESTING
# Target variable: column 7 of X (serum value), cast to int labels.
serumC = np.array(np.asarray(X[:, 7]), dtype=int)
#y_rings = np.array(np.asarray(rings), dtype=np.int).reshape(-1, 1)
K = 5
lambdas = np.linspace(0.01, 10, 1000)
inner_cvf = 10
CV = model_selection.KFold(K, shuffle=True)
coefficient_norm = np.zeros(K)
# Parameters for neural network classifier
hRange = range(1, 8)
n_replicates = 2 # number of networks trained in each k-fold
max_iter = 10000 # stop criterion 2 (max epochs in training)
# Per-fold result arrays; only the ANN-regression ones are filled below,
# the rest appear reserved for the other models of the report.
square_err_regression_base = np.empty(K)
square_err_regression_RLR = np.empty(K)
square_err_regression_ANN = np.empty(K)
regression_RLR_opt_lambdas = np.empty(K)
regression_opt_hidden_units = np.empty(K)
error_rate_classification_base = np.empty(K)
error_rate_classification_logistic = np.empty(K)
error_rate_classification_ANN = np.empty(K)
classification_opt_hidden_units = np.empty(K)
classification_opt_lambdas = np.empty(K)
w_est_logistic_arr = np.empty((K, X.shape[1]))
y_est_Reg_ANN = []
y_est_Reg_RLR = []
y_est_claf_ANN = []
y_est_claf_logistic = []
y_sex_real = []
y_rings_real = []
# Outer K-fold CV: each fold picks the hidden-unit count by inner CV and
# records that model's test MSE and predictions.
for k, (train_index, test_index) in enumerate(CV.split(annX,serumC)):
    X_train = annX[train_index,:]
    X_test = annX[test_index,:]
    y_train = serumC[train_index]
    y_test = serumC[test_index]
    """
    y_rings_train = y_rings[train_index]
    y_rings_test = y_rings[test_index]
    y_sex_real.append(y_sex_test)
    y_rings_real.append(y_rings_test)
    """
    regression_opt_hidden_unit, ANN_mse, y_est_ANN_regression = annRegression(X_train, X_test, y_train, y_test, hRange, inner_cvf)
    regression_opt_hidden_units[k] = regression_opt_hidden_unit
    square_err_regression_ANN[k] = ANN_mse
    y_est_Reg_ANN.append(y_est_ANN_regression)
print("square_err_regression_ANN: ", square_err_regression_ANN)
| ralph-elhaddad/02450-Intro-ML | Project2/2b.py | 2b.py | py | 7,248 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 34,
"usage_type": "call"
},
{
"api_nam... |
35018437018 | import line
import cv2
import time
import serial
# Camera
vid = cv2.VideoCapture(0)
# Elegoo
power_forward = 100
power_sideway_minimal = 130
power_sideway_maximal = 200
compteur = 0
ips = 0
after = time.time() + 1
imprimer_taille_image = True
left_begin = 0
left_end = 85
right_begin = 95
right_end = 180
compteur_did_not_find_lines = 0
def power_engine_from_angle(begin, end, angle):
    """Map a steering angle inside the band [begin, end] to a motor PWM
    duty between power_sideway_minimal and power_sideway_maximal.

    :param begin: lower bound of the angle band (degrees)
    :param end: upper bound of the angle band (degrees)
    :param angle: measured line angle, expected within the band
    :return: integer PWM value, clamped to the 255 hardware maximum
    """
    band_width = end - begin
    # Bug fix: scale by the angle's position *inside* the band, not the raw
    # angle.  With the raw angle, any band not starting at 0 (e.g. the
    # right-turn band [95, 180]) produced a fraction > 1 and therefore
    # always clamped to full power 255.
    fraction = (angle - begin) / band_width
    power = power_sideway_minimal + (power_sideway_maximal - power_sideway_minimal) * fraction
    if power > 255:
        power = 255
    return int(power)
def send_command(left, right):
    """Send one 'left,right,' PWM command pair to the Arduino over serial.

    Uses the module-level `arduino` port opened in the __main__ block.
    Serial failures are caught and printed rather than raised, so one bad
    write does not stop the control loop.
    """
    try:
        cmd = str(left) + ',' + str(right) + ','
        arduino.write(cmd.encode())
        time.sleep(0.1) # wait for arduino to answer
        arduino.flushOutput()
        arduino.flushInput()
    except Exception as ex:
        print(ex)
if __name__ == '__main__':
    with serial.Serial("/dev/ttyACM0", 9600, timeout=1) as arduino:
        time.sleep(0.1) # wait for serial to open
        # Interactive configuration (prompts are in French on purpose).
        video = input("Voulez vous la vidéo ? Y or N ")
        if video == "Y":
            video = True
        else:
            video = False
        suivi = input("Voulez vous le suivi de commande ? Y or N ")
        if suivi == "Y":
            suivi = True
        else:
            suivi = False
        hist_size = input("Quelle taille d'historique voulez vous ? > 0")
        angle_hist = line.Historique(hist_size=int(hist_size))
        if arduino.isOpen():
            print("{} connected!".format(arduino.port))
        # Line-detection control loop
        while True:
            ret, original = vid.read()
            ips, compteur, after = line.caclulate_ips(ips, compteur, after)
            # if ips == 0, the FPS value is not displayed
            angle, size, img_line_plus_mean, did_not_find_lines = line.line_detection(hist=angle_hist, ips=ips,
                                                                                     display_image=False,
                                                                                     display_mean=video,
                                                                                     original_picture=original)
            # print image size once
            if imprimer_taille_image:
                print(size)
                imprimer_taille_image = False
            # stop the program by pressing q
            # NOTE(review): '&' binds tighter than '==', so this parses as
            # (waitKey & 0xFF) == (ord('q') & video); with video False the
            # right side is 0.  Probably meant (... == ord('q')) and video.
            if cv2.waitKey(1) & 0xFF == ord('q') & video:
                break
            if did_not_find_lines:
                compteur_did_not_find_lines += 1
            # Reaction to angle
            # The motors are wired inverted
            # ENA, ENB
            if did_not_find_lines and compteur_did_not_find_lines > 10:
                commande = "Backward"
                send_command(10, 10) # 10,10 is a sentinel command (original note: "this is a code")
                power = power_forward
                compteur_did_not_find_lines = 0
            elif left_end > angle >= left_begin:
                commande = "left"
                power = power_engine_from_angle(left_begin, left_end, angle)
                send_command(power, 0) # the robot turns right (not very effective)
            elif right_end >= angle > right_begin:
                commande = "right"
                power = power_engine_from_angle(right_begin, right_end, angle)
                send_command(0, power) # the robot turns left (very effective)
            elif right_begin >= angle >= left_end:
                commande = "Forward"
                send_command(power_forward, power_forward)
                power = power_forward
            if suivi:
                print("Commande = " + commande + " " * (10-len(commande)) + " Angle = " + str(angle) + " " * (10-len(str(angle))) + " Power_engine = " + str(power))
| GuillaumeCariou/I3S_Tutorship_Internship | Python/Line_Following/Line/main_rgb.py | main_rgb.py | py | 3,992 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number... |
10660236943 | # coding=utf-8
import mysql.connector
from mysql.connector import Error
import requests
import json
import datetime
# Weekday names indexed 0 (Sunday) .. 6 (Saturday), in Portuguese.
dias_semana = ['Domingo', 'Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sábado']
try:
    # fetch the chat dataset
    url_json = "http://raw.githubusercontent.com/camilabianchi/graces_desafio/master/datasets/chatOnline.jsonl"
    req = requests.get(url_json)
    dicionario = json.loads(req.text)
    if len(dicionario) > 0:
        # open the database connection
        connection = mysql.connector.connect(host='localhost', port='3306', database='[db]', user='[user]',
                                             password='[pwd]')
        # iterate over the records
        for item in dicionario:
            # dates arrive as dd/mm/yyyy strings; normalise the separator
            data_inicio_str = item["Data da conversa (Inicio)"].replace("/", "-")
            data_fim_str = item["Data da conversa (Fim)"].replace("/", "-")
            # parse start/end timestamps of the conversation
            dt_inicio = datetime.datetime.strptime(data_inicio_str, '%d-%m-%Y %H:%M')
            dt_termino = datetime.datetime.strptime(data_fim_str, '%d-%m-%Y %H:%M')
            # values for the insert
            email = item["Visitor_Email"]
            # NOTE(review): `nome` also receives the e-mail; there is no
            # visible visitor-name field — confirm the source data really
            # lacks one.
            nome = item["Visitor_Email"]
            agente = item["Agente"]
            status = "Atendido" if item["Atendido"] == "Sim" else "Não atendido"
            origem = 'Chat'
            # Python's weekday() has Monday=0; shift so Sunday=0 to match
            # the dias_semana list above.
            semana = 0 if dt_inicio.weekday() == 6 else dt_inicio.weekday() + 1
            semana_nome = dias_semana[semana]
            if connection.is_connected():
                cursor = connection.cursor()
                sql_insert = """INSERT INTO contatos(email, nome, data_inicio, data_termino, agente, status, origem, semana, semana_nome)
                                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) """
                record = (email, nome, dt_inicio, dt_termino, agente, status, origem, semana, semana_nome)
                try:
                    cursor.execute(sql_insert, record)
                    connection.commit()
                except Error as e:
                    # On failure, record the MySQL error message instead.
                    sql_insert = """INSERT INTO log_erros(log_mensagem) VALUES (%s) """
                    record = (e.msg.replace("'", ""),)
                    cursor.execute(sql_insert, record)
                    connection.commit()
                finally:
                    cursor.close()
    # close the database connection
    if connection.is_connected():
        connection.close()
except Error as e:
    print("Error while connecting to MySQL", e.msg)
| camilabianchi/graces_desafio | 2_importacao_python_airflow/importa_chat.py | importa_chat.py | py | 2,617 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mysql.conne... |
6864404682 | import math, random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
# Wrap tensors in autograd Variables, moved to the GPU when available.
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
from collections import deque
env_id = "CartPole-v0"
env = gym.make(env_id)
# env = env.unwrapped
# NOTE(review): results directory is a hard-coded absolute user path.
path = "/Users/saumya/Desktop/CriticalStates_results/"
results_dir = "vanillaDQN"
'''
Double DQN code adapted and modified from https://github.com/higgsfield/RL-Adventure/blob/master/2.double%20dqn.ipynb
'''
class ReplayBuffer(object):
    """Fixed-capacity experience replay: stores (state, action, reward,
    next_state, done) transitions and serves uniform random mini-batches."""

    def __init__(self, capacity):
        # deque drops the oldest transition once `capacity` is reached.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Store one transition; states gain a leading batch axis so that
        sampling can concatenate them directly."""
        transition = (np.expand_dims(state, 0), action, reward,
                      np.expand_dims(next_state, 0), done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Return a uniformly sampled batch as (states, actions, rewards,
        next_states, dones); the state arrays are stacked along axis 0."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
class DQN(nn.Module):
    """Fully connected Q-network mapping an observation to one Q-value per
    action, with an epsilon-greedy action helper."""
    def __init__(self, num_inputs, num_actions):
        super(DQN, self).__init__()
        # NOTE(review): num_inputs/num_actions are ignored; layer sizes are
        # taken from the module-level `env` instead.
        self.layers = nn.Sequential(
            # nn.Linear(env.observation_space.shape[0], 128),
            # nn.ReLU(),
            # nn.Linear(128, 128),
            # nn.ReLU(),
            # nn.Linear(128, env.action_space.n)
            # Function approximator for Q function - modified to less hidden neurons
            nn.Linear(env.observation_space.shape[0], 32),
            nn.ReLU(),
            nn.Linear(32, 32),
            nn.ReLU(),
            nn.Linear(32, env.action_space.n)
        )
    def forward(self, x):
        # Returns the vector of Q-values for the given observation batch.
        return self.layers(x)
    def act(self, state, epsilon):
        """
        choose action using epsilon-greedy strategy
        """
        # NOTE(review): Variable(..., volatile=True) is a deprecated no-op
        # on PyTorch >= 0.4; torch.no_grad() is the current equivalent.
        if random.random() > epsilon:
            state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)
            q_value = self.forward(state)
            action = q_value.max(1)[1].item()
        else:
            # Explore: uniform random action.
            action = random.randrange(env.action_space.n)
        return action
def update_target(current_model, target_model):
    """Hard-copy the online network's parameters into the target network."""
    snapshot = current_model.state_dict()
    target_model.load_state_dict(snapshot)
def compute_td_loss(batch_size):
    """
    Compute the TD loss after sampling transitions(of size - "batch_size") from the replay buffer
    """
    # Uses the module-level replay_buffer, current_model, target_model,
    # optimizer and gamma.
    state, action, reward, next_state, done = replay_buffer.sample(batch_size)
    state = Variable(torch.FloatTensor(np.float32(state)))
    next_state = Variable(torch.FloatTensor(np.float32(next_state)))
    action = Variable(torch.LongTensor(action))
    reward = Variable(torch.FloatTensor(reward))
    done = Variable(torch.FloatTensor(done))
    q_values = current_model(state)
    next_q_values = current_model(next_state)
    next_q_state_values = target_model(next_state)
    # Q(s, a) of the actions actually taken in the batch.
    q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
    # Double-DQN target: the online net picks the argmax action, the
    # target net evaluates it.
    next_q_value = next_q_state_values.gather(1, torch.max(next_q_values, 1)[1].unsqueeze(1)).squeeze(1)
    # Terminal transitions (done == 1) carry no bootstrapped future value.
    expected_q_value = reward + gamma * next_q_value * (1 - done)
    # Wrapping the target in Variable(...data) detaches it from the graph.
    loss = (q_value - Variable(expected_q_value.data)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
def plot(frame_idx, rewards, losses, iter):
    """Save a two-panel figure (episode rewards, training loss) for the
    given training iteration under the module-level results directory."""
    plt.figure(figsize=(20, 5))
    plt.subplot(131)
    plt.title('frame %s' % (frame_idx))
    plt.plot(rewards)
    plt.subplot(132)
    plt.title('loss')
    plt.plot(losses)
    out_file = path + results_dir + "/cartpole_dqn_plots_iter_" + str(iter)
    plt.savefig(out_file)
def load_model(model_path):
    """Rebuild a DQN sized for the module-level environment and load the
    saved weights from `model_path` onto the CPU."""
    model = DQN(env.observation_space.shape[0], env.action_space.n)
    state = torch.load(model_path, map_location=torch.device('cpu'))
    model.load_state_dict(state)
    return model
def play(model_path):
    """
    Play or rollout the learnt policy and observe the mean reward obtained over 1000 episodes
    """
    current_model = load_model(model_path)
    avg_test_reward = []
    for t in range(1000):
        # print('play: ',t)
        state = env.reset()
        done = False
        reward_per_episode = 0
        while not done:
            # epsilon = 0: act greedily (no exploration) during evaluation.
            action = current_model.act(state, 0)
            next_state, reward, done, info = env.step(action)
            # env.render()
            reward_per_episode+=reward
            if done:
                # print('rewards: ',reward_per_episode)
                avg_test_reward.append(reward_per_episode)
                break
            else:
                state = next_state
    env.close()
    # Mean episode reward over all evaluation rollouts.
    print(np.mean(avg_test_reward))
if __name__ == "__main__":
    ## Hyperparameters
    epsilon_start = 1.0
    epsilon_final = 0.01
    epsilon_decay = 500
    num_frames = 400000 # increased num of timesteps from 160000
    batch_size = 64
    gamma = 0.99
    update_target_net = 100
    learning_rate = 1e-4 # reduced learning rate from 1e-3
    # Exponentially decaying exploration rate, from epsilon_start towards
    # epsilon_final with time constant epsilon_decay.
    epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) * math.exp(
        -1. * frame_idx / epsilon_decay)
    ## Running for 5 iteration to obtain a mean and std of the reward plots
    # NOTE(review): the loop variable `iter` shadows the builtin.
    for iter in range(5):
        print("iteration: ",iter)
        current_model = DQN(env.observation_space.shape[0], env.action_space.n)
        target_model = DQN(env.observation_space.shape[0], env.action_space.n)
        if USE_CUDA:
            current_model = current_model.cuda()
            target_model = target_model.cuda()
        optimizer = optim.Adam(current_model.parameters(), lr = learning_rate)
        replay_buffer = ReplayBuffer(100000) # increased buffer size from 1000
        update_target(current_model, target_model)
        losses = []
        all_rewards = []
        episode_reward = 0
        ep_num = 0
        ## If the environment is solved is_win is set true
        is_win = False
        state = env.reset()
        for frame_idx in range(1, num_frames + 1):
            epsilon = epsilon_by_frame(frame_idx)
            action = current_model.act(state, epsilon)
            next_state, reward, done, _ = env.step(action)
            replay_buffer.push(state, action, reward, next_state, done)
            state = next_state
            episode_reward += reward
            if done:
                state = env.reset()
                all_rewards.append(episode_reward)
                episode_reward = 0
                ep_num+=1
                # Rolling average over the most recent 100 episodes.
                avg_reward = float(np.mean(all_rewards[-100:]))
                print('Best 100-episodes average reward', ep_num, avg_reward)
                ## Using the following "solving" criteria
                if len(all_rewards) >= 100 and avg_reward >= 198 and all_rewards[-1] > 198:
                    if not is_win:
                        is_win = True
                        torch.save(current_model.state_dict(), path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
                        print('Ran %d episodes best 100-episodes average reward is %3f. Solved after %d trials ✔' % (
                            ep_num, avg_reward, ep_num - 100))
                        last_saved = ep_num
                    torch.save(current_model.state_dict(),
                               path+results_dir+'/Final_CartPole_dqn_model_iter_' + str(
                                   iter))
            ## Update the loss
            if len(replay_buffer) > batch_size:
                loss = compute_td_loss(batch_size)
                losses.append(loss.item())
            if frame_idx % 200 == 0:
                plot(frame_idx, all_rewards, losses, iter)
            ## Update the target network
            if frame_idx % update_target_net == 0:
                update_target(current_model, target_model)
        ## Save the reward list - rewards obtained per episode
        np.save(path+results_dir+"/rewards_iter_"+str(iter),all_rewards)
        if not is_win:
            print('Did not solve after %d episodes' % ep_num)
            torch.save(current_model.state_dict(), path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
        # play(path+results_dir+'/CartPole_dqn_model_iter_'+str(iter))
        # play(path+results_dir+'/Final_CartPole_dqn_model_iter_' + str(iter))
# Iteration: 0
# 199.969
# 200.0
# iteration: 1
# 200.0
# 195.842
# iteration: 2
# 200.0
# 182.442
# iteration: 3
# 200.0
# 200.0
# iteration: 4
# 197.461
# 199.972
| saumyasinha/learning_better_policies_with_critical_states | Qlearning/dqn_for_CartPole.py | dqn_for_CartPole.py | py | 8,842 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch... |
20405590464 | import matplotlib.pyplot as plt
from tespy.networks import Network
from tespy.connections import Connection
from tespy.components import (Source, Sink, Condenser, Pump)
# Create a TESPy network
nw = Network(fluids=['water', 'NH3'])
# Add components and connections to the network
source = Source('source')
sink = Sink('sink')
condenser = Condenser('condenser')
pump = Pump('pump')
nw.add_conns(Connection(source, 'out1', condenser, 'in1'))
nw.add_conns(Connection(condenser, 'out1', sink, 'in1'))
nw.add_conns(Connection(condenser, 'out2', pump, 'in1'))
nw.add_conns(Connection(pump, 'out1', condenser, 'in2'))
# Solve the network
# NOTE(review): no component or connection parameters are set before
# solving; confirm the design solve converges at all in this state.
nw.solve('design')
# Extract the components and connections information
components = nw.components.keys()
connections = nw.connections.keys()
# Create a figure and axis
fig, ax = plt.subplots()
# Plot the components
# NOTE(review): `.x` / `.y` coordinate attributes on components and
# `.inl` / `.outl` on connections are not demonstrated anywhere in this
# file — verify they exist in the TESPy version in use before relying on
# this plot.
for component in components:
    x = nw.components[component].x
    y = nw.components[component].y
    ax.scatter(x, y, label=component)
# Plot the connections
for connection in connections:
    x = [nw.connections[connection].inl.x, nw.connections[connection].outl.x]
    y = [nw.connections[connection].inl.y, nw.connections[connection].outl.y]
    ax.plot(x, y, '-', label=connection)
# Add labels and legend
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.legend()
# Show the plot
plt.show()
| JubranKhattab/testing_tespy_projects | subsystems/ploting.py | ploting.py | py | 1,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tespy.networks.Network",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tespy.components.Source",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tespy.components.Sink",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "t... |
10836961705 | import pandas as pd
from flask import Flask, jsonify, request,json
import pickle
model = pickle.load(open('model.pkl','rb'))
app = Flask(__name__)
@app.route('/', methods=['POST'])
def predict():
# get data
body_dict = json.loads(request.get_data().decode('utf-8'))
data = body_dict['0']
# predictions
prediction=[]
for v in data.values():
p=model.predict([v]).tolist()
#print(p)
prediction.append(p[0])
#prediction = model.predict([data['0']]).tolist()
#print(prediction)
result = {'prediction': prediction}
# return data
return jsonify(prediction)
if __name__ == '__main__':
app.run(port = 5000, debug=True) | liJiansheng/Catchup | LR Model API/app.py | app.py | py | 695 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.json",
"line_number"... |
18113301417 | import pygame
#зарускаем программу
pygame.init()
#add colors
black=( 0, 0, 0)
white=( 255, 255, 255)
green=( 0, 255, 0)
red=( 255, 0, 0)
size = [700,700]
screen=pygame.display.set_mode(size)
pygame.display.set_caption("Professor Craven's Cool Game")
done = True
clock=pygame.time.Clock()
screen.fill(white)
pygame.display.flip()
while done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done=False
clock.tick(20)
pygame.quit()
| AndreiTsukov/PythonFiles | Classwork/pygame/lesson1/snegovik.py | snegovik.py | py | 532 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
8446584828 |
import warnings
from cupy import testing
import cupyx.scipy.signal.windows as cu_windows
import pytest
from pytest import raises as assert_raises
try:
import scipy.signal.windows as cpu_windows # NOQA
import scipy.fft # NOQA
except ImportError:
pass
window_funcs = [
('boxcar', ()),
('triang', ()),
('parzen', ()),
('bohman', ()),
('blackman', ()),
('nuttall', ()),
('blackmanharris', ()),
('flattop', ()),
('bartlett', ()),
('barthann', ()),
('hamming', ()),
('kaiser', (1,)),
('gaussian', (0.5,)),
('general_gaussian', (1.5, 2)),
('chebwin', (1,)),
('cosine', ()),
('hann', ()),
('exponential', ()),
('taylor', ()),
('tukey', (0.5,)),
]
class TestBartHann:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_basic(self, xp, scp):
w1 = scp.signal.windows.barthann(6, sym=True)
w2 = scp.signal.windows.barthann(7)
w3 = scp.signal.windows.barthann(6, False)
return w1, w2, w3
class TestBartlett:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_basic(self, xp, scp):
w1 = scp.signal.windows.bartlett(6)
w2 = scp.signal.windows.bartlett(7)
w3 = scp.signal.windows.bartlett(6, False)
return w1, w2, w3
class TestBlackman:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_basic(self, xp, scp):
return (scp.signal.windows.blackman(6, sym=False),
scp.signal.windows.blackman(7, sym=False),
scp.signal.windows.blackman(6),
scp.signal.windows.blackman(7, True))
class TestBlackmanHarris:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_basic(self, xp, scp):
return (scp.signal.windows.blackmanharris(6, False),
scp.signal.windows.blackmanharris(7, sym=False),
scp.signal.windows.blackmanharris(6),
scp.signal.windows.blackmanharris(7, sym=True))
class TestTaylor:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_normalized(self, xp, scp):
"""Tests windows of small length that are normalized to 1. See the
documentation for the Taylor window for more information on
normalization.
"""
w1 = scp.signal.windows.taylor(1, 2, 15)
w2 = scp.signal.windows.taylor(6, 2, 15)
return w1, w2
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-15, atol=1e-15)
def test_non_normalized(self, xp, scp):
"""Test windows of small length that are not normalized to 1. See
the documentation for the Taylor window for more information on
normalization.
"""
return (scp.signal.windows.taylor(5, 2, 15, norm=False),
scp.signal.windows.taylor(6, 2, 15, norm=False))
@testing.numpy_cupy_allclose(scipy_name='scp')
def test_correctness(self, xp, scp):
"""This test ensures the correctness of the implemented Taylor
Windowing function. A Taylor Window of 1024 points is created, its FFT
is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth
are found and checked.
A publication from Sandia National Laboratories was used as reference
for the correctness values [1]_.
References
-----
.. [1] Armin Doerry, "Catalog of Window Taper Functions for
Sidelobe Control", 2017.
https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf
"""
M_win = 1024
N_fft = 131072
# Set norm=False for correctness as the values obtained from the
# scientific publication do not normalize the values. Normalizing
# changes the sidelobe level from the desired value.
w = scp.signal.windows.taylor(
M_win, nbar=4, sll=35, norm=False, sym=False)
f = scp.fft.fft(w, N_fft)
spec = 20 * xp.log10(xp.abs(f / xp.amax(f)))
first_zero = xp.argmax(xp.diff(spec) > 0)
PSLL = xp.amax(spec[first_zero:-first_zero])
BW_3dB = 2 * xp.argmax(spec <= -3.0102999566398121) / N_fft * M_win
BW_18dB = 2 * xp.argmax(spec <= -18.061799739838872) / N_fft * M_win
return PSLL, BW_3dB, BW_18dB
class TestBohman:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.bohman(6),
scp.signal.windows.bohman(7, sym=True),
scp.signal.windows.bohman(6, False))
class TestBoxcar:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.boxcar(6),
scp.signal.windows.boxcar(7),
scp.signal.windows.boxcar(6, False))
class TestChebWin:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
ret = (scp.signal.windows.chebwin(6, 100),
scp.signal.windows.chebwin(7, 100),
scp.signal.windows.chebwin(6, 10),
scp.signal.windows.chebwin(7, 10),
scp.signal.windows.chebwin(6, 10, False))
return ret
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_odd_high_attenuation(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
cheb_odd = scp.signal.windows.chebwin(53, at=-40)
return cheb_odd
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_even_high_attenuation(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
cheb_even = scp.signal.windows.chebwin(54, at=40)
return cheb_even
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_odd_low_attenuation(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
cheb_odd = scp.signal.windows.chebwin(7, at=10)
return cheb_odd
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_even_low_attenuation(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
cheb_even = scp.signal.windows.chebwin(8, at=-10)
return cheb_even
exponential_data = {
(4, None, 0.2, False): True,
(4, None, 0.2, True): True,
(4, None, 1.0, False): True,
(4, None, 1.0, True): True,
(4, 2, 0.2, False): True,
(4, 2, 0.2, True): False,
(4, 2, 1.0, False): True,
(4, 2, 1.0, True): False,
(5, None, 0.2, True): True,
(5, None, 1.0, True): True,
(5, 2, 0.2, True): False,
(5, 2, 1.0, True): False
}
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_exponential(xp, scp):
for args, valid in exponential_data.items():
if not valid:
assert_raises(ValueError, scp.signal.windows.exponential, *args)
else:
win = scp.signal.windows.exponential(*args)
return win
class TestFlatTop:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.flattop(6, sym=False),
scp.signal.windows.flattop(7, sym=False),
scp.signal.windows.flattop(6),
scp.signal.windows.flattop(7, True),)
class TestGaussian:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.gaussian(6, 1.0),
scp.signal.windows.gaussian(7, 1.2),
scp.signal.windows.gaussian(7, 3),
scp.signal.windows.gaussian(6, 3, False),)
class TestGeneralCosine:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.general_cosine(5, [0.5, 0.3, 0.2]),
scp.signal.windows.general_cosine(4, [0.5, 0.3, 0.2],
sym=False),)
class TestGeneralHamming:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.general_hamming(5, 0.7),
scp.signal.windows.general_hamming(5, 0.75, sym=False),
scp.signal.windows.general_hamming(6, 0.75, sym=True),)
class TestHamming:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.hamming(6, False),
scp.signal.windows.hamming(7, sym=False),
scp.signal.windows.hamming(6),
scp.signal.windows.hamming(7, sym=True),)
class TestHann:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.hann(6, sym=False),
scp.signal.windows.hann(7, sym=False),
scp.signal.windows.hann(6, True),
scp.signal.windows.hann(7),)
class TestKaiser:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.kaiser(6, 0.5),
scp.signal.windows.kaiser(7, 0.5),
scp.signal.windows.kaiser(6, 2.7),
scp.signal.windows.kaiser(7, 2.7),
scp.signal.windows.kaiser(6, 2.7, False),)
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestKaiserBesselDerived:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
M = 100
w = scp.signal.windows.kaiser_bessel_derived(M, beta=4.0)
w2 = scp.signal.windows.get_window(
('kaiser bessel derived', 4.0), M, fftbins=False)
w3 = scp.signal.windows.kaiser_bessel_derived(2, beta=xp.pi / 2)
w4 = scp.signal.windows.kaiser_bessel_derived(4, beta=xp.pi / 2)
w5 = scp.signal.windows.kaiser_bessel_derived(6, beta=xp.pi / 2)
return w, w2, w3, w4, w5
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_exceptions(self, xp, scp):
M = 100
# Assert ValueError for odd window length
msg = ("Kaiser-Bessel Derived windows are only defined for even "
"number of points")
with assert_raises(ValueError, match=msg):
scp.signal.windows.kaiser_bessel_derived(M + 1, beta=4.)
# Assert ValueError for non-symmetric setting
msg = ("Kaiser-Bessel Derived windows are only defined for "
"symmetric shapes")
with assert_raises(ValueError, match=msg):
scp.signal.windows.kaiser_bessel_derived(M + 1, beta=4., sym=False)
class TestNuttall:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.nuttall(6, sym=False),
scp.signal.windows.nuttall(7, sym=False),
scp.signal.windows.nuttall(6),
scp.signal.windows.nuttall(7, True),)
class TestParzen:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.parzen(6),
scp.signal.windows.parzen(7, sym=True),
scp.signal.windows.parzen(6, False),)
class TestTriang:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
return (scp.signal.windows.triang(6, True),
scp.signal.windows.triang(7),
scp.signal.windows.triang(6, sym=False),)
tukey_data = [
(4, 0.5, True),
(4, 0.9, True),
(4, 1.0, True),
(4, 0.5, False),
(4, 0.9, False),
(4, 1.0, False),
(5, 0.0, True),
(5, 0.8, True),
(5, 1.0, True),
(6, 0),
(7, 0),
(6, .25),
(7, .25),
(6,),
(7,),
(6, .75),
(7, .75),
(6, 1),
(7, 1),
]
class TestTukey:
@pytest.mark.parametrize('args', tukey_data)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, args, xp, scp):
# Test against hardcoded data
win = scp.signal.windows.tukey(*args)
return win
dpss_data = [
(4, 0.1, 2),
(3, 1.4, 3),
(5, 1.5, 5),
(100, 2, 4),
]
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestDPSS:
@pytest.mark.parametrize('args', tukey_data)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, args, xp, scp):
win, ratios = scp.signal.windows.dpss(*args, return_ratios=True)
return win, ratios
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_unity(self, xp, scp):
# Test unity value handling (gh-2221)
results = []
for M in range(1, 21):
# corrected w/approximation (default)
win = scp.signal.windows.dpss(M, M / 2.1)
results.append(win)
# corrected w/subsample delay (slower)
win_sub = scp.signal.windows.dpss(M, M / 2.1, norm='subsample')
if M > 2:
# @M=2 the subsample doesn't do anything
results.append(win_sub)
# not the same, l2-norm
win_2 = scp.signal.windows.dpss(M, M / 2.1, norm=2)
results.append(win_2)
return results
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_extremes(self, xp, scp):
# Test extremes of alpha
lam1 = scp.signal.windows.dpss(31, 6, 4, return_ratios=True)[1]
lam2 = scp.signal.windows.dpss(31, 7, 4, return_ratios=True)[1]
lam3 = scp.signal.windows.dpss(31, 8, 4, return_ratios=True)[1]
return lam1, lam2, lam3
@pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
def test_degenerate(self, windows):
# Test failures
assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax
assert_raises(ValueError, windows.dpss, 4, 1.5, -5)
assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)
assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2.
assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos
assert_raises(ValueError, windows.dpss, 3, 0, 3)
assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M
@pytest.mark.skip('This has not been implemented yet in CuPy')
class TestLanczos:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_basic(self, xp, scp):
# Analytical results:
# sinc(x) = sinc(-x)
# sinc(pi) = 0, sinc(0) = 1
# Hand computation on WolframAlpha:
# sinc(2 pi / 3) = 0.413496672
# sinc(pi / 3) = 0.826993343
# sinc(3 pi / 5) = 0.504551152
# sinc(pi / 5) = 0.935489284
return (scp.signal.windows.lanczos(6, sym=False),
scp.signal.windows.lanczos(6),
scp.signal.windows.lanczos(7, sym=True),)
@pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
def test_array_size(self, windows):
for n in [0, 10, 11]:
assert len(windows.lanczos(n, sym=False)) == n
assert len(windows.lanczos(n, sym=True)) == n
class TestGetWindow:
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_boxcar(self, xp, scp):
w1 = scp.signal.windows.get_window('boxcar', 12)
# window is a tuple of len 1
w2 = scp.signal.windows.get_window(('boxcar',), 16)
return w1, w2
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_odd(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
w = scp.signal.windows.get_window(
('chebwin', -40), 53, fftbins=False)
return w
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_cheb_even(self, xp, scp):
with warnings.catch_warnings():
# sup.filter(UserWarning, "This window is not suitable")
w = scp.signal.windows.get_window(
('chebwin', 40), 54, fftbins=False)
return w
@pytest.mark.skip('This has not been implemented yet in CuPy')
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_dpss(self, xp, scp):
win1 = scp.signal.windows.get_window(('dpss', 3), 64, fftbins=False)
win2 = scp.signal.windows.dpss(64, 3)
return win1, win2
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_kaiser_float(self, xp, scp):
win1 = scp.signal.windows.get_window(7.2, 64)
win2 = scp.signal.windows.kaiser(64, 7.2, False)
return win1, win2
@pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
def test_invalid_inputs(self, windows):
# Window is not a float, tuple, or string
assert_raises(ValueError, windows.get_window, set('hann'), 8)
# Unknown window type error
assert_raises(ValueError, windows.get_window, 'broken', 4)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_array_as_window(self, xp, scp):
# scipy github issue 3603
osfactor = 128
sig = xp.arange(128)
win = scp.signal.windows.get_window(('kaiser', 8.0), osfactor // 2)
if hasattr(scp.signal, 'resample'):
with assert_raises(ValueError, match='must have the same length'):
scp.signal.resample(sig, len(sig) * osfactor, window=win)
return win
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_general_cosine(self, xp, scp):
return (scp.signal.get_window(('general_cosine', [0.5, 0.3, 0.2]), 4),
scp.signal.get_window(('general_cosine', [0.5, 0.3, 0.2]), 4,
fftbins=False))
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_general_hamming(self, xp, scp):
return (
scp.signal.get_window(('general_hamming', 0.7), 5),
scp.signal.get_window(('general_hamming', 0.7), 5, fftbins=False),)
@pytest.mark.skip('This has not been implemented yet in CuPy')
def test_lanczos(self, xp, scp):
return (scp.signal.get_window('lanczos', 6),
scp.signal.get_window('lanczos', 6, fftbins=False),
scp.signal.get_window('lanczos', 6),
scp.signal.get_window('sinc', 6))
@pytest.mark.parametrize('window_info', window_funcs)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_windowfunc_basics(window_info, xp, scp):
window_name, params = window_info
if window_name in {'parzen', 'tukey'}:
pytest.skip()
window = getattr(scp.signal.windows, window_name)
results = []
with warnings.catch_warnings():
# Check symmetry for odd and even lengths
w1 = window(8, *params, sym=True)
w2 = window(7, *params, sym=False)
results += [w1, w2]
w1 = window(9, *params, sym=True)
w2 = window(8, *params, sym=False)
results += [w1, w2]
# Check that functions run and output lengths are correct
results.append(len(window(6, *params, sym=True)))
results.append(len(window(6, *params, sym=False)))
results.append(len(window(7, *params, sym=True)))
results.append(len(window(7, *params, sym=False)))
# Check invalid lengths
assert_raises((ValueError, TypeError), window, 5.5, *params)
assert_raises((ValueError, TypeError), window, -7, *params)
# Check degenerate cases
results.append(window(0, *params, sym=True))
results.append(window(0, *params, sym=False))
results.append(window(1, *params, sym=True))
results.append(window(1, *params, sym=False))
# Check normalization
results.append(window(10, *params, sym=True))
results.append(window(10, *params, sym=False))
results.append(window(9, *params, sym=True))
results.append(window(9, *params, sym=False))
# Check that DFT-even spectrum is purely real for odd and even
results.append(scp.fft.fft(window(10, *params, sym=False)).imag)
results.append(scp.fft.fft(window(11, *params, sym=False)).imag)
return results
@pytest.mark.parametrize('windows', [cu_windows, cpu_windows])
def test_needs_params(windows):
for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd',
'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'dss', 'dpss', 'general cosine', 'general_cosine',
'chebwin', 'cheb', 'general hamming', 'general_hamming',
]:
assert_raises(ValueError, windows.get_window, winstr, 7)
@testing.numpy_cupy_allclose(scipy_name='scp', rtol=1e-13, atol=1e-13)
def test_not_needs_params(xp, scp):
for winstr in ['barthann',
'bartlett',
'blackman',
'blackmanharris',
'bohman',
'boxcar',
'cosine',
'flattop',
'hamming',
'nuttall',
'parzen',
'taylor',
'exponential',
'poisson',
'tukey',
'tuk',
'triangle',
]:
win = scp.signal.get_window(winstr, 7)
return win
| cupy/cupy | tests/cupyx_tests/scipy_tests/signal_tests/test_windows.py | test_windows.py | py | 22,594 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy.testing.numpy_cupy_allclose",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cupy.testing",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "cupy.testing.numpy_cupy_allclose",
"line_number": 52,
"usage_type": "call"
},
{
"ap... |
72425931625 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import pandas as pd
from selenium.webdriver.common.by import By
import re
from webdriver_manager.chrome import ChromeDriverManager
import json
import time
import os
products_categories = [
't-shirts-tank-tops',
'pants',
'hoodies-sweatshirts',
'shirts',
'suits-blazers',
'cardigans-sweaters',
'jeans',
'jackets-coats',
'shorts',
'swimwear',
'sportswear',
'underwear',
'socks',
'accessories',
'shoes',
'sleepwear-loungewear',
'premium-selection',
'cardigans-sweaters',
'jackets-coats',
'knitwear'
]
parent_link = 'https://www2.hm.com/en_us'
additional_link = '?sort=stock&image-size=small&image=model&offset=0&page-size={}'
gender_spec_url = 'men/products'
# get the category from the keys
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
# options.add_argument('--incognito')
# options.add_argument('--headless')
driver = webdriver.Chrome(options=options)
products_links = {}
for category in categories:
products_links[category] = []
print(category)
# create url for this category
if category in products_categories:
cat_url = os.path.join(parent_link, gender_spec_url, category+'.html')
print(cat_url)
# open this url and get the count of number of items for this product
driver.get(cat_url)
time.sleep(0.2)
# now get the total count of products present in this page
product_count_element = driver.find_element(By.CLASS_NAME, "filter-pagination")
product_count_element_text = product_count_element.text
product_count_str = product_count_element_text.split(' ')[0]
if product_count_str=='':
continue
total_count = int(product_count_str)
print(total_count)
all_products_url = cat_url+additional_link.format(total_count)
driver.get(all_products_url)
element_by_class = driver.find_element(By.CLASS_NAME, "products-listing")
products_elements = element_by_class.find_elements(By.CLASS_NAME, "product-item")
for pe in products_elements:
single_product = driver.find_element(By.CLASS_NAME, "item-link.remove-loading-spinner")
href = single_product.get_attribute("href")
title = single_product.get_attribute('title')
products_links[category].append([title,href])
f = open('product_links_men.json','w')
json.dump(products_links, f)
| umairahmad89/h-m-scraper | scraper.py | scraper.py | py | 2,718 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 46,
"usage_type": "call"
},
{
"api... |
35056823314 | import pickle
import json
import yaml
import numpy as np
import torch
import torch.optim as optim
import time
from data_manager import DataManager
from model import BiLSTMCRF
from utils import f1_score, get_tags, format_result
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='./tensorboard/bioes')
class ChineseNER(object):
def __init__(self, entry="train"):
self.load_config()
self.__init_model(entry)
def __init_model(self, entry):
if entry == "train":
self.train_manager = DataManager(batch_size=self.batch_size, tags=self.tags)
self.total_size = len(self.train_manager.batch_data)
data = {
"batch_size": self.train_manager.batch_size,
"input_size": self.train_manager.input_size,
"vocab": self.train_manager.vocab,
"tag_map": self.train_manager.tag_map,
}
self.save_params(data)
self.dev_manager = DataManager(batch_size=60, data_type="dev")
# 验证集
# self.dev_batch = self.dev_manager.iteration()
self.model = BiLSTMCRF(
tag_map=self.train_manager.tag_map,
batch_size=self.batch_size,
vocab_size=len(self.train_manager.vocab),
dropout=self.dropout,
embedding_dim=self.embedding_size,
hidden_dim=self.hidden_size,
)
self.model = self.model.cuda()
self.restore_model()
elif entry == "predict" or "evaluate":
# python main.py predict
data_map = self.load_params()
input_size = data_map.get("input_size")
self.tag_map = data_map.get("tag_map")
self.vocab = data_map.get("vocab")
print('input_size',input_size)
print('tag_map',self.tag_map)
self.model = BiLSTMCRF(
tag_map=self.tag_map,
vocab_size=input_size,
embedding_dim=self.embedding_size,
hidden_dim=self.hidden_size
)
self.model = self.model.cuda()
self.test_manager = DataManager(batch_size=60, data_type="dev")
self.restore_model()
# 加载配置项
def load_config(self):
try:
fopen = open("models/config.yml")
config = yaml.load(fopen)
fopen.close()
except Exception as error:
print("Load config failed, using default config {}".format(error))
fopen = open("models/config.yml", "w")
config = {
"embedding_size": 300,
"hidden_size": 128,
"batch_size": 30,
"dropout":0.5,
"model_path": "models/",
"tags": ["TREATMENT", "BODY","SIGNS","CHECK","DISEASE"]
}
yaml.dump(config, fopen)
fopen.close()
self.embedding_size = config.get("embedding_size")
self.hidden_size = config.get("hidden_size")
self.batch_size = config.get("batch_size")
self.model_path = config.get("model_path")
self.tags = config.get("tags")
self.dropout = config.get("dropout")
# 保存模型各种训练参数
def restore_model(self):
try:
self.model.load_state_dict(torch.load(self.model_path + "params_6all.pkl"))
print("model restore success!")
except Exception as error:
print("model restore faild! {}".format(error))
# 保存模型超参数
def save_params(self, data):
with open("models/data_6all.pkl", "wb") as fopen:
pickle.dump(data, fopen)
# 加载模型超参数
def load_params(self):
with open("models/data_6all.pkl", "rb") as fopen:
data_map = pickle.load(fopen)
return data_map
def train(self):
optimizer = optim.Adam(self.model.parameters(),weight_decay=0.002,lr=0.0000004) # 0.000001
# optimizer = optim.SGD(self.model.parameters(), lr=0.00000008,weight_decay=0.001,momentum=0.9) #4e-7
scheduler_lr = optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.5,patience=2,cooldown=5,verbose=True,min_lr=1e-8,eps=1e-8)
best_loss = 240
lossList = [0] * self.total_size
for epoch in range(268,401):
losses = []
index = 0
startTime = time.process_time()
for batch in self.train_manager.get_batch():
start = time.process_time()
index += 1
self.model.zero_grad()
sentences, tags, length = zip(*batch)
# lenght 是句子的原本长度
# shape (batch_size,max.len(sentence) (20,332) batch_size 和 每个batch最长句子的长度
sentences_tensor = torch.tensor(sentences, dtype=torch.long).cuda()
tags_tensor = torch.tensor(tags, dtype=torch.long).cuda()
length_tensor = torch.tensor(length, dtype=torch.long).cuda()
loss = self.model.neg_log_likelihood(sentences_tensor, tags_tensor, length_tensor)
losses.append(loss.cpu().item())
progress = ("█"*int(index * 60 / self.total_size)).ljust(60)
loss.backward()
optimizer.step()
# torch.save(self.model.state_dict(), self.model_path + 'params_6all.pkl')
end = time.process_time()
dur = end - start
print("""epoch [{}] |{}| {}/{}\n\tloss {:.3f}\t\tlast_loss {:.3f}\t\ttime {}\t\tbest_avg_loss {:.3f}""".format(
epoch, progress, index, self.total_size, loss.cpu().tolist()[0],lossList[index - 1],str(dur),best_loss
)
)
lossList[index - 1] = loss.cpu().item()
print("-" * 90)
endTime = time.process_time()
totalTime = endTime - startTime
avg_loss = np.mean(losses)
# 保存最好的模型
if avg_loss < best_loss:
best_loss = avg_loss
torch.save(self.model.state_dict(), self.model_path + 'params_6all.pkl')
writer.add_scalar('BiLstm_CRF:avg_loss-epoch', avg_loss, epoch)
print('epoch ',epoch,' avg_loss ', avg_loss,' total_time ',totalTime)
if epoch % 5 == 0:
self.evaluate(epoch/5,manager=self.dev_manager)
print("-"*100)
scheduler_lr.step(avg_loss)
writer.close()
# train: BODY 7507, SIGNS 6355, CHECK 6965, DISEASE 474, TREATMENT 805
# test:
# 计算f1,评估模型
def evaluate(self,epoch,manager,add_scalar = True):
print('正在开始评估')
all_origins = all_founds = all_rights = 0
for tag in self.tags:
origins = founds = rights = 0
for batch in manager.get_batch():
sentences, labels, length = zip(*batch)
_, paths = self.model(sentences)
origin, found, right = f1_score(labels, paths, tag, self.model.tag_map)
origins += origin
founds += found
rights += right
all_origins += origins
all_founds += founds
all_rights += rights
recall = 0. if origins == 0 else (rights / origins)
precision = 0. if founds == 0 else (rights / founds)
f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)
print("\t{}\torigins:{}\t\t\tfounds:{}\t\t\trights:{}".format(tag, origins, founds, rights))
print("\t\t\trecall:{}\tprecision:{}\tf1:{}".format(recall, precision, f1))
if add_scalar:
tag_epoch = tag + '-5epoch'
writer.add_scalars(tag_epoch,{
'recall':recall,
'precision':precision,
'f1':f1
}, epoch)
all_recall = 0. if all_origins == 0 else (all_rights / all_origins)
all_precision = 0. if all_founds == 0 else (all_rights / all_founds)
all_f1 = 0. if all_recall + all_precision == 0 else (2 * all_precision * all_recall) / (all_precision + all_recall)
print("\tall_origins:{}\t\t\tall_founds:{}\t\t\tall_rights:{}".format(all_origins, all_founds, all_rights))
print("\tall_recall:{}\tall_precision:{}\tall_f1:{}".format(all_recall, all_precision, all_f1))
if add_scalar:
writer.add_scalars("ALL-5epoch", {
'all_recall': all_recall,
'all_precision': all_precision,
'all_f1': all_f1
}, epoch)
print('评估结束')
return all_recall, all_precision, all_f1
# 预测方法
def predict(self, input_str=""):
if not input_str:
input_str = input("请输入文本: ")
# 获取输入句子所有汉字的在vocab的索引
input_vec = [self.vocab.get(i, 0) for i in input_str]
# convert to tensor
sentences = torch.tensor(input_vec,dtype=torch.long).view(1, -1)
sentences = sentences.cuda()
# paths 预测出来的标签索引 shape 为 [1,1]
_, paths = self.model(sentences)
entities = []
# "tags": ["ORG", "PER"]
for tag in self.tags:
tags = get_tags(paths[0], tag, self.tag_map)
entities += format_result(tags, input_str, tag)
print(entities)
print(json.dumps(entities,indent=4,ensure_ascii=False))
return entities
if __name__ == "__main__":
entry = input('请输入train or predict or evaluate: ')
while entry:
if entry == 'train':
cn = ChineseNER("train")
cn.train()
break
elif entry == 'predict':
cn = ChineseNER("predict")
while True:
inputText = input("请输入文本(q退出): ")
if inputText != 'q':
cn.predict(inputText)
else:
break
break
elif entry == 'evaluate':
cn = ChineseNER("evaluate")
cn.evaluate(epoch=0,manager=cn.test_manager,add_scalar=False)
break
else:
print("请输入正确的命令(train or predict or evaluate)")
entry = input('请输入train or predict or evaluate: ')
| ravesky/medical_ner_pytorch | main.py | main.py | py | 10,508 | python | en | code | 44 | github-code | 36 | [
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "data_manager.DataManager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "data_manager.DataManager",
"line_number": 32,
"usage_type": "call"
},
... |
33380799983 | import requests
from bs4 import BeautifulSoup
import time
import plotly
import numpy as np
import pandas as pd
import datetime as dt
import cufflinks as cf
import subprocess
import traceback
from sys import exit
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import pickle
import os.path
from googleapiclient.discovery import build
# Set working directory to the project folder
os.chdir("<replace_with_path_to_project_folder>")
def extract_price(game_page):
    """Scrape the price from a PlayStation-store product page.

    Args:
        game_page: HTTP response object whose ``.text`` holds the page HTML.

    Returns:
        The price in pounds as a float, or ``np.nan`` when no price element
        is found on the page.
    """
    soup = BeautifulSoup(game_page.text, features="html.parser")
    price_tag = soup.find('span', class_='psw-h3')
    if price_tag is not None:
        # Strip the currency symbol before converting, e.g. '£19.99' -> 19.99.
        return float(price_tag.get_text().replace('£', ''))
    return np.nan
def get_latest_non_null(row):
    """Return the most recent (right-most) non-null price in *row*.

    Nulls are NaN values, detected via the `x == x` idiom (NaN never
    compares equal to itself).  If every entry is null, or the row is
    empty, the sentinel -1 is returned.
    """
    return next((value for value in reversed(row) if value == value), -1)
def create_message(sender, to, subject, price_drop, failures, nans):
    """Build a base64url-encoded Gmail API message dict.

    The HTML body gets one section per non-empty input: a table of price
    drops, a list of URLs that failed to scrape, and a table of games whose
    price could not be found.

    Args:
        sender: value for the From header.
        to: value for the To header.
        subject: subject line.
        price_drop: DataFrame of price-drop rows (may be empty).
        failures: list of URLs that could not be scraped.
        nans: DataFrame of games with no price found (may be empty).

    Returns:
        dict with the single key 'raw', as the Gmail API send call expects.
    """
    html = """<html>
        <head></head>
        <body>"""
    if price_drop.shape[0] > 0:
        html += '<p><b>Price Drops:</b></p>'
        html += price_drop.to_html(escape=False, index=False, justify='center')
    if len(failures) > 0:
        html += '<br><p><b>Failed to Scrape:</b></p>'
        html += '<br>'.join(failures)
    if nans.shape[0] > 0:
        html += '<br><p><b>Price Not Found:</b></p>'
        html += nans.to_html()
    html += """<br></body>
        </html>"""
    message = MIMEMultipart()
    message.attach(MIMEText(html, 'html'))
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    # The Gmail API requires the RFC 2822 message base64url-encoded.
    encoded = base64.urlsafe_b64encode(message.as_string().encode("utf-8"))
    return {'raw': encoded.decode("utf-8")}
# Wait 10 seconds in case computer was asleep (give time for
# an internet connection to be established)
time.sleep(10)
# Attempt to retrieve google.com to confirm internet connection,
# wait 5 minutes and try again if there is a error (no connection).
# If an error occurs the second time, a Pop-up error message is
# displayed and script is terminated.
# NOTE(review): the bare excepts also swallow KeyboardInterrupt --
# confirm that is intended before narrowing them.
try:
    requests.get('https://www.google.com/')
except:
    time_of_error = time.time()
    # Sleep in 1 s ticks for 5 minutes before the single retry below.
    while time.time() - time_of_error < 300:
        time.sleep(1)
    try:
        requests.get('https://www.google.com/')
    except:
        # Create Mac OS popup error message
        applescript = """
        display dialog "Playstation_scraper could not connect to the internet." ¬
        with title "Internet Connection Error" ¬
        with icon caution ¬
        buttons {"OK"}
        """
        subprocess.call("osascript -e '{}'".format(applescript), shell=True)
        exit('exit')
# The game price data is stored in Game_prices.csv. Each row
# corresponds to a different game. The first column ('game')
# contains the name of the game. The second column ('game id')
# contains the unique ID for the game on the playstation store.
# The remaining columns contain the price of the game on
# each day the script was run. The header for each column is
# the date the price was found. When the script is run for the
# first time there will be no price data (there will only be
# the 'game' and 'game id' columns)
# NOTE(review): ',' is passed positionally as the separator; newer pandas
# deprecates positional sep -- prefer sep=','.
df = pd.read_csv('game_prices.csv', ',', index_col='game')
# Convert the date column headers to date-time format
category_headers = df.columns[:1].tolist()
date_headers = df.columns[1:]
converted_date_headers = pd.to_datetime(date_headers, format='%d/%m/%Y').date.tolist()
df.columns = category_headers + converted_date_headers
# The full url for a game is the base url with the game ID added at
# the end.
base_url = 'https://store.playstation.com/en-gb/product/'
# time_delay is the seconds waited between subsequent GET requests
time_delay = 10
# game_price records the price of each game today
game_price = []
time_last_request = time.time()
# failures records the game url's which result in an error when requested.
failures = []
# The game_id column of df defines the game_id for each game.
# The code loops through this and for each game id it makes a
# get request and scrapes the price of that game from its webpage.
for game_id in df.game_id:
    # Waits between subsequent GET requests.
    while time.time() - time_last_request < time_delay:
        time.sleep(1)
    try:
        # full game url is base_url + game id
        game_page = requests.get(base_url + game_id)
        time_last_request = time.time()
        game_price.append(extract_price(game_page))
    # If GET request or price extraction failed, wait 300 seconds
    # and try again
    except:
        time_error = time.time()
        while time.time() - time_error < 300:
            time.sleep(1)
        try:
            game_page = requests.get(base_url + game_id)
            time_last_request = time.time()
            game_price.append(extract_price(game_page))
        except:
            # both GET requests failed so record as failure
            failures.append(base_url + game_id)
            # Record game price today as null
            game_price.append(np.nan)
# Add todays game prices as new column in df, keyed by today's date
date = dt.date.today()
df[date] = game_price
# Below generates a separate plot of price over time for each game in df.
n_rows = df.shape[0]
# plotly layout used to define the layout of the plot.
# Plot height scales with the number of games (150 px per sub-plot row).
layout1 = cf.Layout(xaxis=dict(autorange=True, dtick='M1'),
                    yaxis=dict(title=dict(standoff=0, text='')),
                    height=150 * n_rows,
                    width=1200,
                    margin=dict(pad=0, t=100, l=0.9, r=0.9, b=1),
                    showlegend=False,
                    title=dict(text='Price of Games on Playstation Store',
                               x=0.5, y=0.99, xanchor='center')
                    )
# df is transposed so each column is a game, with the price on
# each dates in the rows. The game_id column in excluded
# by .iloc[1:,]
plotting_df = df.T.iloc[1:, ]
# Sub-plots will be in 2 columns, this is defined by the shape
# paramater, which takes a tuple (rows, columns). To calculate
# the rows we divide the number of games (total rows in df) by 1.9 and
# round the answer. e.g. if there are 7 games, we divide by 1.9 and
# round up giving us 4 rows. We use 1.9 because if we divide by 2 Python
# sometimes rounds numbers ending in 0.5 down rather than up.
shape1 = (round(n_rows / 1.9), 2)
# Plot price variation over time for each game
fig = plotting_df.iplot(subplots=True, shape=shape1,
                        subplot_titles=True, vertical_spacing=0.08,
                        horizontal_spacing=0.1, layout=layout1,
                        asFigure=True, color='orange', width=2)
fig.update_layout(hovermode='x unified')
# Fixes the opacity of the lines so all lines are fully visible
# (by default cufflinks gave variable opacity to the lines).
for i in fig['data']:
    i['line']['color'] = "rgba(255, 153, 51, 1.0)"
# Sets color and style of the subplot titles
for i in fig['layout']['annotations']:
    i['font'] = dict(size=14, color='orange')
# Adds date selector buttons (e.g. 'last month') to plots
fig.update_xaxes(
    rangeselector = dict(
        yanchor='bottom',
        buttons=list([
            dict(count=1, label="1m", step="month", stepmode="backward"),
            dict(count=6, label="6m", step="month", stepmode="backward"),
            dict(count=1, label="YTD", step="year", stepmode="todate"),
            dict(count=1, label="1y", step="year", stepmode="backward"),
            dict(step="all")
        ])
    )
)
# Set y axis range (0-60 GBP) and write the standalone HTML report
fig.update_yaxes(nticks=8, rangemode='tozero', range=[0,60])
fig.write_html("Game Prices.html")
# The next section identifies price drops and prices that couldn't
# be found (nan_prices)
price_drops = []
nan_prices = []
# Excludes dataframes with data from only 1 date and only runs if the latest
# data is from today
if (df.shape[1] > 2):
    # We want to find the latest price before todays data so
    # we exclude todays column and the game_id column
    # This is to account for any NAN values in the data.
    df_prices_before_today = df.iloc[:, 1:-1]
    # Most recent non-null price for each game is found. Note that if
    # no non-null old price exists, the most recent price will be -1
    most_recent_price = [get_latest_non_null(row)
                         for row in df_prices_before_today.to_numpy()]
    # Loops through the games and identifies any price drops
    for game, game_id, new_price, old_price in zip(df.index, df.game_id,
                                                   game_price, most_recent_price):
        # Price drops only calculated if there is a valid price for
        # today (the value is not null) and a valid last price to
        # compare it to (most_recent_price is not -1).
        # `new_price == new_price` is the NaN check: NaN != NaN.
        if (new_price == new_price) & (old_price > 0):
            price_delta = old_price - new_price
            # Only notify price drops larger than £0.5
            if price_delta > 0.5:
                # The game name is wrapped in a link so the email table
                # cell is clickable.
                html_link = '<a href="' + base_url \
                            + game_id + '"><div style="height:100%;width:100%">' \
                            + game + '</div></a>'
                price_drops.append([html_link, old_price, new_price, price_delta])
        # Also tracks any prices today that have returned a nan value
        elif new_price != new_price:
            nan_prices.append([game, base_url + game_id])
    # Replace nan prices today in df with the latest non-null value
    # (assume price has stayed the same if no price was found today)
    # NOTE(review): the loop variable below shadows the most_recent_price
    # list; zip() captured the list first, so this works, but renaming the
    # loop variable would be clearer.
    for price_today, game, most_recent_price in zip(game_price,
                                                    df.index.values.tolist(),
                                                    most_recent_price):
        if (price_today != price_today) & (most_recent_price >0):
            df.loc[game,date] = most_recent_price
drops = len(price_drops)
fails = len(failures)
nans = len(nan_prices)
# Checks if there is anything to email (will email price drops,
# request failures and nan prices).
if drops + fails + nans > 0:
    # Builds subject line for email including number of drops. failures
    # or null prices
    subject = 'Rupe Playstation Price Drop Alerts: '
    if drops > 0:
        subject += str(drops) + ' Drops, '
    if fails > 0:
        subject += str(fails) + ' Failures, '
    if nans > 0:
        subject += str(nans) + ' Price Not Found'
    # Create dataframe of price drop info to be emailed as a table
    price_drop_df = pd.DataFrame(price_drops,
                                 columns=['Game', 'Old Price',
                                          'New Price', 'Price Drop']
                                 )
    price_drop_df = price_drop_df.sort_values(by=['Price Drop'],
                                              ascending = False)
    # Create dataframe of null prices (no price found) to be emailed
    # as a table
    nan_prices_df = pd.DataFrame(nan_prices, columns=['Game', 'Game_ID'])
    # Create email using Gmail API
    try:
        # Create email message
        mail = create_message('me', 'ruperthart92@gmail.com', subject,
                              price_drop_df, failures, nan_prices_df)
        # Check that a token.pickle exists containing the gmail
        # credentials and load them
        # NOTE(review): if token.pickle is missing, `creds` is undefined and
        # the NameError below is silently caught by the broad except.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # Create the gmail service using credentials and send message
        service = build('gmail', 'v1', credentials=creds)
        message = (service.users().messages().send(userId='me', body=mail)
                   .execute())
        print('email sent')
    except:
        # Mac OS error alert in case gmail email fails to send
        applescript = """
        display dialog "Playstation_scraper email failed to send." ¬
        with title "Playstation_scraper: Email Failed" ¬
        with icon caution ¬
        buttons {"OK"}
        """
        subprocess.call("osascript -e '{}'".format(applescript), shell=True)
        print('email failed')
        traceback.print_exc()
# Convert date time headers to strings with the same format as the
# original csv (this is the format that excel uses when you save as csv)
dates = df.columns[1:].tolist()
dates_as_strings = [date_obj.strftime('%d/%m/%Y') for date_obj in dates]
df.columns = df.columns[:1].tolist() + dates_as_strings
df.to_csv('game_prices.csv')
print('ran on ', date)
| rhart-rup/Playstation-Store-Price-Drop-Alert | main.py | main.py | py | 13,550 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.chdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number"... |
32844059000 | import xlrd
import product
def excel_reader(file_name, base_dir="C:/Users/andym/PycharmProjects/FacebookScraper/"):
    """Read scraped listings from an Excel workbook into Product objects.

    Each row of the first worksheet is expected to hold, in column order:
    date, description, price and link.  One product.Product is built per row.

    Args:
        file_name: name of the workbook file to open.
        base_dir: directory the workbook lives in.  Defaults to the original
            hard-coded project folder so existing callers are unaffected.

    Returns:
        list of product.Product, one per worksheet row.
    """
    wb = xlrd.open_workbook(base_dir + file_name)
    sheet = wb.sheet_by_index(0)
    read_list = []
    for row in range(sheet.nrows):
        temp_product = product.Product()
        for col in range(sheet.ncols):
            value = sheet.cell_value(row, col)
            if col == 0:
                temp_product.date = value
            elif col == 1:
                temp_product.desc = value
            elif col == 2:
                temp_product.price = value
            elif col == 3:
                temp_product.link = value
            else:
                # More columns than the four known fields: likely a sheet
                # layout change worth investigating.
                print("Possible overflow detected in excelRead?")
        read_list.append(temp_product)
    return read_list
| andymangibbs/CraigslistScraper | excelRead.py | excelRead.py | py | 1,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "product.Product",
"line_number": 25,
"usage_type": "call"
}
] |
12243691897 | from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth.models import User, Permission
from django.contrib import admin
from django_comment import models
from .test_app.models import TestModel
from django_comment.admin import CommentedItemAdmin, CommentedItemInline
class CommentedItemAdminTestCase(TestCase):
    """Admin rules for comments: no manual adds, author cannot delete."""

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: one commented model, a plain author, a superuser,
        # and a CommentedItemAdmin bound to the default admin site.
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author')
        cls.superuser = User.objects.create(username='superuser',
                                            is_superuser=True)
        cls.request_factory = RequestFactory()
        url = reverse('admin:django_comment_commenteditem_add')
        cls.add_request = cls.request_factory.get(url)
        cls.commented_item_admin = CommentedItemAdmin(
            models.CommentedItem,
            admin.site
        )

    def test_item(self):
        # item() resolves a comment back to the object it was attached to.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        self.assertEqual(self.commented_item_admin.item(comment), self.a_model)

    def test_has_add_permission(self):
        # Comments may not be created directly through the admin add view.
        self.assertFalse(self.commented_item_admin.has_add_permission(
            self.add_request
        ))

    def test_has_delete_permission_with_author(self):
        # A comment's own (non-super) author cannot delete it via the admin.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:django_comment_commenteditem_delete',
                      args=(comment.id,))
        request = self.request_factory.get(url)
        request.user = self.author
        self.assertFalse(self.commented_item_admin.has_delete_permission(
            request, obj=comment
        ))

    def test_has_delete_permission_with_superuser(self):
        # Superusers retain the right to delete any comment.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:django_comment_commenteditem_delete',
                      args=(comment.id,))
        request = self.request_factory.get(url)
        request.user = self.superuser
        self.assertTrue(self.commented_item_admin.has_delete_permission(
            request, obj=comment
        ))
class CommentedItemInlineTestCase(TestCase):
    """Inline comment behaviour on the parent object's change page."""

    @classmethod
    def setUpTestData(cls):
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author')
        cls.request_factory = RequestFactory()
        cls.commented_item_inline = CommentedItemInline(
            TestModel,
            admin.site
        )

    def test_has_change_permission(self):
        # Existing comments must not be editable inline, even by their author.
        comment = self.a_model.comments.create(comment='test comment',
                                               author=self.author)
        url = reverse('admin:test_app_testmodel_change',
                      args=(self.a_model.id,))
        request = self.request_factory.get(url)
        request.user = self.author
        self.assertFalse(self.commented_item_inline.has_change_permission(
            request, obj=self.a_model
        ))
class HasCommentsAdminTestCase(TestCase):
    """End-to-end save of an inline comment through the admin change form."""

    @classmethod
    def setUpTestData(cls):
        cls.a_model = TestModel.objects.create()
        cls.author = User.objects.create(username='author', is_staff=True)
        # Grant all testmodel and commenteditem permissions so the author
        # can use the admin change view.
        permissions = set(Permission.objects.filter(
            codename__contains='testmodel'
        )) | set(Permission.objects.filter(
            codename__contains='commenteditem'
        ))
        cls.author.user_permissions.add(*permissions)

    def test_save_formset(self):
        # Posting the inline formset should create a comment whose author is
        # the logged-in user (assigned server-side, not by the form).
        url = reverse('admin:test_app_testmodel_change',
                      args=(self.a_model.id,))
        self.client.force_login(user=self.author)
        prefix = 'django_comment-commenteditem-content_type-object_id-'
        response = self.client.post(url, follow=True, data={
            prefix + 'TOTAL_FORMS': 1,
            prefix + 'INITIAL_FORMS': 0,
            prefix + '0-comment': 'test comment',
            '_continue': 'Save+and+continue+editing',
        })
        self.assertEqual(response.status_code, 200)
        comment = self.a_model.comments.first()
        self.assertEqual(comment.author, self.author)
| genosltd/django-comment | tests/test_admin.py | test_admin.py | py | 4,334 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "test_app.models.TestModel.objects.create",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "test_app.models.TestModel.objects",
"line_number": 16,
"usage_type": "attri... |
22543357667 | import jwt
import json
import logging
import time
from jwt import ExpiredSignatureError
logger = logging.getLogger("handler_logger")
logger.setLevel(logging.DEBUG)
def jwt_encode(obj):
    """Sign *obj* as an HS256 JWT and return it as a str ('' on failure).

    SECURITY NOTE(review): the signing secret is hard-coded (and duplicated
    in jwt_decode) -- it should come from configuration/environment.
    NOTE(review): PyJWT >= 2 returns str from encode(), so the
    .decode('utf-8') call assumes PyJWT 1.x -- confirm the pinned version.
    """
    try:
        return jwt.encode(obj,
                          '#0wc-0-#@#14e8rbk#bke_9rg@nglfdc3&6z_r6nx!q6&3##l=',
                          algorithm='HS256').decode('utf-8')
    except ValueError:
        logger.debug("Failed: Unable to generate JWT")
        return ""
def jwt_decode(token):
    """Verify and decode an HS256 JWT.

    Returns the payload dict on success.  An expired signature yields a
    synthetic payload {'exp': <one second in the past>} so callers can
    treat it uniformly as expired; a ValueError yields "".
    NOTE(review): the '' failure value has a different type from the dict
    returns -- callers must handle both; consider unifying.
    """
    try:
        return jwt.decode(token,
                          "#0wc-0-#@#14e8rbk#bke_9rg@nglfdc3&6z_r6nx!q6&3##l=",
                          algorithms="HS256")
    except ExpiredSignatureError as e:
        # Report an already-passed expiry timestamp instead of raising.
        return {"exp": int(time.time()-1)}
    except ValueError:
        logger.debug("Failed: Unable to decode JWT token")
        return ""
| gaurav3g/chat-sls-server | backend/utils/jwt_utils.py | jwt_utils.py | py | 842 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "jwt.encode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jwt.decode",
"line_... |
23129694122 | import speech_recognition as sr
from state import State
from ClientThread import*
import threading
class VoiceRecognizer:
    """Listen on the microphone and forward recognised voice commands.

    Phrases that appear in ``self.dictionary`` are shipped over a
    ClientThread socket from a background thread while the main thread
    keeps capturing audio.
    """

    # NOTE(review): runs once at class-definition time and mutates shared
    # State -- confirm this side effect is intentional.
    State.event = 'create'

    def __init__(self):
        self.client = ClientThread()
        self.r = sr.Recognizer()
        self.speech = ''
        # Last phrase returned by Google speech recognition ('' = none yet).
        self.recognitionResult = ''
        # Whitelist of command phrases allowed through to the server.
        self.dictionary = ["draw","click","clear","delete","delete all","right","left","up","middle","down","red","white","green","pink","create","create here","create this here",
                           "create that here","create that shape","create shape here","create this shape",
                           "create that shape here","create the shape here","in the right","in the left","in the middle"]

    def recognize_voice(self):
        """Capture up to 3 s of audio and store the recognised text."""
        with sr.Microphone() as source:
            self.r.adjust_for_ambient_noise(source)
            print("\n")
            print("Microphone activated...")
            print("Recognizing what's been said...")
            audio = self.r.listen(source,phrase_time_limit=3)
            try:
                self.recognitionResult = self.r.recognize_google(audio)
                print('You said : {}'.format(self.recognitionResult))
                print("\n")
            except:
                print("please say it again !")

    def sendData(self):
        """Busy-loop: send each whitelisted phrase to the server, then reset."""
        while(True):
            if(self.recognitionResult in self.dictionary):
                self.client.send(self.recognitionResult)
                self.recognitionResult = ''

    def startVoiceReco(self):
        """Start the sender thread, then recognise speech forever."""
        new_thread = threading.Thread(target=self.sendData)
        new_thread.start()
        while(True):
            self.recognize_voice()
| Moufdi96/Projet_IHM_Multimodal | speecheRecognizer.py | speecheRecognizer.py | py | 1,682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "state.State.event",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "state.State",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "speech... |
496475437 | from dagster_pandas import DataFrame
from google.cloud.bigquery.job import LoadJobConfig, QueryJobConfig
from google.cloud.bigquery.table import EncryptionConfiguration, TimePartitioning
from dagster import InputDefinition, List, Nothing, OutputDefinition, Path, check, solid
from .configs import (
define_bigquery_create_dataset_config,
define_bigquery_delete_dataset_config,
define_bigquery_load_config,
define_bigquery_query_config,
)
from .types import BigQueryLoadSource
_START = 'start'
def _preprocess_config(cfg):
destination_encryption_configuration = cfg.get('destination_encryption_configuration')
time_partitioning = cfg.get('time_partitioning')
if destination_encryption_configuration is not None:
cfg['destination_encryption_configuration'] = EncryptionConfiguration(
kms_key_name=destination_encryption_configuration
)
if time_partitioning is not None:
cfg['time_partitioning'] = TimePartitioning(**time_partitioning)
return cfg
def bq_solid_for_queries(sql_queries):
    """Build a solid that executes *sql_queries* against BigQuery.

    Expects a BQ client to be provisioned in resources as
    context.resources.bigquery.  The generated solid returns one pandas
    DataFrame per query, in order.
    """
    sql_queries = check.list_param(sql_queries, 'sql queries', of_type=str)

    @solid(
        input_defs=[InputDefinition(_START, Nothing)],
        output_defs=[OutputDefinition(List[DataFrame])],
        config=define_bigquery_query_config(),
        required_resource_keys={'bigquery'},
        metadata={'kind': 'sql', 'sql': '\n'.join(sql_queries)},
    )
    def bq_solid(context):  # pylint: disable=unused-argument
        query_job_config = _preprocess_config(context.solid_config.get('query_job_config', {}))

        # Retrieve results as pandas DataFrames
        results = []
        for sql_query in sql_queries:
            # We need to construct a new QueryJobConfig for each query.
            # See: https://bit.ly/2VjD6sl
            cfg = QueryJobConfig(**query_job_config) if query_job_config else None
            context.log.info(
                'executing query %s with config: %s'
                % (sql_query, cfg.to_api_repr() if cfg else '(no config provided)')
            )
            results.append(
                context.resources.bigquery.query(sql_query, job_config=cfg).to_dataframe()
            )
        return results

    return bq_solid
BIGQUERY_LOAD_CONFIG = define_bigquery_load_config()
@solid(
    input_defs=[InputDefinition('paths', List[Path])],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_gcs_paths_to_bq(context, paths):
    """Load the files at the given GCS paths into a BigQuery table."""
    return _execute_load_in_source(context, paths, BigQueryLoadSource.GCS)
@solid(
    input_defs=[InputDefinition('df', DataFrame)],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_df_to_bq(context, df):
    """Load a pandas DataFrame into a BigQuery table."""
    return _execute_load_in_source(context, df, BigQueryLoadSource.DataFrame)
@solid(
    input_defs=[InputDefinition('path', Path)],
    output_defs=[OutputDefinition(Nothing)],
    config=BIGQUERY_LOAD_CONFIG,
    required_resource_keys={'bigquery'},
)
def import_file_to_bq(context, path):
    """Load a single local file into a BigQuery table."""
    return _execute_load_in_source(context, path, BigQueryLoadSource.File)
def _execute_load_in_source(context, source, source_name):
    """Shared implementation for the three load solids above.

    Builds an optional LoadJobConfig from solid config, delegates to the
    bigquery resource, and blocks until the load job finishes (.result()).
    """
    destination = context.solid_config.get('destination')
    load_job_config = _preprocess_config(context.solid_config.get('load_job_config', {}))
    cfg = LoadJobConfig(**load_job_config) if load_job_config else None
    context.log.info(
        'executing BQ load with config: %s for source %s'
        % (cfg.to_api_repr() if cfg else '(no config provided)', source)
    )
    context.resources.bigquery.load_table_from_source(
        source_name, source, destination, job_config=cfg
    ).result()
@solid(
    input_defs=[InputDefinition(_START, Nothing)],
    config=define_bigquery_create_dataset_config(),
    required_resource_keys={'bigquery'},
)
def bq_create_dataset(context):
    '''BigQuery Create Dataset.

    This solid encapsulates creating a BigQuery dataset.

    Expects a BQ client to be provisioned in resources as context.resources.bigquery.
    '''
    (dataset, exists_ok) = [context.solid_config.get(k) for k in ('dataset', 'exists_ok')]
    context.log.info('executing BQ create_dataset for dataset %s' % (dataset))
    # exists_ok suppresses the conflict error when the dataset already exists.
    context.resources.bigquery.create_dataset(dataset, exists_ok)
@solid(
    input_defs=[InputDefinition(_START, Nothing)],
    config=define_bigquery_delete_dataset_config(),
    required_resource_keys={'bigquery'},
)
def bq_delete_dataset(context):
    '''BigQuery Delete Dataset.

    This solid encapsulates deleting a BigQuery dataset.

    Expects a BQ client to be provisioned in resources as context.resources.bigquery.
    '''
    (dataset, delete_contents, not_found_ok) = [
        context.solid_config.get(k) for k in ('dataset', 'delete_contents', 'not_found_ok')
    ]
    context.log.info('executing BQ delete_dataset for dataset %s' % dataset)
    # delete_contents also drops the dataset's tables; not_found_ok
    # suppresses the error when the dataset does not exist.
    context.resources.bigquery.delete_dataset(
        dataset, delete_contents=delete_contents, not_found_ok=not_found_ok
    )
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-gcp/dagster_gcp/bigquery/solids.py | solids.py | py | 5,243 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "google.cloud.bigquery.table.EncryptionConfiguration",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.table.TimePartitioning",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dagster.check.list_param",
"line_number": 40... |
37349168777 | from ase.units import Ha
import numpy as np
from my_gpaw.xc.fxc import KernelWave, XCFlags, FXCCache
from my_gpaw.xc.rpa import GCut
from my_gpaw.response.pair_functions import SingleQPWDescriptor
from my_gpaw.pw.descriptor import PWMapping
class G0W0Kernel:
    """Exchange-correlation kernel factory for G0W0 calculations.

    For xc='RPA' the kernel is the identity in the plane-wave basis;
    otherwise the spin kernel is computed (and disk-cached) by
    calculate_spinkernel().
    """

    def __init__(self, xc, context, **kwargs):
        self.xc = xc
        self.context = context
        self.xcflags = XCFlags(xc)
        # Remaining keyword arguments are forwarded to calculate_spinkernel.
        self._kwargs = kwargs

    def calculate(self, qpd):
        # RPA needs no xc kernel: identity matrix of plane-wave dimension.
        if self.xc == 'RPA':
            return np.eye(qpd.ngmax)

        return calculate_spinkernel(
            qpd=qpd,
            xcflags=self.xcflags,
            context=self.context,
            **self._kwargs)
def calculate_spinkernel(*, ecut, xcflags, gs, qd, ns, qpd, context):
    """Compute (or load from cache) the spin xc kernel fhxc for one q-point.

    Kernels are cached on disk per q-point; on a cache miss the kernels for
    *all* q-points are calculated and written.  When the requested
    plane-wave cutoff is below *ecut*, the kernel is cut down to the
    reduced basis before being returned.
    """
    assert xcflags.spin_kernel
    xc = xcflags.xc
    ibzq_qc = qd.ibzk_kc

    # Index of qpd.q_c among the irreducible q-points (nearest match,
    # verified exact by the assert).
    iq = np.argmin(np.linalg.norm(ibzq_qc - qpd.q_c[np.newaxis], axis=1))
    assert np.allclose(ibzq_qc[iq], qpd.q_c)

    ecut_max = ecut * Ha  # XXX very ugly this
    cache = FXCCache(comm=context.comm,
                     tag=gs.atoms.get_chemical_formula(mode='hill'),
                     xc=xc, ecut=ecut_max)
    handle = cache.handle(iq)

    if not handle.exists():
        # Somehow we calculated many q even though this function
        # only works on one q? Very confusing.
        kernel = KernelWave(
            q_empty=iq, ibzq_qc=qd.ibzk_kc,
            xc=xcflags.xc,
            ecut=ecut_max, gs=gs,
            context=context)

        # The first time we miss the cache, we calculate /all/ iq.
        # (Whether that's the best strategy can be discussed.)
        for iq_calculated, array in kernel.calculate_fhxc():
            cache.handle(iq_calculated).write(array)

    fv = handle.read()
    assert fv is not None

    # If we want a reduced plane-wave description, create qpd mapping
    if qpd.ecut < ecut:
        # Recreate nonreduced plane-wave description corresponding to ecut_max
        qpdnr = SingleQPWDescriptor.from_q(qpd.q_c, ecut, qpd.gd,
                                           gammacentered=qpd.gammacentered)
        pw_map = PWMapping(qpd, qpdnr)
        gcut = GCut(pw_map.G2_G1)
        fv = gcut.spin_cut(fv, ns=ns)

    return fv
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/response/g0w0_kernels.py | g0w0_kernels.py | py | 2,219 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "my_gpaw.xc.fxc.XCFlags",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
... |
21120272187 | import sys
import pickle
import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
sys.path.append("../") # nopep8
from model.dialog_acts import Encoder
from DataLoader.bucket_and_batch import bucket_and_batch
import numpy as np
import string
import random
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
# Gradient-norm clipping threshold used in the training loop below.
max_grad_norm = 1

# Load the preprocessed dialog-act dataset (vectorised queries + labels).
with open("../data/processed_data.pkl", "rb") as fp:
    data = pickle.load(fp)

labels2idx = data["labels2idx"]
idx2labels = {i: v for v, i in labels2idx.items()}
train_queries_vec = data["train_queries_vec"]
train_acts_vec = data["train_acts_vec"]
test_queries_vec = data["test_queries_vec"]
test_acts_vec = data["test_acts_vec"]

# D = input feature dimension; one output unit per dialog-act label.
model = Encoder(D=test_queries_vec.shape[-1], classes_num=len(labels2idx))
# NOTE(review): unconditional .cuda() assumes a GPU even though `device`
# above falls back to CPU -- this will fail on CPU-only machines.
model = model.cuda()

parameter_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Parameter Count: ", parameter_count)

optimizer = T.optim.Adam(model.parameters(), lr=1e-3)
def loss_fn(logits, labels, l2=1e-6):
    """MSE between logits and multi-hot labels, plus L2 regularisation.

    The penalty sums the squared norms of all non-bias, non-embedding
    parameters of the global `model`, scaled by *l2*.
    """
    regularization = T.tensor(0.).to(device)  # accumulator on the active device
    for name, param in model.named_parameters():
        if 'bias' not in name and 'embedding' not in name:
            regularization += T.norm(param).pow(2)
    loss = nn.MSELoss()
    output = loss(logits, labels) + l2*regularization
    return output
# Split train/test data into batches of 64; bucket_and_batch presumably
# groups similar-length inputs first -- see DataLoader.bucket_and_batch.
batches_train_queries, batches_train_classes = bucket_and_batch(
    train_queries_vec, train_acts_vec, 64, len(labels2idx))

batches_test_queries, batches_test_classes = bucket_and_batch(
    test_queries_vec, test_acts_vec, 64, len(labels2idx))
def predict(queries, classes, train=True):
    """Run the global model on one batch and score top-2 label overlap.

    Args:
        queries: batch of query vectors (array-like, fed to the model).
        classes: multi-hot ground-truth label matrix for the batch.
        train: toggles model.train() vs model.eval() mode.

    Returns:
        (loss, accuracy): accuracy is the fraction of the two highest-scoring
        predicted label indices that also rank top-2 in the ground truth.
    """
    global model

    if train:
        model = model.train()
    else:
        model = model.eval()

    logits = model(T.tensor(queries).to(device))
    loss = loss_fn(logits, T.tensor(classes).float().to(device))

    # Top-2 predicted label indices per sample.
    _, sorted_idx = T.sort(logits, dim=-1, descending=True)
    sorted_idx = sorted_idx[:, 0:2]
    sorted_idx = sorted_idx.cpu().numpy().tolist()

    # Top-2 ground-truth label indices per sample.
    _, gold_sorted_idx = T.sort(T.tensor(classes).to(device), dim=-1, descending=True)
    gold_sorted_idx = gold_sorted_idx[:, 0:2]
    gold_sorted_idx = gold_sorted_idx.cpu().numpy().tolist()

    # Count how many predicted top-2 indices appear in the gold top-2.
    score = 0
    total = 0
    for sorted_id, gold_sorted_id in zip(sorted_idx, gold_sorted_idx):
        for id in sorted_id:
            if id in gold_sorted_id:
                score += 1
            total += 1

    return loss, (score/total)
best_val_accuracy = 0

for epoch in range(100):
    # ---- training pass ----
    i = 0
    for batch_X, batch_Y in zip(batches_train_queries, batches_train_classes):
        loss, accuracy = predict(batch_X, batch_Y, train=True)
        loss.backward()
        # Clip gradients before the optimizer step to stabilise training.
        T.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        optimizer.zero_grad()
        if i % 100 == 0:
            print("Step {}, Loss: {}, Accuracy: {}".format(i, loss, accuracy))
        i += 1

    print("\n\nStarting Validation\n\n")

    # ---- validation pass (no gradients) ----
    total_val_accuracy = 0
    i = 0
    for batch_X, batch_Y in zip(batches_test_queries, batches_test_classes):
        with T.no_grad():
            loss, accuracy = predict(batch_X, batch_Y, train=False)
        total_val_accuracy += accuracy
        if i % 100 == 0:
            print("Step {}, Loss: {}, Accuracy: {}".format(i, loss, accuracy))
        i += 1

    mean_accuracy = total_val_accuracy/len(batches_test_queries)
    print("\n\nEpoch {}, Validation Result: Accuracy: {}\n".format(epoch, mean_accuracy))

    # Checkpoint model + optimizer only when validation accuracy improves.
    if mean_accuracy > best_val_accuracy:
        best_val_accuracy = mean_accuracy
        T.save({
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }, "../Model_Backup/model.pt")
        print("\nCheckpoint Saved\n")
| JRC1995/Chatbot | Classifier/train_and_test/train.py | train.py | py | 3,877 | python | en | code | 79 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
... |
31061296375 |
from ..utils import Object
class GetBackgroundUrl(Object):
    """
    Constructs a persistent HTTP URL for a background

    Attributes:
        ID (:obj:`str`): ``GetBackgroundUrl``

    Args:
        name (:obj:`str`):
            Background name
        type (:class:`telegram.api.types.BackgroundType`):
            Background type

    Returns:
        HttpUrl

    Raises:
        :class:`telegram.Error`
    """
    # Method identifier used when serialising this request.
    ID = "getBackgroundUrl"

    def __init__(self, name, type, extra=None, **kwargs):
        self.extra = extra
        self.name = name  # str
        self.type = type  # BackgroundType

    @staticmethod
    def read(q: dict, *args) -> "GetBackgroundUrl":
        # Rebuild the request from a raw dict; the nested background type
        # is deserialised via Object.read.
        name = q.get('name')
        type = Object.read(q.get('type'))
        return GetBackgroundUrl(name, type)
| iTeam-co/pytglib | pytglib/api/functions/get_background_url.py | get_background_url.py | py | 801 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 35,
"usage_type": "name"
}
] |
40961470159 | # coding: utf-8
import datetime
from simpleai.search import astar, SearchProblem
from simpleai.search.viewers import BaseViewer
class RobotProblem(SearchProblem):
def __init__(self, pallets_a_entregar):
'''
En el estado necesitamos llevar la posición de los pallets, la del
robot, si tenemos un pallet cargado cual es y la lista de pallets por
llevar. El estado entonces lo vamos a representar con una tupla con
estos elementos, las posiciones serán tuplas de coordenadas y para los
pallets una tupla de posiciones para cada pallet. Si el pallet deja de
estar en el tablero la posición sera None.
Las coordenadas arrancan en (0, 0). Por ejemplo, la posicion de entrega
es (2, 4)
'''
self.posicion_entrega = (2, 4)
pallets = ((0, 2), (1, 0), (3, 0), (2, 0), (0, 2),
(4, 0), (4, 1), (2, 2), (0, 4), (1, 1))
robot = (1, 4)
cargado = None
inicial = (pallets, robot, cargado, tuple([p-1 for p in pallets_a_entregar]))
super(RobotProblem, self).__init__(inicial)
def is_goal(self, state):
'Nuestra meta es que todos los pallets hayan sido entregados'
return len(state[3]) == 0
def actions(self, state):
'''
Las acciones posibles son moverse hacia los 4 lados, dejar y agarrar.
Para poder moverse no debemos salir del tablero o entrar en la casilla
de un pallet que no vamos a tomar.
Para agarrar debemos estar en la misma posicion que el pallet. Si
estamos en la misma posición que un pallet, entonces estamos obligados
a tomarlo.
Para dejar un pallet tenemos que estar en la posición de entrega con un
pallet cargado.
'''
acciones = []
pallets, robot, cargado, pendientes = state
x, y = robot
pallet_en_posicion = self.buscar_pallet_en_coordenadas(x, y, pallets)
if pallet_en_posicion is not None:
acciones.append(('Agarrar', None, None))
else:
acciones.extend(self.calcular_movimientos(state))
if cargado is not None and robot == self.posicion_entrega:
acciones.append(('Dejar', None, None))
return acciones
def calcular_movimientos(self, state):
posibles_movimientos = [
('Arriba', -1, 0),
('Abajo', 1, 0),
('Izquierda', 0, -1),
('Derecha', 0, 1),
]
movimientos = []
pallets, robot, cargado, pendientes = state
cx, cy = robot
for accion, dx, dy in posibles_movimientos:
nx, ny = cx + dx, cy + dy
if 0 <= nx <= 4 and 0 <= ny <= 4:
p = self.buscar_pallet_en_coordenadas(nx, ny, pallets)
if p is None or (p in pendientes and cargado is None):
movimientos.append((accion, dx, dy))
return movimientos
def buscar_pallet_en_coordenadas(self, x, y, pallets):
for pallet, posicion in enumerate(pallets):
if (x, y) == posicion:
return pallet
return None
def result(self, state, action):
pallets, robot, cargado, pendientes = state
x, y = robot
accion, dx, dy = action
if accion == 'Dejar':
pendientes = tuple([w for w in pendientes if w != cargado])
cargado = None
elif accion == 'Agarrar':
cargado = self.buscar_pallet_en_coordenadas(x, y, pallets)
pallet_list = list(pallets)
pallet_list[cargado] = None
pallets = tuple(pallet_list)
else:
robot = (x + dx, y + dy)
return (pallets, robot, cargado, pendientes)
def cost(self, state1, action, state2):
'El costo de la acción es siempre 1'
return 1
def heuristic(self, state):
'''
Una posible heuristica es la suma de las distancias de Manhattan de
cada uno de los pallets a quitar
'''
pallets, robot, cargado, pendientes = state
posiciones_pendientes = [pallets[x] for x in pendientes if x != cargado]
if cargado is not None:
posiciones_pendientes.append(robot)
return sum([manhattan(x, self.posicion_entrega)
for x in posiciones_pendientes])
def state_representation(self, state):
pallets, robot, cargado, pendientes = state
template = [[' ']*5 for x in range(5)]
for pallet, pos in enumerate(pallets):
if pos is not None:
fila, columna = pos
template[fila][columna] = str(pallet+1)
x, y = self.posicion_entrega
template[x][y] = 'E'
r = 'R'
if cargado:
r = 'R' + str(cargado+1)
x, y = robot
template[x][y] = r
return '\n'.join([' | '.join(fila) for fila in template])
def manhattan(pos1, pos2):
x1, y1 = pos1
x2, y2 = pos2
return abs(x2 - x1) + abs(y2 - y1)
def main():
problema = RobotProblem([8, 3, 9])
visor = BaseViewer()
inicio = datetime.datetime.now()
resultado = astar(problema, graph_search=True, viewer=visor)
tiempo = (datetime.datetime.now() - inicio).total_seconds()
for i, (accion, estado) in enumerate(resultado.path()):
print('Acción N: {} {} ## Estado: {}'.format(i, accion, estado))
print("Costo: {}".format(resultado.cost))
print("Nodos explorados: {}".format(visor.stats['visited_nodes']))
print("Tamaño máximo frontera: {}".format(visor.stats['max_fringe_size']))
print("Tiempo transcurrido: {} segundos".format(tiempo))
if __name__ == '__main__':
main()
| ucse-ia/ucse_ia | practicas/robot_pallets.py | robot_pallets.py | py | 5,721 | python | es | code | 5 | github-code | 36 | [
{
"api_name": "simpleai.search.SearchProblem",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "simpleai.search.viewers.BaseViewer",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 152,
"usage_type": "call"
},
... |
36725320029 | # -*- coding: utf-8 -*-
from preprocess import Channel
from workflow.cf_workflow import run as user_cf
from workflow.if_workflow import run as user_if
from workflow.rsif_workflow import run as user_rsif
from workflow.lfm_workflow import run as lfm
from workflow.prank_workflow import run as prank
from flask import Flask, jsonify, abort, make_response, request
from workflow.turi_workflow import runByUser as tcUser
from workflow.turi_workflow import runByItems as tcItems
from workflow.turi_workflow import runPopular as tcPopular
from workflow.turi_workflow import runSaveUserData as tcSaveUserData
from workflow.turi_workflow import runGetUserData as tcGetUserData
app = Flask(__name__)
@app.route('/recommend/<method_name>', methods=['GET', 'POST'])
def methods(method_name):
if method_name == 'preprocess':
Channel().process()
elif method_name == 'cf':
return cfMed()
elif method_name == 'rsif':
return rsifMed()
elif method_name == 'if':
return ifMed()
elif method_name == 'lfm':
return lfmMed()
elif method_name == 'prank':
return prankMed()
elif method_name == 'tcUser':
return tcUserMed()
elif method_name == 'tcItems':
return tcItemsMed()
elif method_name == 'tcPopular':
return tcPopularMed()
elif method_name == 'setData':
return tcSetData()
elif method_name == 'getData':
return tcGetData()
else:
abort(404)
def cfMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return jsonify(user_cf(user_id=userId, topItems=topN))
def ifMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return jsonify(user_if(user_id=userId, topItems=topN))
def rsifMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return jsonify(user_rsif(user_id=userId, topItems=topN))
def lfmMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return jsonify(lfm(userId=userId, topItems=topN))
def prankMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return jsonify(prank(userId=userId, topItems=topN))
def tcUserMed():
userId = request.args.get('userId', default=None, type=int)
if userId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return tcUser(userId=userId, topItems=topN)
def tcItemsMed():
itemId = request.args.get('itemId', default=None, type=int)
if itemId is None:
abort(404)
topN = request.args.get('topN', default=10, type=int)
return tcItems(itemId=itemId, topItems=topN)
def tcPopularMed():
userId = request.args.get('userId', default=None, type=int)
topN = request.args.get('topN', default=10, type=int)
return tcPopular(userId=userId, topItems=topN)
def tcSetData():
contentType = request.headers['Content-Type']
if contentType == 'application/json':
jsonStr = request.json
infoArray = jsonStr['info']
for info in infoArray:
#key = userId, itemId, rating
tcSaveUserData(info)
return jsonify(infoArray)
else:
abort(415)
def tcGetData():
userId = request.args.get('userId', default=None, type=int)
return tcGetUserData(userId)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(415)
def errorType_415(error):
return make_response(jsonify({'error': 'Unsupported Content Type'}), 415)
if __name__ == '__main__':
#app.run(host='192.168.1.241', debug=True)
app.run(host='127.0.0.1', debug=True)
| ang0410/recommend | manage.py | manage.py | py | 4,408 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "preprocess.Channel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
... |
11439201598 | from itertools import product
from typing import Union
Coor = Union[tuple[int, int, int], tuple[int, int, int, int]]
CubeMap = set[Coor]
def get_input() -> CubeMap:
with open('input.txt', 'r') as f:
return {(i, j, 0) for i, l in enumerate(f.readlines()) for j, ch in enumerate(l) if l and ch == '#'}
def neigh(c: Coor, space: int) -> set[Coor]:
def coor_sum(a, b):
return tuple(x + y for x, y in zip(a, b))
return {coor_sum(c, nv) for nv in product([-1, 0, 1], repeat=space) if not all(x == 0 for x in nv)}
def evolve(active: CubeMap, space: int=3) -> CubeMap:
next_active, visited = set(), set()
for c in active:
for x in neigh(c, space) - visited:
if x in active and len(neigh(x, space) & active) in [2, 3]:
next_active.add(x)
elif not x in active and len(neigh(x, space) & active) == 3:
next_active.add(x)
visited.add(x)
return next_active
def part_1(initial) -> int:
active = initial
for _ in range(6):
active = evolve(active)
return len(active)
def part_2(initial) -> int:
active = {(*c, 0) for c in initial} # convert to 4d
for _ in range(6):
active = evolve(active, space=4)
return len(active)
if __name__ == "__main__":
initial = get_input()
print(f'Part 1 answer: {part_1(initial)}')
print(f'Part 2 answer: {part_2(initial)}')
| markopuzav/aoc-2020 | day17/solution.py | solution.py | py | 1,413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 14,
"usage_type": "call"
}
] |
15827248022 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import importlib
import os
from emission.core.wrapper.trip_old import Coordinate
import requests
import emission.core.wrapper.entry as ecwe
import emission.analysis.intake.cleaning.clean_and_resample as clean
import emission.net.ext_service.geocoder.nominatim as eco
#Setting query URLs
OPENSTREETMAP_QUERY_URL = os.environ.get("OPENSTREETMAP_QUERY_URL")
GEOFABRIK_QUERY_URL = os.environ.get("GEOFABRIK_QUERY_URL")
NOMINATIM_CONTAINER_URL = os.environ.get("NOMINATIM_CONTAINER_URL")
class NominatimTest(unittest.TestCase):
maxDiff = None
def setUp(self):
#Creates a fake, cleaned place in Rhode Island
fake_id = "place_in_rhodeisland"
key = "segmentation/raw_place"
write_ts = 1694344333
data = {'source': 'FakeTripGenerator','location': {'type': 'Point', 'coordinates': [-71.4128343, 41.8239891]}}
fake_place = ecwe.Entry.create_fake_entry(fake_id, key, data, write_ts)
self.fake_place = fake_place
#When a nominatim service is called, we set the value of the NOMINATIM_QUERY_URL environment variable in nominatim.py and re-load the module.
def nominatim(service):
if service == "container":
os.environ["NOMINATIM_QUERY_URL"] = NOMINATIM_CONTAINER_URL
importlib.reload(eco)
elif service == "geofabrik":
os.environ["NOMINATIM_QUERY_URL"] = GEOFABRIK_QUERY_URL
importlib.reload(eco)
elif service == "OSM":
os.environ["NOMINATIM_QUERY_URL"] = OPENSTREETMAP_QUERY_URL
importlib.reload(eco)
#Basic query to check that OSM, the Rhode Island Container, and geofabrik are returning the same data.
def test_geofabrik_and_nominatim(self):
lat, lon = 41.8239891, -71.4128343
NominatimTest.nominatim("container")
container_result = eco.Geocoder.get_json_reverse(lat,lon)
NominatimTest.nominatim("OSM")
osm_result = eco.Geocoder.get_json_reverse(lat,lon)
NominatimTest.nominatim("geofabrik")
geofabrik_result = eco.Geocoder.get_json_reverse(lat,lon)
key_list = ['osm_id', 'boundingbox']
for k in key_list:
self.assertEqual(osm_result[k], geofabrik_result[k])
self.assertEqual(container_result[k], geofabrik_result[k])
#Checks the display name generated by get_filtered_place in clean_and_resample.py, which creates a cleaned place from the fake place
# and reverse geocodes with the coordinates.
def test_get_filtered_place(self):
fake_place_raw = self.fake_place
fake_place_data = clean.get_filtered_place(fake_place_raw).__getattr__("data")
actual_result = fake_place_data.__getattr__("display_name")
expected_result = "Dorrance Street, Providence"
self.assertEqual(expected_result, actual_result)
#Testing make_url_geo, which creates a query URL from the input string.
def test_make_url_geo(self):
expected_result = GEOFABRIK_QUERY_URL + "/search?q=Providence%2C+Rhode+Island&format=json"
NominatimTest.nominatim("geofabrik")
actual_result = eco.Geocoder.make_url_geo("Providence, Rhode Island")
self.assertEqual(expected_result, actual_result)
#Testing make_url_reverse, which creates a query url from a lat and lon.
def test_make_url_reverse(self):
NominatimTest.nominatim("geofabrik")
lat, lon = 41.8239891, -71.4128343
expected_result = GEOFABRIK_QUERY_URL + (f"/reverse?lat={lat}&lon={lon}&format=json")
actual_result = (eco.Geocoder.make_url_reverse(lat, lon))
self.assertEqual(expected_result, actual_result)
#Testing get_json_geo, which passes in an address as a query. Compares three select k,v pairs in the results.
def test_get_json_geo(self):
NominatimTest.nominatim("geofabrik")
expected_result = {'place_id': 132490, 'licence': 'Data © OpenStreetMap contributors, ODbL 1.0. https://osm.org/copyright', 'osm_type': 'way', 'osm_id': 141567710, 'boundingbox': ['41.8325787', '41.8332278', '-71.4161848', '-71.4152064'], 'lat': '41.8330097', 'lon': '-71.41568124868104', 'display_name': 'State of Rhode Island Department of Administration, 1, Park Street, Downtown, Providence, Providence County, Rhode Island, 02908, United States', 'class': 'building', 'type': 'civic', 'importance': 1.75001}
actual_result = eco.Geocoder.get_json_geo("State of Rhode Island Department of Administration, 1, Park Street, Downtown, Providence, Providence County, 02908, United States")[0]
key_list = ['osm_id', 'boundingbox', 'display_name']
for k in key_list:
self.assertEqual(expected_result[k], actual_result[k])
#Testing the geocode function, which passes in an address and gets latitude and longitude.
# Test creates instance of coordinates using coordinate class. Getting lat and lon of the coordinate using get_lat and get_lon methods from the class.
def test_geocode(self):
NominatimTest.nominatim("geofabrik")
expected_result_lon = Coordinate(41.8239891, -71.4128343).get_lon()
expected_result_lat = Coordinate(41.8239891, -71.4128343).get_lat()
actual_result = eco.Geocoder.geocode("Providence, Rhode Island")
actual_result_lon = actual_result.get_lon()
actual_result_lat = actual_result.get_lat()
self.assertEqual(expected_result_lon, actual_result_lon)
self.assertEqual(expected_result_lat, actual_result_lat)
#Testing get_json_reverse, which reverse geocodes from a lat and lon. Tested result was modified to only look at the name returned with the coordinates, rather than the entire dictionary.
def test_get_json_reverse(self):
NominatimTest.nominatim("geofabrik")
expected_result = "Providence City Hall"
actual_result = eco.Geocoder.get_json_reverse(41.8239891, -71.4128343)["display_name"].split(",")[0]
self.assertEqual(expected_result, actual_result)
#Testing reverse_geocode, which reverse geocodes from a lat and lon and returns only the display name.
def test_reverse_geocode(self):
NominatimTest.nominatim("geofabrik")
expected_result = "Portugal Parkway, Fox Point, Providence, Providence County, Rhode Island, 02906, United States"
actual_result = eco.Geocoder.reverse_geocode(41.8174476, -71.3903767)
self.assertEqual(expected_result, actual_result)
if __name__ == '__main__':
unittest.main() | e-mission/e-mission-server | emission/individual_tests/TestNominatim.py | TestNominatim.py | py | 6,717 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_... |
4619575632 | #!/usr/bin/env python
import django
from net_system.models import NetworkDevice, Credentials
from pprint import pprint
rtrs = {
"test-sw1": {
"port": "22",
"username": "admin1",
"eapi_port": "443",
"password": "99saturday",
"ip": "1.1.1.1",
"device_type": "arista_eos"
},
"test-sw2": {
"port": "22",
"username": "admin1",
"eapi_port": "443",
"password": "99saturday",
"ip": "2.2.2.2",
"device_type": "arista_eos"
}
}
def dump_devices():
for obj in NetworkDevice.objects.all():
pprint(obj.__dict__)
def dump_credentials():
for obj in Credentials.objects.all():
pprint(obj.__dict__)
def main():
django.setup()
curCred = Credentials.objects.get(username='admin1')
# Add Device
dbDevice = NetworkDevice(
device_name='test-sw4',
device_type='cisco',
ip_address='2.2.2.2',
port='22',
vendor='cisco',
credentials=curCred)
dbDevice.save()
# Add device get_or_create
dbDevice = NetworkDevice.objects.get_or_create(
device_name='test-sw5',
device_type='cisco',
ip_address='2.2.2.2',
port='22',
vendor='cisco',
credentials=curCred)
dump_devices()
dump_credentials()
if __name__ == '__main__':
main()
| jerry-bonner/pynet | class8/ex3.py | ex3.py | py | 1,335 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "net_system.models.NetworkDevice.objects.all",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "net_system.models.NetworkDevice.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "net_system.models.NetworkDevice",
"line_number": 27,... |
34338165702 | # https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder:
return None
if len(preorder) == 1:
return TreeNode(preorder[0], None, None)
root = TreeNode(preorder[0])
leftLen = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:1+leftLen], inorder[:leftLen])
root.right = self.buildTree(preorder[leftLen+1:], inorder[leftLen+1:])
return root
| 0x0400/LeetCode | p105.py | p105.py | py | 788 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
}
] |
11526198960 | import asyncio
import json
import multiprocessing as mp
from importlib import import_module
from django import http
from django.conf import settings
from django.core.cache import caches
from django.core.handlers.asgi import ASGIRequest
from django.contrib import auth
from django.utils import timezone
from asgiref.sync import sync_to_async
from loguru import logger
from worlds.models import Job, StreamLog
def add_websocket(app):
async def websocket_app(scope, receive, send):
if scope["type"] == "websocket":
await logging_socket(scope, receive, send)
return
await app(scope, receive, send)
return websocket_app
class AsyncWarpzoneRequest(ASGIRequest):
def __init__(self, scope, body_file):
scope['method'] = 'GET'
super().__init__(scope, body_file)
self.WS = http.QueryDict(scope.get('query_string', b'').decode())
def init_request(request):
engine = import_module(settings.SESSION_ENGINE)
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = engine.SessionStore(session_key)
request.user = auth.get_user(request)
def get_job(jid, obj=False):
job = Job.objects.filter(id=jid).first()
if job:
if obj:
return job
return job.to_json()
return {}
def get_log(job, pod, obj=False):
return StreamLog.objects.filter(job=job, pod=pod).first()
async def watch_log_data(job, pod, send, log_queue):
lines = 0
wait = 0.1
while 1:
try:
await asyncio.sleep(wait)
wait = 3.0
log = await sync_to_async(get_log, thread_sensitive=True)(job, pod)
if log:
if log.lines != lines:
lines_send = ''
line_array = []
for i in range(lines, log.lines):
line_array.append(f'{pod}-{i}')
if line_array:
line_dict = caches['default'].get_many(line_array)
msg_lines = ''
for l in line_array:
m = line_dict.get(l, None)
if m is not None:
msg_lines += m
if msg_lines:
msg = {'type': 'log', 'data': msg_lines}
await send({'type': 'websocket.send', 'text': json.dumps(msg)})
lines = log.lines
if log.status in ['completed', 'failed']:
break
except:
import traceback
traceback.print_exc()
raise
try:
if log_queue.get_nowait():
log_queue.task_done()
caches['default'].set(f'shutdown-{pod}', 'shutdown', 60)
return
except asyncio.QueueEmpty:
pass
async def watch_job_data(job, send, queue):
jdata = await sync_to_async(get_job, thread_sensitive=True)(job)
last = timezone.now()
while 1:
await asyncio.sleep(0.1)
now = timezone.now()
diff = now - last
if diff.total_seconds() > 5:
last = now
new_data = await sync_to_async(get_job, thread_sensitive=True)(job)
if new_data['modified'] != jdata['modified']:
jdata = new_data
msg = {'type': 'job', 'data': jdata}
logger.info('Sending job update: {} {}', jdata['id'], jdata['status'])
await send({'type': 'websocket.send', 'text': json.dumps(msg)})
try:
if queue.get_nowait():
queue.task_done()
return
except asyncio.QueueEmpty:
pass
async def logging_socket(scope, receive, send):
request = AsyncWarpzoneRequest(scope, None)
await sync_to_async(init_request, thread_sensitive=True)(request)
task = None
log_task = None
log_queue = None
connected = False
while 1:
event = await receive()
job = request.WS.get('job')
pod = request.WS.get('pod')
if event['type'] == 'websocket.connect':
logger.info('Websocket Connected')
if not request.user.is_authenticated:
logger.info('User not authenticated, Closing Socket')
await send({'type': 'websocket.close'})
return
job_queue = asyncio.Queue()
task = asyncio.create_task(watch_job_data(job, send, job_queue))
if pod:
log_queue = asyncio.Queue()
log_task = asyncio.create_task(watch_log_data(job, pod, send, log_queue))
await send({'type': 'websocket.accept'})
connected = True
if connected and event['type'] == 'websocket.disconnect':
logger.info('Websocket Disconnected')
await job_queue.put(True)
await job_queue.join()
task.cancel()
await asyncio.gather(task, return_exceptions=True)
if log_queue:
await log_queue.put(True)
await log_queue.join()
log_task.cancel()
await asyncio.gather(log_task, return_exceptions=True)
return
if connected and event['type'] == 'websocket.receive':
logger.info('Received Message')
| cognitive-space/warpzone | worlds/websocket.py | websocket.py | py | 5,411 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.core.handlers.asgi.ASGIRequest",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.http.QueryDict",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 35,
"usage_type": "name"
},
{
"api_name"... |
12785925952 | from py_reconhecimento import TReconhecimento
from py_cadastro import TCadastro
from py_principal import TPrincipal
from kivy.uix.screenmanager import ScreenManager
from kivy.app import App
from kivy import Config
from kivy.lang import Builder
Config.set('graphics', 'resizable', True)
Config.set('kivy', 'exit_on_escape', '0')
# Config.set('graphics', 'window_state', 'maximized')
Config.set('graphics', 'width', 1000)
Config.set('graphics', 'height', 600)
class GerenciadorTelas(ScreenManager):
def __init__(self):
super().__init__()
self.tprincipal = TPrincipal()
self.tcadastro = TCadastro()
self.treconhecimento = TReconhecimento()
self.add_widget(self.tprincipal)
self.add_widget(self.tcadastro)
self.add_widget(self.treconhecimento)
class Kv_Main(App):
title = 'Sistema de controle de acesso por Reconheicmento Facial'
icon = '/assets/ImagesApp/logo.png'
def build(self):
Builder.load_file('kv_main.kv')
return GerenciadorTelas()
if __name__ == '__main__':
Kv_Main().run()
| eticialima/recognitionfacial | project/py_main.py | py_main.py | py | 1,088 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "kivy.Config.set",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "kivy.Config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "kivy.Config.set",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "kivy.Config",
"line_num... |
21107255277 | import sqlite3
#Her oprettes en forbindelse til databasefilen
#Hvis filen ikke findes, vil sqlite oprette en ny tom database.
con = sqlite3.connect('start.db')
print('Database åbnet')
try:
con.execute("""CREATE TABLE personer (
id INTEGER PRIMARY KEY AUTOINCREMENT,
navn STRING,
alder INTEGER)""")
print('Tabel oprettet')
except Exception as e:
print('Tabellen findes allerede')
c = con.cursor()
c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', ("Hans", 38))
c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', ("Kim", 37))
#Efter at have ændret i databasen skal man kalde funktionen commit.
con.commit()
#Denne variabel bruges til at modtage input fra brugeren
inp = ''
print('')
print('Kommandoer: ')
print(' vis - Viser alle personer i databasen')
print(' ny - Opret ny person')
print(' q - Afslut program')
while not inp.startswith('q'):
inp = input('> ')
if inp == 'vis':
c = con.cursor()
c.execute('SELECT navn,alder FROM personer')
for p in c:
print('{} er {} år'.format(p[0], p[1]))
elif inp == 'ny':
n = input('Indtast navn: ')
a = input('Indtast alder: ')
c = con.cursor()
c.execute('INSERT INTO personer (navn,alder) VALUES (?,?)', (n, a))
con.commit()
| jonascj/learn-programming-with-python | ch-database/src/database_start.py | database_start.py | py | 1,314 | python | da | code | 2 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
}
] |
29212263006 | import numpy as np
import pandas as pd
import datetime, time
# 处理输入时间戳,当前汽车驶入时间戳转化为sumo中以秒为单位
def time_processing(timeStamp):
timeArray = time.localtime(timeStamp)
# 时间时区设置转换
base_time = datetime.datetime(timeArray[0], timeArray[1], timeArray[2], 0, 0, 0)
# 获取当日日期定位到00:00:00
base_time = time.mktime(base_time.timetuple())
# base_time转变为时间戳格式
return timeStamp - base_time
def create_trip_file(data_file="../data/chengdu/20161116.csv"):
names = ["id", "start_time", "end_time", "time?", "from_lane", "to_lane"]
data = pd.read_csv(data_file, header=None, names=names, index_col=False)
# 行索引命名,列索生成
data = data.sort_values(by='start_time', ascending=True)
# 排序升序排序
with open("../data/chengdu/20161116_trips.trips.xml", mode="w") as f:
print('''<?xml version="1.0" encoding="UTF-8"?>
<routes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/routes_file.xsd">
''', file=f)
for index, data_line in data.iterrows():
data_line["start_time"] = time_processing(data_line["start_time"])
print(
''' <trip id="{}" depart="{}" from="{}" to="{}"/>'''.format(data_line['id'], data_line['start_time'],
data_line['from_lane'],
data_line['to_lane']),
file=f)
print(
''' <trip id="{}" depart="{}" from="{}" to="{}"/>'''.format(data_line['id'], data_line['start_time'],
data_line['from_lane'],
data_line['to_lane']), )
print('''</routes>''', file=f)
| Rossions/TCSC | DataProcessing/chengdu/processing_abandon.py | processing_abandon.py | py | 2,020 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "time.localtime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"li... |
8413029677 | import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
create_table = "CREATE TABLE IF NOT EXISTS hotels (hotel_id text PRIMARY KEY, name text, stars real, price real, city text)"
cursor.execute(create_table)
connection.commit()
connection.close() | mariorodeghiero/flask-python-rest-api-course | create_db.py | create_db.py | py | 279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
73037104745 | import collections.abc
import copy
import typing
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.utils.constants
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
import enpheeph.utils.typings
class IndexingPlugin(
enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
):
# it is Optional so that we can use None
active_dimension_index: typing.Optional[
typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]
]
dimension_dict: enpheeph.utils.typings.DimensionDictType
def __init__(
self, dimension_dict: enpheeph.utils.typings.DimensionDictType
) -> None:
self.dimension_dict = dimension_dict
self.reset_active_dimensions()
# to select a set of dimensions to be used as active when selecting tensor indices
# by default no dimension is considered active
def select_active_dimensions(
self,
dimensions: collections.abc.Container[enpheeph.utils.enums.DimensionType],
# if True, we will move all the indices so that the first index is 0
# and the last is -1
autoshift_to_boundaries: bool = False,
# if True we fill the empty indices with the filler
# if False we will skip them
fill_empty_index: bool = True,
# the filler to use, defaults to : for a single dimension,
# which is slice(None, None)
filler: typing.Any = slice(None, None),
) -> typing.List[enpheeph.utils.typings.ActiveDimensionIndexType]:
# we invert the dimension dict to easily look it up
# as we will be using the indices to look it up instead of the names
inverted_dimension_dict = {v: k for k, v in self.dimension_dict.items()}
# we get the highest index for both the positive and the negative indices
# in terms of absolute value
# we filter the Ellipsis to avoid mypy errors
# **NOTE**: improve the typing here
no_ellipsis_dimension_dict_values: typing.List[int] = typing.cast(
typing.List[int,],
[x for x in self.dimension_dict.values() if x != Ellipsis],
)
longest_positive_range: int = max(
(x for x in no_ellipsis_dimension_dict_values if x >= 0),
# we use -1 default so that range(-1 + 1) = []
default=-1,
)
longest_negative_range: int = min(
(x for x in no_ellipsis_dimension_dict_values if x < 0),
# we use the number right outside the range to get an empty list
default=0,
)
# this list contains all the possible indices including Ellipsis
total_indices: typing.List[enpheeph.utils.typings.DimensionIndexType] = list(
# we cover all the indices to the maximum,
# including the maximum itself,
# hence the + 1
range(longest_positive_range + 1),
)
# we need to split the list creation otherwise mypy complains of different types
total_indices += [Ellipsis]
total_indices += list(
# we create the list going from the most negative index to 0
# 0 is excluded
range(
longest_negative_range,
0,
),
)
# we save the filling and the valid indices in the following list
dimension_index: typing.List[
enpheeph.utils.typings.ActiveDimensionIndexType,
] = []
for index in total_indices:
# the index is saved if it is present in the dimensions to be selected
# here we still don't consider the autoshift
if (
index in inverted_dimension_dict
and inverted_dimension_dict[index] in dimensions
):
dimension_index.append(inverted_dimension_dict[index])
# if the index is not included, we then check if we need to fill it
# due to fill_empty_index
elif fill_empty_index:
dimension_index.append(filler)
if autoshift_to_boundaries:
# we remove all the elements at the beginning/end of the list
# that are fillers
i = 0
# infinite loop, but there is a break
# **NOTE**: probably it can be optimized further
while 1:
# we start from 0, and for each filler we match we remove it
if dimension_index[i] == filler:
del dimension_index[i]
# if the element is not a filler than the start is done and we check the
# end using -1
elif i == 0:
i = -1
# if both the element is not a filler and the index is at the end, it
# means we are done
else:
break
# we copy the dimensions and we return them
self.active_dimension_index = copy.deepcopy(dimension_index)
return copy.deepcopy(self.active_dimension_index)
# to reset the active dimensions to the empty dimension dict
def reset_active_dimensions(self) -> None:
    """Clear the selection made by select_active_dimensions.

    After this call, join_indices/filter_dimensions will raise until a new
    selection is made.
    """
    self.active_dimension_index = None
# combine per-dimension indices into a single index tuple, ordered by the
# active dimension selection
def join_indices(
    self,
    dimension_indices: enpheeph.utils.typings.DimensionLocationIndexType,
) -> enpheeph.utils.typings.AnyIndexType:
    """Join the given per-dimension indices following the order of the
    active dimension index, returning a deep-copied tuple.

    Raises ValueError if select_active_dimensions has not been called.
    """
    if self.active_dimension_index is None:
        raise ValueError(
            "First select the active dimensions with select_active_dimensions"
        )
    joined: typing.List[enpheeph.utils.typings.Index1DType] = []
    for entry in self.active_dimension_index:
        # non-enum entries are fillers and are forwarded unchanged
        if not isinstance(entry, enpheeph.utils.enums.DimensionType):
            joined.append(entry)
            continue
        value = dimension_indices[entry]
        # a "sequence of sequences" spans multiple dimensions: it must be a
        # Sequence itself and contain no plain integers (any other allowed
        # element type represents a per-dimension sequence)
        is_multi_dimensional = isinstance(
            value, collections.abc.Sequence
        ) and not any(
            isinstance(element, int)
            # typing.cast keeps mypy happy about the element type
            for element in typing.cast(typing.Sequence[typing.Any], value)
        )
        if is_multi_dimensional:
            # each sub-sequence covers one dimension, so extend
            joined.extend(
                typing.cast(
                    typing.Tuple[enpheeph.utils.typings.Index1DType, ...],
                    value,
                ),
            )
        else:
            # a single-dimension index, appended as-is
            joined.append(
                typing.cast(enpheeph.utils.typings.Index1DType, value),
            )
    return copy.deepcopy(tuple(joined))
# filter a size/shape sequence depending on the active dimension index,
# keeping only the positions occupied by DimensionType enums
def filter_dimensions(
    self,
    # a plain size/shape sequence
    dimensions: typing.Sequence[int],
) -> typing.Tuple[int, ...]:
    """Return the sizes from *dimensions* whose active-dimension slot is a
    DimensionType enum, expanding Ellipsis-mapped dimensions as needed.

    Raises ValueError if no active selection exists, or if the lengths do
    not match after Ellipsis expansion.
    """
    if self.active_dimension_index is None:
        raise ValueError(
            "First select the active dimensions with select_active_dimensions"
        )
    active_index: typing.List[
        enpheeph.utils.typings.ActiveDimensionIndexType
    ] = copy.deepcopy(self.active_dimension_index)
    enum_entries = [
        entry
        for entry in active_index
        if isinstance(entry, enpheeph.utils.enums.DimensionType)
    ]
    for entry in enum_entries:
        # an Ellipsis-mapped dimension absorbs as many positions as needed
        # to match the length of the given shape
        if self.dimension_dict[entry] == Ellipsis:
            while len(dimensions) > len(active_index):
                active_index.insert(active_index.index(entry), entry)
    # after any Ellipsis expansion the lengths must agree exactly
    if len(dimensions) != len(active_index):
        raise ValueError(
            "dimensions must be the same length of active_dimension_index "
            "if no Ellipsis are used"
        )
    return tuple(
        size
        for size, slot in zip(dimensions, active_index)
        if isinstance(slot, enpheeph.utils.enums.DimensionType)
    )
| Alexei95/enpheeph | src/enpheeph/injections/plugins/indexing/indexingplugin.py | indexingplugin.py | py | 9,122 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "enpheeph.injections.plugins.indexing.abc.indexingpluginabc.injections",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "enpheeph.injections.plugins.indexing.abc.indexingpluginabc",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Opti... |
15715933133 | import json
import os
import sys
from tempfile import NamedTemporaryFile
# Constructor kwargs that are no longer honoured; __init__ logs an error
# when any of them arrives via **kwargs.
DEPRECATED_KEYS = [
    'site_yaml_path',
    'inventory_config',
    'variable_manager_config',
    'passwords',
    'modules',
    'private_key_file']
# Option keys whose list values are rendered as comma-separated strings
# (all other list values are kept as Python lists and repr()'d).
LIST_TYPES = ['skip-tags', 'tags']
# kwargs forwarded to the CLI verbatim as "--<name> <json-value>" pairs.
DIRECT_PARAMS = ['start_at_task', 'scp_extra_args', 'sftp_extra_args',
                 'ssh_common_args', 'ssh_extra_args', 'timeout']
def get_fileno():
    """Return sys.stdout's ``fileno`` attribute (unbound, not called), or
    None when the current stdout object does not provide one."""
    return getattr(sys.stdout, 'fileno', None)
class CloudifyAnsibleSDKError(Exception):
    """Generic error raised while preparing the Ansible Playbook."""
class AnsiblePlaybookFromFile(object):
    """Assembles the environment and CLI arguments for an Ansible playbook.

    The object does not run anything by itself: the properties build the
    argument list and environment, and ``execute`` simply delegates to a
    caller-supplied process-execution function.
    """

    def __init__(self,
                 playbook_path=None,
                 sources='localhost,',
                 options_config=None,
                 run_data=None,
                 verbosity=2,
                 logger=None,
                 site_yaml_path=None,
                 environment_variables=None,
                 additional_args=None,
                 **kwargs):
        # site_yaml_path is the legacy name for playbook_path and wins
        # when both are given (preserves historical behaviour).
        self.playbook = site_yaml_path or playbook_path
        self.sources = sources
        self.options_config = options_config or {}
        self.run_data = run_data or {}
        self.environment_variables = environment_variables or {}
        self.additional_args = additional_args or ''
        self._verbosity = verbosity
        self.logger = logger

        # Warn about kwargs that are no longer supported.
        for deprecated_key in DEPRECATED_KEYS:
            if deprecated_key in kwargs:
                self.logger.error(
                    'This key been deprecated: {0} {1}'.format(
                        deprecated_key, kwargs[deprecated_key]))

        # Known pass-through params are appended to additional_args as
        # "--param json-value " pairs.
        for field in DIRECT_PARAMS:
            if kwargs.get(field):
                self.additional_args += "--{field} {value} ".format(
                    field=field.replace("_", "-"),
                    value=json.dumps(kwargs[field]))

    @property
    def env(self):
        """A copy of the process environment overlaid with the
        user-provided environment variables."""
        _env = os.environ.copy()
        _env.update(self.environment_variables)
        return _env

    @property
    def verbosity(self):
        """The ansible verbosity flag, e.g. '-vv' for verbosity 2.

        Always at least '-v', even when _verbosity is 0 or negative."""
        return '-' + 'v' * max(self._verbosity, 1)

    @property
    def options(self):
        """Render options_config (with run_data merged into extra_vars)
        as a single '--key=value' string."""
        options_list = []
        if 'extra_vars' not in self.options_config:
            self.options_config['extra_vars'] = {}
        self.options_config['extra_vars'].update(self.run_data)
        for key, value in self.options_config.items():
            if key == 'extra_vars':
                # Ansible reads extra vars from a JSON file via '@path'.
                # delete=False is deliberate: the file must still exist
                # when the playbook process starts (it is never cleaned
                # up here - callers own the temp file's lifetime).
                f = NamedTemporaryFile(delete=False)
                with open(f.name, 'w') as outfile:
                    json.dump(value, outfile)
                value = '@{filepath}'.format(filepath=f.name)
            elif key == 'verbosity':
                # Verbosity is handled by the dedicated property, never
                # by options_config.
                self.logger.error('No such option verbosity')
                continue
            key = key.replace("_", "-")
            if isinstance(value, dict):
                value = json.dumps(value)
            elif isinstance(value, list) and key in LIST_TYPES:
                # Only the known list-typed options are joined with
                # commas; any other list is passed through and repr()'d.
                value = u",".join(value)
            options_list.append(
                '--{key}={value}'.format(key=key, value=repr(value)))
        return ' '.join(options_list)

    @property
    def process_args(self):
        """The ordered argument list for an ansible-playbook invocation."""
        return [
            self.verbosity,
            '-i {0}'.format(self.sources),
            self.options,
            self.additional_args,
            self.playbook
        ]

    def execute(self, process_execution_func, **kwargs):
        """Delegate execution to the supplied callable, forwarding kwargs."""
        return process_execution_func(**kwargs)
| christaotaoz/shkd-work | work/doc/srv6+5G/ansible8.82/cloudify_ansible_sdk/__init__.py | __init__.py | py | 3,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.environ.copy",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_num... |
74791240424 | import math
import json
import random
import argparse
def genRandomFeatures(n):
    """Generate *n* GeoJSON point features at uniformly random positions.

    Coordinates follow the GeoJSON [longitude, latitude] order (the first
    value spans +/-180, the second +/-90); each feature carries a random
    binary 'class' property.
    """
    features = []
    for _ in range(n):
        # same draw order as before: lon, lat, then the class coin-flip
        lon = (random.random() - 0.5) * 360.0
        lat = (random.random() - 0.5) * 180.0
        features.append({
            'type': 'Feature',
            'properties': {'class': 1 if random.random() > 0.5 else 0},
            'geometry': {'type': 'Point', 'coordinates': [lon, lat]},
        })
    return features
def genGridFeatures(nx, ny):
    """Generate an nx*ny grid of GeoJSON point features, each with a
    random binary 'class' property.

    NOTE(review): the formula '(i - 0.5) * 360.0 / nx' places the grid
    asymmetrically around the origin rather than spanning the full
    lon/lat range - presumably acceptable for a benchmark; confirm
    before relying on exact placement.
    """
    features = []
    for ix in range(nx):
        for iy in range(ny):
            x = (ix - 0.5) * 360.0 / nx
            y = (iy - 0.5) * 180.0 / ny
            features.append({
                'type': 'Feature',
                'properties': {'class': 1 if random.random() > 0.5 else 0},
                'geometry': {'type': 'Point', 'coordinates': [x, y]},
            })
    return features
def main():
    """Parse CLI args and print SQL that creates and fills a table of
    random points.

    NOTE(review): the table name is interpolated directly into the SQL
    text - fine for a local benchmark script, but never feed it
    untrusted input.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(dest='tableName', help='The name of the db table')
    parser.add_argument(dest='numPoints', type=int,
                        help='The number of random points')
    args = parser.parse_args()

    print("DROP TABLE IF EXISTS %s;" % args.tableName)
    print("CREATE TABLE %s(gid serial PRIMARY KEY, geom GEOMETRY, attr NUMERIC);" % args.tableName)
    for feature in genRandomFeatures(args.numPoints):
        geom = "POINT(%g %g)" % tuple(feature['geometry']['coordinates'])
        print("INSERT INTO %s VALUES (DEFAULT, GeomFromEWKT('SRID=4326;%s'), %d);"
              % (args.tableName, geom, feature['properties']['class']))


if __name__ == "__main__":
    main()
| decision-labs/mapnik | benchmark/utils/random_points.py | random_points.py | py | 1,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.random",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_nu... |
24417230499 | from os.path import exists
from pyimpspec.data.data_set import (
DataSet,
dataframe_to_data_sets,
)
from typing import List
def parse_spreadsheet(path: str, **kwargs) -> List[DataSet]:
    """
    Parse a spreadsheet (.xlsx or .ods) containing one or more impedance spectra.

    Parameters
    ----------
    path: str
        The path to the file to process.

    kwargs:
        Keyword arguments are passed forward to `pandas.read_excel`_.

    Returns
    -------
    List[DataSet]
    """
    from pandas import (
        DataFrame,
        read_excel,
    )

    assert isinstance(path, str) and exists(path), path
    # default to reading every sheet (read_excel then returns a dict
    # mapping sheet label -> DataFrame)
    kwargs.setdefault("sheet_name", None)
    sheets: typing_mapping = read_excel(path, **kwargs)
    data_sets: List[DataSet] = [
        data_set
        for label, df in sheets.items()
        for data_set in dataframe_to_data_sets(df, path=path, label=label)
    ]
    assert isinstance(data_sets, list), data_sets
    assert all(isinstance(ds, DataSet) for ds in data_sets)
    return data_sets
| vyrjana/pyimpspec | src/pyimpspec/data/formats/spreadsheet.py | spreadsheet.py | py | 1,039 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pyimpspec.data.data_set.DataSet",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pandas.Dat... |
40618220774 | from django.conf.urls import url
from . import views
# URL routes for the customer app.
# NOTE(review): django.conf.urls.url was deprecated in Django 2.0 and
# removed in Django 4.0; consider migrating to django.urls.re_path.
urlpatterns=[
    # class-based view; presumably customer registration - confirm in views.py
    url(r'^register/',views.mapiview.as_view()),
    # class-based edit view
    url(r'^editview/',views.mapiview1.as_view()),
    # class-based update view
    url(r'^update/',views.mapiview2.as_view()),
    # function-based view (name suggests "view customers")
    url(r'^vcus/',views.vcustomer),
    # function-based registration endpoint
    url(r'^registercus/',views.post),
    # captures the trailing path segment as keyword arg `idd`
    url(r'^viewtr/(?P<idd>\w+)',views.viewtr,name="viewtr"),
    # (?P<idd>\w+)
]
{
"api_name": "django.conf.urls.url",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.co... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.