seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15412087448 | from django.shortcuts import render
from django.http import HttpResponse
from django.utils.translation import get_language
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.conf import settings
from . import Checksum
from paytm.models import PaytmHistory
# Create your views here.
@login_required
def home(request):
    """Render a minimal landing page with a link to the payment endpoint."""
    pay_link = f"<html><a href='{settings.HOST_URL}/paytm/payment'>PayNow</html>"
    return HttpResponse(pay_link)
def payment(request):
    """Build a signed Paytm payment payload for a fixed test amount and render the pay page.

    Generates a fresh order id, signs the transaction parameters with the
    merchant key, and hands the signed dict to the ``payment.html`` template.
    """
    MERCHANT_KEY = settings.PAYTM_MERCHANT_KEY
    MERCHANT_ID = settings.PAYTM_MERCHANT_ID
    # Prefix the callback with the active language segment, if any.
    get_lang = "/" + get_language() if get_language() else ''
    # NOTE(review): CALLBACK_URL is computed but unused — the CALLBACK_URL
    # entry in data_dict below is commented out; confirm whether intended.
    CALLBACK_URL = settings.HOST_URL + get_lang + settings.PAYTM_CALLBACK_URL
    # Generating unique temporary ids
    order_id = Checksum.__id_generator__()
    bill_amount = 100  # hard-coded amount — presumably a placeholder; TODO confirm
    if bill_amount:
        data_dict = {
            'MID':MERCHANT_ID,
            'ORDER_ID':order_id,
            'TXN_AMOUNT': bill_amount,
            'CUST_ID':'harish@pickrr.com',
            'INDUSTRY_TYPE_ID':'Retail',
            'WEBSITE': settings.PAYTM_WEBSITE,
            'CHANNEL_ID':'WEB',
            #'CALLBACK_URL':CALLBACK_URL,
        }
        # NOTE: param_dict aliases data_dict (no copy) — the checksum is
        # computed over the same dict it is then inserted into.
        param_dict = data_dict
        param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(data_dict, MERCHANT_KEY)
        return render(request,"payment.html",{'paytmdict':param_dict})
    return HttpResponse("Bill Amount Could not find. ?bill_amount=10")
@csrf_exempt
def response(request):
if request.method == "POST":
MERCHANT_KEY = settings.PAYTM_MERCHANT_KEY
data_dict = {}
for key in request.POST:
data_dict[key] = request.POST[key]
verify = Checksum.verify_checksum(data_dict, MERCHANT_KEY, data_dict['CHECKSUMHASH'])
if verify:
PaytmHistory.objects.create(user=request.user, **data_dict)
return render(request,"response.html",{"paytm":data_dict})
else:
return HttpResponse("checksum verify failed")
return HttpResponse(status=200) | harishbisht/paytm-django | payments/paytm/views.py | views.py | py | 2,130 | python | en | code | 31 | github-code | 6 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.HOST_URL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 16,
"usage_type": "name"
},
{
"... |
72357813309 | # ==============================================================================
# Main runner entry point for the project
# Implemented using SAGE math library
#
# Author: Malo RANZETTI
# Date: Spring 2023
# ==============================================================================
import os
import msidh
import sidh
import sage.all as sage
import time
import argparse
import numpy as np
# Top-level CLI parser; the action flags are registered in the __main__ block.
parser = argparse.ArgumentParser(
    prog='M-SIDH Demo Runner',
    description='This program is a demo built using SAGE of proposed countermeasures to the SIDH scheme.',
    epilog='Written by M.Ranzetti')
def test_SIDH(curve, n_rounds=10):
    """Run the SIDH protocol ``n_rounds`` times and report timing statistics.

    Args:
        curve: name of a curve registered in ``sidh.available_curves``.
        n_rounds: number of protocol executions to average over.

    Returns:
        dict with the curve name, average runtime in seconds, the standard
        deviation of the raw per-round timings (nanoseconds, as before),
        and the number of failed rounds.
    """
    # ==============================================================================
    # TEST SIDH
    # NIST-LEVEL 5 (AES-256 security) 21s for the runtime
    # ==============================================================================
    print("Testing SIDH protocol...")
    scheme = sidh.create_protocol(sidh.get_curve(curve))
    results = []
    for i in range(n_rounds):
        print(f"Round {i+1}/{n_rounds}")
        results.append(scheme.run())  # each run() -> (success: bool, time_ns)
    # Compute the statistics once instead of recomputing them for the prints.
    timings = [r[1] for r in results]
    average_time = sum(timings) / n_rounds * 1e-9  # ns -> s
    std = np.std(timings)
    failure_count = sum(1 for r in results if not r[0])
    print(f"Average time: {average_time}s")
    print(f"Failure count: {failure_count}")
    data = {
        'settings': curve,
        'average_time': average_time,
        'std': std,
        'failure_count': failure_count
    }
    return data
def test_MSIDH(filename, n_rounds=10):
    """Run the MSIDH protocol from a saved parameter file and report stats.

    Args:
        filename: path to a parameter file whose name contains ``AES-<level>``.
        n_rounds: number of protocol executions to average over.

    Returns:
        dict with the security level (parsed from the filename), average
        runtime in seconds, std of the raw nanosecond timings, and the
        failure count.
    """
    # ==============================================================================
    # TEST MSIDH
    # Current maximum tested: t = 90 // 100 // 200
    # GOAL -> t = 572 for AES-128 security
    # Settings generation: 32.8s // 294.6s // 194.4s
    # Protocol execution: 5.3s // 35.0s // 320.9s
    # Currently the biggest bottlenecks are:
    # - prime verification in EllipticCurve (OVERRIDEN IN SAGE SOURCE CODE)
    # - computing the generators of the curve (=> there might be a way to optimize this)
    #
    # ==============================================================================
    print("Testing MSIDH protocol...")
    scheme = msidh.create_protocol_from_file(filename)
    results = []
    for i in range(n_rounds):
        print(f"Round {i+1}/{n_rounds}")
        results.append(scheme.run())  # each run() -> (success: bool, time_ns)
    # Compute the statistics once instead of recomputing them for the prints.
    timings = [r[1] for r in results]
    average_time = sum(timings) / n_rounds * 1e-9  # ns -> s
    std = np.std(timings)
    failure_count = sum(1 for r in results if not r[0])
    print(f"Average time: {average_time}s")
    print(f"Failure count: {failure_count}")
    data = {
        # e.g. "...AES-128.json" -> "128"; assumes 'AES-' occurs in the name.
        'settings': filename.split('AES-')[1].split('.')[0],
        'average_time': average_time,
        'std': std,
        'failure_count': failure_count
    }
    return data
def gen_MSIDH128():
    """Delegate to the msidh module to generate the MSIDH-128 parameter set."""
    msidh.create_g128_protocol()
def create_msidh(lam):
    """Generate MSIDH parameters for security level ``lam``.

    NOTE(review): the value returned by ``msidh.create_protocol`` is
    discarded — presumably generation persists its output as a side effect;
    confirm against the msidh module.
    """
    msidh.create_protocol(msidh.MSIDHpArbitrary, lam)
def output_data(filename, data):
    '''
    Append one CSV row of ``data`` values to ``filename``.

    A header row built from ``data``'s keys is written first when the file
    does not exist yet. ``data`` is a flat dict {column: value}; the key
    order must be consistent across calls for the columns to line up.
    '''
    # Write the header only on first use.
    if not os.path.exists(filename):
        with open(filename, 'w') as f:
            f.write(','.join(data.keys()) + '\n')
    with open(filename, 'a') as f:
        # add a line
        f.write(','.join([str(v) for v in data.values()]) + '\n')
    # fix: interpolate the actual path (the original printed the literal
    # text "(unknown)").
    print(f"Data written to {filename}")
if __name__ == "__main__":
    # CLI flags; exactly one action (test / gen / gen128) is expected per run.
    parser.add_argument('-t', '--test', type=str, choices=['sidh', 'msidh'], help='Test to run (sidh, msidh)')
    parser.add_argument('-c', '--curve', type=str, choices=list(sidh.available_curves.keys()) ,help='Curve to use for SIDH')
    parser.add_argument('-f', '--file', type=str, help='File to use for MSIDH paramters')
    parser.add_argument('-r', '--rounds', type=int, default=10, help='Number of rounds to run tests for')
    parser.add_argument('-g', '--gen', type=int, help='generate MSIDH parameters for a given security level')
    parser.add_argument('-g128', '--gen128', action='store_true', help='generate MSIDH-128 parameters')
    args = parser.parse_args()
    # NOTE(review): truthiness test — `--gen 0` would be treated as not given.
    if args.gen:
        create_msidh(args.gen)
    elif args.gen128:
        gen_MSIDH128()
    elif args.test == 'sidh':
        if not args.curve:
            print("Please provide a curve to use for SIDH using -c")
            exit(1)
        data = test_SIDH(args.curve, args.rounds)
        output_data("sidh_results.csv", data)
    elif args.test == 'msidh':
        if not args.file:
            print("Please provide a file to use for MSIDH using -f")
            print("You can generate a file using -g <security level>")
            exit(1)
        data = test_MSIDH(args.file, args.rounds)
        output_data("msidh_results.csv", data)
    else:
        print("Invalid arguments, use -h for help")
        exit(1)
| mrztti/M-SIDH | run.py | run.py | py | 5,043 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sidh.create_protocol",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sidh.get_curve",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.std... |
31512974964 | import json
import requests
import constants
import tokens
def make_api_call(access_token, url, method, **kwargs):
    """Issue an authenticated JSON request and return the response.

    ``method`` is the requests function to invoke (e.g. ``requests.get``);
    extra keyword arguments are forwarded to it. Raises ``RuntimeError``
    when the response status is not OK.
    """
    auth_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {access_token}",
    }
    response = method(url=url, headers=auth_headers, **kwargs)
    if response.ok:
        return response
    raise RuntimeError(f"Error making request to {url}: ", response.content)
def get_user_id_and_name_from_refresh_token(refresh_token):
    """Exchange a refresh token for an access token, then fetch the user's
    Spotify id and display name from the ``/me`` endpoint."""
    access_token = tokens.get_access_token(refresh_token)
    user_response = make_api_call(access_token, constants.API_URL + "/me", requests.get)
    # Parse the response body once instead of twice.
    profile = user_response.json()
    return profile["id"], profile["display_name"]
def make_new_playlist(user):
    """Create the default playlist on the user's account and return its id.

    ``user`` is expected to expose ``access_token``, ``user_id`` and
    ``recently_added_delta_days`` — presumably a model object; confirm
    against the caller.
    """
    response = make_api_call(
        user.access_token,
        url=f"{constants.API_URL}/users/{user.user_id}/playlists",
        method=requests.post,
        # Body is serialized manually because make_api_call forwards
        # ``data=`` straight through to the requests function.
        data=json.dumps({"name": constants.DEFAULT_PLAYLIST_NAME, "description": f"Here are your most recently added songs from the last {user.recently_added_delta_days} days"})
    )
    return response.json()["id"]
| rjshearme/spotify_recently_added_playlist | api.py | api.py | py | 1,168 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tokens.get_access_token",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "constants.API_URL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cons... |
75066284348 | import torch
import logging
import pickle
from core.utils import scale_image, save_layer_image
from data import image_corruption
def evaluate(model, loader, args, perturbation=False, pSize=0, **kwargs):
    """Evaluate restoration quality of ``model`` on corrupted images.

    Each batch is corrupted, scaled, restored by the model, and the squared
    Frobenius-norm error is accumulated. Returns the accumulated loss
    normalized by ``len(loader) * args.batchSize``.

    kwargs: ``device`` (required), optional ``objective_function``, and
    ``epoch``/``ibatch`` for logging during training (defaults to 'Test'/0).
    """
    objective_function= kwargs.get('objective_function', None)
    device = kwargs['device']
    if 'epoch' in kwargs:
        epoch = kwargs['epoch']
        ibatch = kwargs["ibatch"]
    else:
        epoch = 'Test'
        ibatch = 0
    validloss = 0
    for i, (images, _) in enumerate(loader):
        if i == 200:  # hard cap on the number of evaluated batches
            break
        corrupt_img, _ = image_corruption(images, args, perturbation=perturbation, pSize=pSize) #blurring, inpainting, noising
        corrupt_img_scale = scale_image(corrupt_img, args.nBits)
        restored_img, outsx, _ = model(corrupt_img_scale.to(device), objective_function=objective_function, noisyOuts=args.noisyOuts)
        validloss += (torch.norm(restored_img.detach().cpu() - images, p='fro')**2).item() #MSE
        if i == 0:
            if epoch == 'Test':
                # Persist a sample triplet (clean, corrupted, restored) for inspection.
                with open(f'./results/{args.dataset}_{args.constrained}_{pSize}.pkl', 'wb') as ObjFile:
                    pickle.dump((images, corrupt_img, restored_img, epoch, ibatch), ObjFile)
            save_layer_image(images, corrupt_img, outsx, epoch, ibatch, args, perturbation=False)
            # NOTE(review): this break fires on the very first batch, so only
            # one batch is ever evaluated — confirm whether intended.
            break
    del images, corrupt_img, restored_img, outsx, corrupt_img_scale
    torch.cuda.empty_cache()
    # NOTE(review): normalizes by the full loader size even though the loop
    # above exits early — the reported value underestimates the mean loss.
    validloss /= (len(loader)*args.batchSize)
    logging.debug("Epoch {} - Batch {}, Loss {:.4f}".format(epoch, ibatch, validloss))
    return validloss
| SMRhadou/UnrolledGlow | core/evaluation.py | evaluation.py | py | 1,612 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "data.image_corruption",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "core.utils.scale_image",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pickle.dump",... |
24522571380 | import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Client: a federated-learning participant (local training + compressed gradients)
class Client:
    """A federated-learning participant that trains a local model and exposes
    top-k-sparsified gradients for server aggregation."""

    def __init__(self, model, train_loader, test_loader, lr=0.1):
        self.model = model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.optimizer = optim.SGD(self.model.parameters(), lr=lr)

    def train(self, epochs=1):
        """Run local SGD training for ``epochs`` passes over train_loader."""
        self.model.train()
        for epoch in range(epochs):
            for data, target in self.train_loader:
                self.optimizer.zero_grad()
                output = self.model(data)
                loss = nn.functional.cross_entropy(output, target)
                loss.backward()
                self.optimizer.step()

    def test(self):
        """Return (mean cross-entropy loss, accuracy %) over test_loader."""
        self.model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in self.test_loader:
                output = self.model(data)
                test_loss += nn.functional.cross_entropy(output, target, reduction='sum').item()
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(self.test_loader.dataset)
        accuracy = 100. * correct / len(self.test_loader.dataset)
        return test_loss, accuracy

    def get_topk_grads(self, k=0.01):
        """Return the gradients sparsified to roughly the top-k fraction of
        entries per parameter, flattened into one 1-D tensor.

        Fixes vs. the original:
        - k_ is now a fraction of the *parameter's* element count (the old
          code took ``numel()`` of a scalar sum, which is always 1, making
          ``k_`` zero and ``topk_values[-1]`` crash on an empty result);
        - at least one entry is kept so ``torch.topk`` never receives k=0;
        - per-parameter masked gradients are flattened before ``torch.cat``
          (tensors of different shapes cannot be concatenated directly).
        """
        grads = []
        for param in self.model.parameters():
            if param.grad is not None:
                flat = param.grad.abs().view(-1)
                # Keep at least one entry per parameter.
                k_ = max(1, int(flat.numel() * k))
                topk_values, _ = torch.topk(flat, k_)
                mask = torch.zeros_like(param.grad)
                # Entries tied with the threshold are all kept, as before.
                mask[param.grad.abs() >= topk_values[-1]] = 1
                grads.append((mask * param.grad).view(-1))
        return torch.cat(grads)
# Server: aggregates the clients' compressed gradients.
class Server:
    """Central aggregator that averages top-k sparsified client gradients."""

    def __init__(self, clients):
        # Participating Client instances polled during aggregation.
        self.clients = clients

    def aggregate(self):
        """Poll every client for its sparsified gradients and return their mean."""
        running = None
        seen = 0
        for member in self.clients:
            contribution = member.get_topk_grads()
            if running is None:
                running = contribution
            else:
                running += contribution
            seen += 1
        running /= seen
        return running
# Main driver: federated training on MNIST with top-k gradient compression.
def main():
    """Set up clients/server on MNIST and run federated training rounds.

    NOTE(review): several names used below are defined nowhere in this file
    (``num_rounds``, ``client_optimizer``, ``criterion``,
    ``client.get_data``, a ``server.aggregate(grad_list)`` signature,
    ``server.update_model``, ``client.update_model``,
    ``evaluate_global_loss``) — as written this function cannot run and
    reads as a sketch to be completed.
    """
    # Load the MNIST dataset
    train_dataset = datasets.MNIST('../data', train=True, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.1307,), (0.3081,))
                                   ]))
    test_dataset = datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ]))
    # Ten loaders over the same dataset -> ten clients with identical data.
    train_loaders = [torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True) for _ in range(10)]
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=True)
    # Initialize the clients and the server
    clients = [Client(nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 10)), train_loader, test_loader) for train_loader in train_loaders]
    server = Server(clients)
    # Training and aggregation
    for round in range(num_rounds):
        print(f"\nRound {round + 1}")
        # Clients train and compute gradients
        grad_list = []
        for client in clients:
            client.train()
            client_optimizer.zero_grad()
            # Fetch the client's local data
            inputs, labels = client.get_data()
            # Forward pass
            outputs = client.model(inputs)
            # Compute the local loss
            loss = criterion(outputs, labels)
            # Backward pass
            loss.backward()
            # Get the gradients and compress them
            grad = client.get_topk_grads()
            grad_list.append(grad)
        # Server aggregates the gradients and updates the model
        server.aggregate(grad_list)
        server.update_model()
        # Clients pull the updated model
        for client in clients:
            client.update_model(server.model)
        # Compute and print the global loss
        global_loss = evaluate_global_loss(server.model, test_loader, criterion)
        print(f"Global Loss: {global_loss:.4f}")
| huguangs/NIID-Bench-main-master | top-k/main.py | main.py | py | 4,358 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.optim.SGD",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.n... |
70680835069 | from flask import Flask
from flask import render_template
import ffmpeg_streaming
from ffmpeg_streaming import Formats
import sys
app = Flask(__name__)
@app.route("/")
def streaming():
return render_template('streaming.html')
@app.route('/video')
def video_server():
    """Transcode the sample video to HLS and return the playlist output.

    NOTE(review): transcoding runs on every request, which is expensive, and
    returning ``hls.output(...)`` directly as a Flask response looks suspect
    — confirm what ffmpeg_streaming's ``output`` actually returns.
    """
    video = ffmpeg_streaming.input('pexels_video.mp4')
    hls = video.hls(Formats.h264())
    hls.auto_generate_representations()
    hls.save_master_playlist('/var/media/hls.m3u8')
    return hls.output('/var/media/hls.m3u8')
def monitor(ffmpeg, duration, time_, time_left, process):
per = round(time_ / duration * 100)
sys.stdout.write(
"\rTranscoding...(%s%%) %s left [%s%s]" %
(per, datetime.timedelta(seconds=int(time_left)), '#' * per, '-' * (100 - per))
)
sys.stdout.flush() | ifcassianasl/python_test_rtsp | main.py | main.py | py | 764 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ffmpeg_streaming.input",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ffmpeg_stream... |
11232494681 | from collections import defaultdict
ENTER = "Enter"
LEAVE = "Leave"
CHANGE = "Change"
ENTER_MESSAGE = "님이 들어왔습니다."
LEAVE_MESSAGE = "님이 나갔습니다."
class ChatRoom:
    """Placeholder chat-room type; holds no state and is not referenced
    elsewhere in this file."""
    def __init__(self):
        super().__init__()
def operation(result, command, chatRoom, nicknames, uid="", name=""):
    """Apply a single chat command, mutating its arguments in place.

    ENTER: uid joins the room, its nickname is recorded, an enter event is
    logged. LEAVE: uid leaves the room and a leave event is logged.
    CHANGE: only the nickname mapping is updated. Unknown commands are
    ignored. Always returns None.
    """
    if command == CHANGE:
        nicknames[uid] = name
        return
    if command == ENTER:
        nicknames[uid] = name
        chatRoom.append(uid)
        result.append((uid, ENTER_MESSAGE))
        return
    if command == LEAVE:
        chatRoom.remove(uid)
        result.append((uid, LEAVE_MESSAGE))
def solution(record):
    """Replay the chat log and render visible messages with final nicknames.

    Each line of ``record`` is "Enter uid nick", "Leave uid" or
    "Change uid nick". Enter/Leave events are rendered only after the whole
    log is replayed, so every message shows the user's latest nickname.
    """
    nicknames = defaultdict(str)
    chatRoom = []
    events = []
    command, uid, name = "", "", ""
    for line in record:
        parts = line.split()
        if len(parts) == 3:
            command, uid, name = parts
        else:
            command, uid = parts
        operation(events, command, chatRoom, nicknames, uid, name)
    # Resolve each logged (uid, message) pair against the final nickname map.
    return [nicknames[u] + message for u, message in events]
if __name__ == "__main__":
    test = 1  # toggle for running the sample case below
    if test == 1:
        # Sample chat log; prints the rendered messages.
        record = ["Enter uid1234 Muzi", "Enter uid4567 Prodo", "Leave uid1234", "Enter uid1234 Prodo", "Change uid4567 Ryan"]
        print(solution(record))
| gatherheart/Solved_PS | KAKAO/2019_KAKAO_1.py | 2019_KAKAO_1.py | py | 1,378 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 33,
"usage_type": "call"
}
] |
70276262907 | import torch
import time
import argparse
from importlib import import_module
import numpy as np
import utils
import train
parser = argparse.ArgumentParser(description='BertClassifier')
# Alternative model choices kept for quick switching:
# parser.add_argument('--model', type=str, default='BertFc', help='choose a model')
# parser.add_argument('--model', type=str, default='BertCNN', help='choose a model')
# parser.add_argument('--model', type=str, default='BertRNN', help='choose a model')
# parser.add_argument('--model', type=str, default='BertDPCNN', help='choose a model')
# parser.add_argument('--model', type=str, default='ERNIE', help='choose a model')
parser.add_argument('--model', type=str, default='ERNIEDPCNN', help='choose a model')
# NOTE(review): parse_args() runs at import time, not under __main__.
args = parser.parse_args()
if __name__ == '__main__':
    print(torch.__version__)
    # Dataset root directory
    dataset = 'THUCNews'
    model_name = args.model
    # Dynamically import the chosen model module, e.g. models.ERNIEDPCNN
    x = import_module(
        'models.' + model_name)  # <module 'models.BertFc' from '/home/hadoop/PycharmProjects/BertClassifier/models/BertFc.py'>
    config = x.Config(dataset)
    print(config.model_name)
    # print(config)
    # Fix the RNG seeds for reproducibility.
    np.random.seed(1)
    torch.manual_seed(1)
    # NOTE(review): the CUDA seed (4) differs from the CPU seed (1) — confirm intended.
    torch.cuda.manual_seed_all(4)
    torch.backends.cudnn.deterministic=True  # make cuDNN deterministic so runs are repeatable
    # Load the dataset
    start_time=time.time()
    print('加载数据集')
    train_data,dev_data,test_data=utils.build_dataset(config)
    train_iter=utils.build_iterator(train_data,config)
    test_iter=utils.build_iterator(test_data,config)
    dev_iter=utils.build_iterator(dev_data,config)
    time_dif=utils.get_time_dif(start_time)
    print("模型开始之前,准备数据时间:", time_dif)
    # for i,(train,label) in enumerate(dev_iter):
    #     if (i%10==0):
    #         print(i,label) # dev contains 10000 items,10000/128=78.125,residue=True,79 batches,the batch 79st only has 16 items
    # Train, evaluate and test the model.
    model=x.Model(config).to(config.device)
    train.train(config,model,train_iter,dev_iter,test_iter)
| Jacquelin803/Transformers | BertClassifier/main.py | main.py | py | 2,007 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.__version__",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "importlib.import_module",
"line_number": 24,
"usage_type": "call"
},
{
"api_name":... |
779248836 | from dataclasses import dataclass
from typing import Annotated, List
from fastapi import Depends
from fastapi_pagination import Page
from fastapi_pagination.ext.sqlalchemy import paginate
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from config.db import get_db_session
from readconnect.books.domain.dtos.books_query_params import BooksQueryParams
from readconnect.books.domain.models.book_model import Book
from readconnect.books.infrastructure.db.entities.book_entity import BookEntity
@dataclass()
class BooksRepository:
    """Async data-access layer for books (SQLAlchemy + fastapi-pagination)."""

    # Request-scoped async session injected by FastAPI's dependency system.
    db: Annotated[AsyncSession, Depends(get_db_session)]

    async def create_many(self, new_books: List[Book]):
        """Bulk-insert ``new_books`` and return the created entities.

        NOTE(review): ``BookEntity(**book)`` unpacks each ``Book`` with
        ``**`` — this requires ``Book`` to behave like a mapping; confirm.
        """
        books = [BookEntity(**book) for book in new_books]
        self.db.add_all(books)
        await self.db.commit()
        return books

    async def find_by_id(self, book_id: str) -> BookEntity:
        """Return a single book by primary key (None when absent)."""
        query = select(BookEntity).where(BookEntity.id == book_id)
        result = await self.db.execute(query)
        return result.scalar()

    async def find(self, query: BooksQueryParams) -> Page[Book]:
        """Return one page of books.

        With ``include_extra_data``, full entities are selected and joined
        to authors and categories; otherwise only scalar columns.
        """
        q = select(
            BookEntity.id,
            BookEntity.title,
            BookEntity.isbn,
            BookEntity.long_description,
            BookEntity.short_description,
            BookEntity.published_date,
            BookEntity.thumbnail_url,
            BookEntity.page_count,
            BookEntity.status,
        )
        if query.include_extra_data:
            # NOTE(review): inner joins drop books that lack authors or
            # categories — confirm whether intended.
            q = select(BookEntity).join(BookEntity.authors).join(BookEntity.categories)
            return await paginate(self.db, q)
        return await paginate(self.db, q)

    async def search(self, query: BooksQueryParams) -> Page[Book]:
        """Return one page of books whose title or isbn contains
        ``query.search`` (case-insensitive), optionally with relations."""
        q = select(
            BookEntity.id,
            BookEntity.title,
            BookEntity.isbn,
            BookEntity.long_description,
            BookEntity.short_description,
            BookEntity.published_date,
            BookEntity.thumbnail_url,
            BookEntity.page_count,
            BookEntity.status,
        ).filter(
            BookEntity.title.icontains(query.search)
            | BookEntity.isbn.icontains(query.search)
        )
        if query.include_extra_data:
            # Same filter over full entities, joined to authors/categories.
            q = (
                select(BookEntity)
                .filter(
                    BookEntity.title.icontains(query.search)
                    | BookEntity.isbn.icontains(query.search)
                )
                .join(BookEntity.authors)
                .join(BookEntity.categories)
            )
            return await paginate(self.db, q)
        return await paginate(self.db, q)
| YeisonKirax/readconnect-back | src/readconnect/books/infrastructure/db/repository/books_repository.py | books_repository.py | py | 2,625 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Annotated",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "... |
37562147384 | """helper
=============
Helper functions for inventory scripts.
"""
__author__ = "Balz Aschwanden"
__email__ = "balz.aschwanden@unibas.ch"
__copyright__ = "Copyright 2017, University of Basel"
__credits__ = ["Balz Aschwanden"]
__license__ = "GPL"
import json
import os
import socket
def get_hostname():
    """Return the fully qualified domain name of this host."""
    fqdn = socket.gethostname()
    return fqdn
def get_simple_hostname(fqdn):
    """Strip the domain portion from an FQDN and return the bare host name."""
    bare_name, _, _ = fqdn.partition(".")
    return bare_name
def format_output(output):
    """Return results in Ansible JSON syntax.

    Ansible requirements are documented here:
    http://docs.ansible.com/ansible/latest/dev_guide/developing_inventory.html
    """
    dump_options = {"sort_keys": True, "indent": 4, "separators": (",", ": ")}
    return json.dumps(output, **dump_options)
def write_cache(cache_file, output):
    """Format ``output`` and write the inventory cache to ``cache_file``.

    Fix: write the formatted JSON string with a single call — the original
    iterated the string and wrote it one character at a time.
    """
    with open(cache_file, "w") as cache:
        cache.write(format_output(output))
def read_cache(cache_file):
    """Return the cache file's content, or False when the file is missing."""
    if not os.path.isfile(cache_file):
        return False
    with open(cache_file, "r") as cache:
        return cache.read()
if __name__ == "__main__":
    # Library module: nothing to do when executed directly.
    pass
| ANTS-Framework/ants | antslib/inventory/helper.py | helper.py | py | 1,278 | python | en | code | 42 | github-code | 6 | [
{
"api_name": "socket.gethostname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
74796406586 | from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
import cv2
from PIL import Image
# 1. ToTensor: convert a PIL image to a CHW float tensor in [0, 1]
img_path = "../dataset/train/ants/0013035.jpg"
img_PIL = Image.open(img_path)
tensor_trans = transforms.ToTensor()
img_tensor = tensor_trans(img_PIL)
# 2. Log the raw tensor image to TensorBoard
writer = SummaryWriter("../logs")
writer.add_image("Tensor_img", img_tensor)
# 3. Normalize — NOTE(review): mean [9, 0, 1] / std [1, 4, 6] look like
# arbitrary demo values, not dataset statistics; confirm.
print(img_tensor[0, 0, 0])
trans_normal = transforms.Normalize([9, 0, 1], [1, 4, 6])
normal_img = trans_normal(img_tensor)
print(normal_img[0, 0, 0])
writer.add_image("normalize", normal_img, 2)
# 4. Resize the PIL image to a fixed 512x512
print(img_PIL.size)
trans_resize = transforms.Resize((512, 512))
img_resize = trans_resize(img_PIL)
print(img_resize.size)
img_resize_tensor = tensor_trans(img_resize)
writer.add_image("resize", img_resize_tensor)
# 5. Compose: Resize(1080) (a single int matches the shorter side) then ToTensor
trans_resize_2 = transforms.Resize(1080)
trans_compose = transforms.Compose([trans_resize_2, tensor_trans])
img_resize_2 = trans_compose(img_PIL)
print(img_resize_2.size())
writer.add_image("resize", img_resize_2, 2)
# 6. RandomCrop: log ten random 100x200 (HxW) crops
trans_random_crop = transforms.RandomCrop([100, 200])
trans_compose_2 = transforms.Compose([trans_random_crop, tensor_trans])
for i in range(10):
    img_crop = trans_compose_2(img_PIL)
    writer.add_image("random_cropHW", img_crop, i)
writer.close() | ccbit1997/pytorch_learning | src/learn_transform.py | learn_transform.py | py | 1,344 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.tr... |
43168508245 | import asyncio
import json
import logging
import typing
from pathlib import Path
import discord
from discord.ext import commands
from fuzzywuzzy import process
from roycemorebot.constants import (
Categories,
Channels,
Emoji,
Guild,
MOD_ROLES,
StaffRoles,
)
log = logging.getLogger(__name__)
class Subscriptions(commands.Cog):
"""User-assigned subscriptions to select announcements."""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
        # name -> {"id": role_id, "club": bool}; may be {} until on_ready
        # triggers a reload from the guild.
        self._announcement_roles = self.load_announcement_roles()
    @commands.Cog.listener()
    async def on_ready(self) -> None:
        """Load the announcement roles, but only once guilds are available.

        When the save file produced no roles, ask the mods via a reaction
        prompt whether to rebuild the mapping from the guild.
        """
        if self._announcement_roles != {}:
            return
        log.info("No announcement roles found, requesting to reload")
        mod_bot_channel = self.bot.get_channel(Channels.mod_bot_commands)
        guild = discord.utils.get(self.bot.guilds, id=Guild.guild_id)
        mod_role = discord.utils.get(guild.roles, id=StaffRoles.mod_role)
        msg = await mod_bot_channel.send(
            f"{mod_role.mention}\nNo announcement roles are loaded. Reload?"
        )
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        try:
            # Wait up to 5 minutes for a non-bot ✅/❌ reaction on the prompt.
            reaction, user = await self.bot.wait_for(
                "reaction_add",
                timeout=300.0,
                check=lambda r, u: str(r.emoji) in ["✅", "❌"]
                and r.message == msg
                and not u.bot,
            )
        except asyncio.TimeoutError:
            log.info("Reload timed out")
            await mod_bot_channel.send(
                "Announcement role reload timeout. Use `?subscriptions reload` "
                + "to reload the announcement roles."
            )
        else:
            if str(reaction.emoji) == "✅":
                log.info(f"Announcement role reload started by {user}")
                self._announcement_roles = self.reload_announcement_roles()
                await mod_bot_channel.send(
                    f"{Emoji.ok} Successfully reloaded announcement roles!"
                )
            else:
                log.info(f"Announcement role reload canceled by {user}")
                await mod_bot_channel.send(
                    f"{Emoji.no} Announcement role reload canceled. Use "
                    + "`?subscriptions reload` to reload the announcement roles."
                )
@staticmethod
def load_announcement_roles() -> "dict[str, dict[str, typing.Union[int, bool]]]":
"""Load all the announcement roles from the save file."""
save_file = Path("data", "announcement_roles.json")
if save_file.is_file():
log.info("Loaded announcement roles from save file")
with save_file.open("r") as f:
roles = json.load(f)
log.trace(f"File contents: {roles}")
return roles
else:
return {} # Checked later in `on_ready` and loaded from guild.
    def reload_announcement_roles(
        self,
    ) -> "dict[str, dict[str, typing.Union[int, bool]]]":
        """Reload the list of all the announcement roles in the current guild.

        Rebuilds the name -> {"id", "club"} mapping from guild roles and the
        clubs category, persists it to the JSON save file, and returns it.
        """
        announcement_roles = {}
        guild = discord.utils.get(self.bot.guilds, id=Guild.guild_id)
        clubs_category = discord.utils.get(guild.categories, id=Categories.clubs)
        log.trace("Starting role reload.")
        # Get server and event announcements separately — these two are not
        # club channels.
        announcement_roles["server"] = {
            "id": discord.utils.get(guild.roles, name="Server Announcements").id,
            "club": False,
        }
        announcement_roles["event"] = {
            "id": discord.utils.get(guild.roles, name="Event Announcements").id,
            "club": False,
        }
        # One announcement role per club channel, matched by name prefix.
        for channel in clubs_category.channels:
            announcement_role = discord.utils.find(
                lambda role: "Announcements" in role.name
                and role.name.lower().startswith(channel.name)
                and role.name.index(" ") == len(channel.name),  # prevents overlap
                guild.roles,
            )
            log.trace(f"Channel: {channel.name}, role: {announcement_role}")
            announcement_roles[channel.name] = {
                "id": announcement_role.id,
                "club": "club" in announcement_role.name.lower(),
            }
        log.trace("Saving announcement roles.")
        save_file = Path("data", "announcement_roles.json")
        save_file.parent.mkdir(exist_ok=True)
        with save_file.open("w") as f:
            json.dump(announcement_roles, f, indent=2)
        log.info("Announcement role reload finished")
        return announcement_roles
    @commands.guild_only()
    @commands.command(aliases=("sub",))
    async def subscribe(self, ctx: commands.Context, announcement_name: str) -> None:
        """Subscribe to an announcement role on the server."""
        all_roles = list(self._announcement_roles.keys())
        log.trace(f"All roles: {all_roles}")
        # Fuzzy-match the user's input against known role names (75% cutoff).
        match_info = process.extractOne(
            announcement_name,
            all_roles,
            score_cutoff=75,
        )
        log.trace(f"Match info: {match_info}")
        author_ping = ctx.author.mention
        if match_info:
            role = discord.utils.get(
                ctx.guild.roles, id=self._announcement_roles[match_info[0]]["id"]
            )
            log.trace(f"Matched role `{role}` with probability {match_info[1]}")
            await ctx.author.add_roles(
                role,
                reason="User announcements subscription",
            )
            log.info(f"User {ctx.author} subscribed to {role}")
            # Keep the #roles channel tidy: auto-delete the confirmation and
            # the invoking message after 5 seconds.
            if ctx.message.channel.id == Channels.roles:
                await ctx.send(
                    f"{author_ping}, you have successfully subscribed to {role}.",
                    delete_after=5.0,
                )
                await asyncio.sleep(5.0)
                await ctx.message.delete()
            else:
                await ctx.send(
                    f"{author_ping}, you have successfully subscribed to {role}.",
                )
        else:
            if ctx.message.channel.id == Channels.roles:
                await ctx.send(
                    f"{author_ping}, there are no announcement roles with that name.",
                    delete_after=5.0,
                )
                await asyncio.sleep(5.0)
                await ctx.message.delete()
            else:
                await ctx.send(
                    f"{author_ping}, there are no announcement roles with that name."
                )
    @commands.guild_only()
    @commands.command(aliases=("unsub",))
    async def unsubscribe(self, ctx: commands.Context, announcement_name: str) -> None:
        """Unsubscribe to an announcement role on the server.

        Mirror image of ``subscribe`` — same fuzzy matching and channel
        cleanup, but removes the role instead of adding it.
        """
        all_roles = list(self._announcement_roles.keys())
        log.trace(f"All roles: {all_roles}")
        # Fuzzy-match the user's input against known role names (75% cutoff).
        match_info = process.extractOne(
            announcement_name,
            all_roles,
            score_cutoff=75,
        )
        log.trace(f"Match info: {match_info}")
        author_ping = ctx.author.mention
        if match_info:
            role = discord.utils.get(
                ctx.guild.roles, id=self._announcement_roles[match_info[0]]["id"]
            )
            log.trace(f"Matched role `{role}` with probability {match_info[1]}")
            await ctx.author.remove_roles(
                role,
                reason="User announcements unsubscription",
            )
            log.info(f"User {ctx.author} unsubscribed from {role}")
            # Keep the #roles channel tidy: auto-delete the confirmation and
            # the invoking message after 5 seconds.
            if ctx.message.channel.id == Channels.roles:
                await ctx.send(
                    f"{author_ping}, you have successfully unsubscribed from {role}.",
                    delete_after=5.0,
                )
                await asyncio.sleep(5.0)
                await ctx.message.delete()
            else:
                await ctx.send(
                    f"{author_ping}, you have successfully unsubscribed from {role}.",
                )
        else:
            if ctx.message.channel.id == Channels.roles:
                await ctx.send(
                    f"{author_ping}, there are no announcement roles with that name.",
                    delete_after=5.0,
                )
                await asyncio.sleep(5.0)
                await ctx.message.delete()
            else:
                await ctx.send(
                    f"{author_ping}, there are no announcement roles with that name."
                )
    @commands.guild_only()
    @commands.group(
        name="subscriptions", aliases=("subs",), invoke_without_command=True
    )
    async def subscriptions_group(self, ctx: commands.Context) -> None:
        """Commands group for managing announcement subscriptions."""
        # Bare `?subscriptions` with no subcommand just shows the group help.
        await ctx.send_help(ctx.command)
    @commands.guild_only()
    @subscriptions_group.command(name="list", aliases=("l", "ls"))
    async def list_subscriptions(self, ctx: commands.Context) -> None:
        """List all possible announcement subscriptions and their corresponding commands."""  # noqa: B950
        embed = discord.Embed(
            title="Announcement Subscriptions",
            description="Here are all the possible announcement subscriptions and "
            + "their commands.",
            color=discord.Colour.green(),
        )
        # One embed field per known subscription, showing the command to run.
        all_subs = list(self._announcement_roles.keys())
        for subscription in all_subs:
            club = self._announcement_roles[subscription]["club"]
            embed.add_field(
                name=f"{subscription.title()}{' Club' if club else ''} Announcements",
                value=f"`?subscribe {subscription}`",
                inline=True,
            )
        # The long embed is not allowed in #roles; redirect and clean up.
        if ctx.channel.id == Channels.roles:
            await ctx.send(
                f"{ctx.author.mention}, please use a bot channel to run that command.",
                delete_after=5.0,
            )
            await asyncio.sleep(5.0)
            await ctx.message.delete()
        else:
            await ctx.send(embed=embed)
    @commands.guild_only()
    @commands.has_any_role(*MOD_ROLES)
    @subscriptions_group.command(aliases=("r",))
    async def reload(self, ctx: commands.Context) -> None:
        """Reload the announcement roles save."""
        # Mod-only: rebuilds the mapping from the guild and rewrites the file.
        self._announcement_roles = self.reload_announcement_roles()
        await ctx.send(f"{Emoji.ok} Successfully reloaded announcement roles!")
@commands.guild_only()
@commands.has_role(StaffRoles.admin_role)
@subscriptions_group.command(name="add-club", aliases=("add", "ac", "a-c", "a"))
async def add_club(
self,
ctx: commands.Context,
channel_name: str,
leaders: commands.Greedy[discord.Member] = None,
club: bool = True,
*,
leader_title: typing.Optional[str] = "Leader",
) -> None:
"""Create a new club channel with corresponding roles and leaders (if given)."""
guild = ctx.guild
name = channel_name.replace(" ", "-").lower() # Discord-safe channel names
log.info(f"Creating a new club channel at the request of {ctx.author}")
leader_names = (
list(map(lambda l: l.name + "#" + l.discriminator, leaders))
if leaders
else None
)
log.info(
f"Name: {name}, leaders: {leader_names}, club: {club}, "
+ f"leader title: {leader_title}"
)
# Create the roles and assign them
leader_role = await guild.create_role(
name=f"{name.title()}{' Club' if club else ''} {leader_title}",
mentionable=True,
reason="Club creation",
)
ann_role = await guild.create_role(
name=f"{name.title()}{' Club' if club else ''} Announcements",
mentionable=True,
reason="Club creation",
)
log.trace(f"Created {leader_role} and {ann_role} role")
if leaders:
for leader in leaders:
await leader.add_roles(leader_role, reason="Club creation")
log.trace("Assigned leaders their roles")
# Create the channel
clubs_category = discord.utils.get(guild.categories, id=Categories.clubs)
channel = await clubs_category.create_text_channel(
name,
overwrites={
discord.utils.get(
guild.roles, id=StaffRoles.mod_role
): discord.PermissionOverwrite(view_channel=True, send_messages=True),
discord.utils.get(
guild.roles, id=StaffRoles.muted_role
): discord.PermissionOverwrite(send_messages=False),
leader_role: discord.PermissionOverwrite(
view_channel=True,
manage_channels=True,
manage_permissions=True,
send_messages=True,
manage_messages=True,
),
},
reason="Club creation",
)
position = sorted(
clubs_category.text_channels, key=lambda channel: channel.name
).index(channel)
log.trace(f"Channel index: {position}")
await channel.edit(position=position, reason="Club creation")
log.trace(f"Created channel {channel} and moved to postition {position}")
# Load new announcement roles
log.info(
"Reloading announcement roles because of new announcement channel "
+ channel_name
)
self._announcement_roles = self.reload_announcement_roles()
# Completion message
await ctx.send(f"{Emoji.ok} Successfully added club channel!")
@commands.guild_only()
@commands.has_role(StaffRoles.admin_role)
@subscriptions_group.command(
name="remove-club", aliases=("remove", "rm-c", "rmc", "rm")
)
async def remove_club(
self, ctx: commands.Context, club_channel: discord.TextChannel
) -> None:
"""Delete a club channel and roles."""
log.info(
f"Deleteing club channel {club_channel} and roles at the request of "
+ f"{ctx.author}"
)
ann_role = discord.utils.get(
ctx.guild.roles, id=self._announcement_roles[club_channel.name]["id"]
)
await ann_role.delete(reason="Removing club from server")
log.trace("Deleted announcement role")
leader_role = discord.utils.find(
lambda role: role.name.lower().startswith(club_channel.name),
ctx.guild.roles,
)
await leader_role.delete(reason="Removing club from server")
log.trace("Deleted leader role")
await club_channel.delete(reason="Removing club from server")
log.trace("Deleted channel")
self._announcement_roles = self.reload_announcement_roles()
# Completion message
await ctx.send(f"{Emoji.ok} Successfully removed club channel!")
def setup(bot: commands.Bot) -> None:
    """Add the Subscriptions cog to the bot (discord.py extension entry point)."""
    bot.add_cog(Subscriptions(bot))
| egelja/roycemorebot | roycemorebot/exts/subscriptions.py | subscriptions.py | py | 15,209 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "... |
34787406936 | from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from wordcloud_process import wordcloud_img
from article_loader import ArticleLoader
article_loader = ArticleLoader('english_corpora.yaml')
articles = article_loader.load()
def feature_and_matrix(articles, **kwargs):
    """Fit a TF-IDF vectorizer on *articles*.

    Any keyword arguments are forwarded to ``TfidfVectorizer``.

    Returns:
        Tuple of (list of feature names, dense document-term matrix).
    """
    tfid_vectorizer = TfidfVectorizer(**kwargs)
    matrix = tfid_vectorizer.fit_transform(articles)
    # ``get_feature_names`` was removed in scikit-learn 1.2 in favour of
    # ``get_feature_names_out``; support both old and new versions.
    try:
        features = list(tfid_vectorizer.get_feature_names_out())
    except AttributeError:
        features = tfid_vectorizer.get_feature_names()
    return features, matrix.A
def produce_wordcloud(features, vector, file_name):
    """Render the non-zero TF-IDF weights of one document as a word cloud image."""
    weights = {feature: weight
               for feature, weight in zip(features, vector)
               if weight != 0.}
    wordcloud_img(weights, file_name)
# 1-gram analysis: one word cloud per article plus the pairwise similarity
# between the first two articles.
features, matrix = feature_and_matrix(articles, stop_words='english')
for i, vector in enumerate(matrix):
    produce_wordcloud(features, vector, f'image/English_News_{i}.png')
print(f'1-gram TF-IDF cosine similarity: {cosine_similarity(matrix)[0, 1]}')
# Repeat the analysis with 2-gram + 3-gram features.
features, matrix = feature_and_matrix(articles, stop_words='english',
                                      ngram_range=(2, 3))
for i, vector in enumerate(matrix):
    produce_wordcloud(features, vector, f'image/English_News_{i}_2+3-gram.png')
print(f'2-gram + 3-gram TF-IDF cosine similarity: '\
      f'{cosine_similarity(matrix)[0, 1]}')
{
"api_name": "article_loader.ArticleLoader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "article_loader.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 12,
"usage_type": "call"
... |
25172905892 | #%%
import pandas as pd
import altair as alt
import numpy as np
from sklearn.neighbors import NearestNeighbors
alt.data_transformers.disable_max_rows()
#%%
# Input: filtered chr16 RADICL-seq read pairs (RNA end + DNA end per row).
RADICL_file="/home/vipink/Documents/FANTOM6/HDBSCAN_RADICL_peak/data/processed/chr16_filter_df.csv"
#%%
radicl_df = pd.read_csv(RADICL_file,delimiter="\t")
# Observe likely PCR duplicates (tiny minority)
#%%
# Drop rows identical on both ends' coordinates and strands (PCR duplicates).
dedup_radicl_df = (radicl_df.loc[:,['chrom','start','end','strand','DNA_start','DNA_end','DNA_strand']]
                   .drop_duplicates())
# %%
# Work in (RNA start, DNA start) coordinate space, plus-strand reads only.
plus_strand_space_df = (dedup_radicl_df
                        .query("strand == '+'")
                        .loc[:,['start','DNA_start']]
                        .reset_index()
                        )
#%%
# For every read, find its nearest neighbouring read in that space.
# NOTE(review): to_numpy() also feeds the reset index column into the
# Euclidean metric — confirm that is intended.
nbrs = NearestNeighbors(n_neighbors=2, metric='euclidean',radius=25).fit(plus_strand_space_df.to_numpy())
distances, indices = nbrs.kneighbors(plus_strand_space_df.to_numpy())
# %%
# Per-read distance to its nearest neighbour, separately on the DNA and RNA axes.
read_neighbour_df = (plus_strand_space_df
                     .assign(closest_DNA=np.abs(plus_strand_space_df.loc[indices[:,0],'DNA_start'].to_numpy() - plus_strand_space_df.loc[indices[:,1],'DNA_start'].to_numpy()),
                             closest_RNA=np.abs(plus_strand_space_df.loc[indices[:,0],'start'].to_numpy() - plus_strand_space_df.loc[indices[:,1],'start'].to_numpy())))
# %%
# Empirical CDF of nearest-neighbour distances on the DNA axis.
dna_dist_cdf = (read_neighbour_df
                .sort_values('closest_DNA')
                .groupby('closest_DNA')
                .agg(read_count=('start','count'))
                .reset_index()
                .assign(cread=lambda df_:df_.read_count.cumsum()/plus_strand_space_df.shape[0])
                .rename(columns={'closest_DNA':'distance'})
                .assign(end='DNA'))
# Same CDF on the RNA axis.
rna_dist_cdf = (read_neighbour_df
                .sort_values('closest_RNA')
                .groupby('closest_RNA')
                .agg(read_count=('start','count'))
                .reset_index()
                .assign(cread=lambda df_:df_.read_count.cumsum()/plus_strand_space_df.shape[0])
                .rename(columns={'closest_RNA':'distance'})
                .assign(end='RNA'))
tot_df = pd.concat([rna_dist_cdf,dna_dist_cdf])
# %%
# Plot both CDFs against log10(distance + 1).
(alt.Chart(tot_df.assign(log_val=lambda df_:np.log10(df_.distance + 1)))
 .mark_line(opacity=0.6).encode(
    x="log_val:Q",
    y='cread:Q',
    color="end"
))
# %%
| princeps091-binf/HDBSCAN_RADICL_peak | scripts/RADICL_read_neighbourhood.py | RADICL_read_neighbourhood.py | py | 2,053 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "altair.data_transformers.disable_max_rows",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "altair.data_transformers",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
... |
35413595898 | # -*- coding:utf-8 -*-
import random
import pygame
class BoardManager:
    """Own the game grid: per-block status tracking plus pygame rendering."""

    # Block status codes.
    WALL = 0
    FOOD = 1
    NONE = 2
    HEAD = 3
    BODY = 4

    def __init__(self, x_blocks, y_blocks, block_width, origin_x, origin_y, caption):
        """
        Args:
            x_blocks, y_blocks: Grid size in blocks.
            block_width: Side length of one block in pixels.
            origin_x, origin_y: Pixel offset of the grid's top-left corner.
            caption: Default window caption.
        """
        self.x_blocks = x_blocks
        self.y_blocks = y_blocks
        # Blocks currently in the NONE state (candidate food positions).
        self.non_blocks = None
        self.total_len = x_blocks * y_blocks
        self.blocks_status = None
        self.block_width = block_width
        self.screen = None
        self.origin_x = origin_x
        self.origin_y = origin_y
        self.caption = caption
        pygame.init()

    def init_board(self, bg_color=(0, 0, 0), caption=None):
        """Create the window and reset every block to NONE."""
        pygame.display.set_caption(caption or self.caption)
        board_x, board_y = self.x_blocks * (1 + self.block_width) + \
            2 * self.origin_x, (self.y_blocks + 1) * (self.block_width + 1) + self.origin_y
        self.screen = pygame.display.set_mode((board_x, board_y), 0, 32)
        self.blocks_status = [[self.NONE for _ in range(self.y_blocks)] for _ in range(self.x_blocks)]
        self.non_blocks = self._gen_non_blocks()
        pygame.display.update()
        self.set_bg_color(bg_color)

    def set_bg_color(self, color=(0, 0, 0)):
        """Fill the whole window with *color*."""
        self.screen.fill(color)
        pygame.display.update()

    def _gen_non_blocks(self):
        """Return a list of every grid coordinate (all blocks start as NONE)."""
        non_blocks = []
        for i in range(0, self.x_blocks):
            for j in range(0, self.y_blocks):
                non_blocks.append((i, j))
        return non_blocks

    # Draw the grid lines.
    def show_pods(self, color=(255, 255, 255)):
        start_pos_x, start_pos_y = self.origin_x, self.origin_y
        end_pos_x, end_pos_y = self.origin_x, (self.block_width + 1) * self.y_blocks + self.origin_y
        # Vertical lines first.
        for c_index in range(0, self.x_blocks + 1):
            pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, end_pos_y), 1)
            start_pos_x = end_pos_x = start_pos_x + 1 + self.block_width
        start_pos_x, start_pos_y = self.origin_x, self.origin_y
        end_pos_x, end_pos_y = self.origin_x + (self.block_width + 1) * self.x_blocks, self.origin_y
        # Then horizontal lines.
        for r_index in range(0, self.y_blocks + 1):
            pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, end_pos_y), 1)
            start_pos_y = end_pos_y = start_pos_y + 1 + self.block_width
        pygame.display.flip()

    def show_wall(self, color=(255, 0, 0)):
        """Draw the rectangular outer wall around the grid."""
        start_pos_x, start_pos_y = self.origin_x, self.origin_y
        end_pos_x, end_pos_y = self.origin_x + (self.block_width + 1) * self.x_blocks, \
            (self.block_width + 1) * self.y_blocks + self.origin_y
        pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (end_pos_x, start_pos_y))
        pygame.draw.line(self.screen, color, (start_pos_x, start_pos_y), (start_pos_x, end_pos_y))
        pygame.draw.line(self.screen, color, (end_pos_x, start_pos_y), (end_pos_x, end_pos_y))
        pygame.draw.line(self.screen, color, (start_pos_x, end_pos_y), (end_pos_x, end_pos_y))
        pygame.display.flip()

    def draw_block(self, x, y, color=(111, 111, 111)):
        """Fill the block at grid position (x, y) with *color*."""
        pos_x = self.origin_x + x * (self.block_width + 1) + 1
        pos_y = self.origin_y + y * (self.block_width + 1) + 1
        pygame.draw.rect(self.screen, color, (pos_x, pos_y, self.block_width, self.block_width), 0)
        pygame.display.update((pos_x, pos_y, self.block_width, self.block_width))

    def set_block(self, pos, status):
        """Set *pos* to *status*, keeping the NONE-block list in sync."""
        old_status = self.blocks_status[pos[0]][pos[1]]
        if old_status == status:
            return
        self.blocks_status[pos[0]][pos[1]] = status
        if old_status == self.NONE:
            self.non_blocks.remove(pos)
        if status == self.NONE:
            self.non_blocks.append(pos)

    def get_status(self, pos):
        """Return the status at *pos*; anything outside the grid counts as WALL."""
        x, y = pos[0], pos[1]
        # Bug fix: the y upper bound used `>` instead of `>=`, so
        # y == y_blocks slipped through and raised an IndexError below.
        if x < 0 or x >= self.x_blocks or y < 0 or y >= self.y_blocks:
            return self.WALL
        return self.blocks_status[x][y]

    def gen_food(self, color=(255, 0, 0)):
        """Place food on a random currently-empty block and draw it."""
        index = random.randint(0, len(self.non_blocks) - 1)
        block_pos = self.non_blocks[index]
        pos_x = self.origin_x + block_pos[0] * (self.block_width + 1) + 1
        pos_y = self.origin_y + block_pos[1] * (self.block_width + 1) + 1
        rect = (pos_x, pos_y, self.block_width, self.block_width)
        self.set_block(block_pos, self.FOOD)
        pygame.draw.rect(self.screen, color, rect, 0)
        pygame.display.update(rect)

    def show_score_and_speed(self):
        """Placeholder: render the current score and speed."""
        pass

    def show_game_over(self):
        """Placeholder: render the game-over screen."""
        pass

    def show_pause(self):
        """Placeholder: render the pause screen."""
        pass

    def show_start(self):
        """Placeholder: render the start screen."""
        pass
| coderwf/pygames | glutsnake/board.py | board.py | py | 4,748 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "pygame.dis... |
9580721700 | """
Tomasulo main module.
"""
import argparse
import logger
from machine import Machine
def main():
    """
    Main entry point.

    Parses the command line arguments, configures logging, then loads and
    runs the requested program on the simulated machine.

    :return: None
    """
    parser = argparse.ArgumentParser(
        description='Simulate execution of DLX code on a Tomasulo processor.'
    )
    parser.add_argument('-f', type=str, dest='filename', required=True,
                        help='The input file. Must be a .hex file.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Increase output verbosity.')
    args = parser.parse_args()

    # Verbose flag raises the log level from 0 to 1.
    logger.setLogLevel(1 if args.verbose else 0)

    # Run
    simulator = Machine()
    simulator.loadProgram(args.filename)
    simulator.run()


if __name__ == '__main__':
    main()
| kaledj/TomasuloSim | tomasulo.py | tomasulo.py | py | 751 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logger.setLogLevel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "machine.Machine",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "machine.lo... |
5489764282 | """
Module that provides different readers for trajectory files.
It also provides a common interface layer between the file IO packages,
namely pygmx and mdanalysis, and mdevaluate.
"""
from .checksum import checksum
from .logging import logger
from . import atoms
from functools import lru_cache
from collections import namedtuple
import os
from os import path
from array import array
from zipfile import BadZipFile
import builtins
import warnings
import numpy as np
from scipy import sparse
from dask import delayed, __version__ as DASK_VERSION
try:
import pygmx
from pygmx.errors import InvalidMagicException, InvalidIndexException, FileTypeError
PYGMX_AVAILABLE = True
except ImportError:
PYGMX_AVAILABLE = False
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
import MDAnalysis as mdanalysis
MADANALYSIS_AVAILABLE = True
except ImportError:
MADANALYSIS_AVAILABLE = False
assert PYGMX_AVAILABLE or MADANALYSIS_AVAILABLE, 'Could not import any file IO package; make sure too install either pygmx or mdanalysis.'
class NojumpError(Exception):
    """Raised when nojump data is requested but not available for a trajectory."""
    pass
class NoReaderAvailabelError(Exception):
    """Raised when neither pygmx nor mdanalysis can open the given files."""
    # NOTE(review): "Availabel" is a typo, but the class name is public API,
    # so it is kept for backward compatibility.
    pass
def open_with_mdanalysis(topology, trajectory, cached=False):
    """Open the topology and trajectory with mdanalysis.

    Args:
        topology: Topology file readable by MDAnalysis.
        trajectory: Trajectory file readable by MDAnalysis.
        cached: If not False, wrap the reader in an LRU cache; ``True`` uses
            a maxsize of 128, any other value is used as the maxsize itself.

    Returns:
        Tuple of (atom subset, reader).
    """
    uni = mdanalysis.Universe(topology, trajectory, convert_units=False)
    if cached is not False:
        if cached is True:
            maxsize = 128
        else:
            maxsize = cached
        reader = CachedReader(uni.trajectory, maxsize)
    else:
        reader = BaseReader(uni.trajectory)
    reader.universe = uni
    # Build the mdevaluate atom table from the universe's per-atom arrays.
    atms = atoms.Atoms(
        np.stack((uni.atoms.resids, uni.atoms.resnames, uni.atoms.names), axis=1),
        charges=uni.atoms.charges, masses=uni.atoms.masses
    ).subset()
    return atms, reader
def open_with_pygmx(topology, trajectory, cached=False, reindex=False,
                    ignore_index_timestamps=False, index_file=None):
    """Open a topology and trajectory with pygmx.

    Args:
        topology: Topology file, either a .tpr or a .gro file.
        trajectory: Trajectory (.xtc) file.
        cached: If not False, wrap the reader in an LRU cache; a bool value
            uses maxsize 128, any other value (including None for an
            unbound cache) is used as the maxsize itself.
        reindex: Regenerate the xtc index file if it is invalid.
        ignore_index_timestamps: Passed through to ``pygmx.open``.
        index_file: Optional Gromacs index file for the atom subset.

    Returns:
        Tuple of (atom subset, reader).

    Raises:
        InvalidIndexException: If the index is invalid and ``reindex`` is False.
        ValueError: If the topology file type is not supported.
    """
    try:
        rd = pygmx.open(trajectory, ignore_index_timestamps=ignore_index_timestamps)
    except InvalidMagicException:
        raise InvalidIndexException('This is not a valid index file: {}'.format(trajectory))
    except InvalidIndexException:
        if reindex:
            # Remove the stale index file and let pygmx regenerate it.
            try:
                os.remove(pygmx.index_filename_for_xtc(trajectory))
            except FileNotFoundError:
                pass
            rd = pygmx.open(trajectory)
        else:
            raise InvalidIndexException('Index file is invalid, use reindex=True to regenerate.')
    if cached is not False:
        if isinstance(cached, bool):
            maxsize = 128
        else:
            maxsize = cached
        reader = CachedReader(rd, maxsize)
    else:
        reader = BaseReader(rd)
    if topology.endswith('.tpr'):
        atms = atoms.from_tprfile(topology, index_file=index_file)
    elif topology.endswith('.gro'):
        atms = atoms.from_grofile(topology, index_file=index_file)
    else:
        # Previously an unsupported extension caused an UnboundLocalError on
        # the return statement; fail with a clear message instead.
        raise ValueError('Unsupported topology file type: {}'.format(topology))
    return atms, reader
def open(topology, trajectory, cached=False, index_file=None, reindex=False, ignore_index_timestamps=False):
    """
    Open a trajectory file with the apropiate reader.

    Args:
        topology (str): Topology file (.tpr/.gro for pygmx, anything
            MDAnalysis understands otherwise).
        trajectory (str):
            Trajectory file to open, the reader will be chosen
            according to the file extension.
        cached (opt.):
            If Reader should be cached with lru_cache. If this is True, maxsize for
            the cache is 128, otherwise the argument is passed as maxsize.
            Use cached=None to get an unbound cache.
        index_file (opt.): Gromacs index file (pygmx only).
        reindex (opt.): Regenerate the index of the xtc-file (pygmx only).
        ignore_index_timestamps (opt.): Passed through to pygmx.

    Raises:
        NoReaderAvailabelError: If neither IO package can handle the input.
    """
    # Prefer pygmx for Gromacs file types, fall back to MDAnalysis.
    if PYGMX_AVAILABLE and trajectory.endswith('.xtc') and topology.endswith(('.tpr', '.gro')):
        return open_with_pygmx(topology, trajectory, cached=cached, reindex=reindex,
                               ignore_index_timestamps=ignore_index_timestamps, index_file=index_file)
    elif MADANALYSIS_AVAILABLE:
        return open_with_mdanalysis(topology, trajectory, cached)
    else:
        raise NoReaderAvailabelError('No reader package found, install pygmx or mdanalysis.')
def is_writeable(fname):
    """Test if the directory of ``fname`` is writeable, by creating a temporary file.

    Args:
        fname: Path whose parent directory is probed; ``fname`` itself is
            never created. A bare filename probes the current directory.

    Returns:
        True if a temporary file could be created (and removed) in the
        directory, False otherwise.
    """
    import tempfile

    fdir = os.path.dirname(fname) or '.'
    try:
        # NamedTemporaryFile picks a unique name atomically and deletes the
        # file on close, avoiding the race of probing random names by hand.
        with tempfile.NamedTemporaryFile(dir=fdir):
            return True
    except OSError:
        return False
def nojump_filename(reader):
    """Return the path of the nojump archive for *reader*'s trajectory.

    Prefers a hidden file next to the trajectory; falls back to a mirror
    tree under ``~/.mdevaluate/nojump`` when that location is not writeable.
    """
    directory, fname = path.split(reader.filename)
    fname = path.join(directory, '.{}.nojump.npz'.format(fname))
    if os.path.exists(fname) or is_writeable(directory):
        return fname
    else:
        # NOTE(review): at this point `fname` is already a full path, so
        # formatting it into '.{}.nojump.npz' again yields an oddly nested
        # name — confirm this fallback naming is intentional.
        fname = os.path.join(
            os.path.join(os.environ['HOME'], '.mdevaluate/nojump'),
            directory.lstrip('/'),
            '.{}.nojump.npz'.format(fname)
        )
        logger.info('Saving nojump to {}, since original location is not writeable.'.format(fname))
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        return fname
# Attributes of a scipy CSR matrix persisted into the nojump .npz archive.
CSR_ATTRS = ('data', 'indices', 'indptr')
# Magic number mixed into the checksum so stale nojump files are invalidated.
NOJUMP_MAGIC = 2016
def parse_jumps(trajectory):
    """Scan *trajectory* and collect all periodic-boundary jumps per dimension.

    Returns a 3-tuple (x, y, z) of ``SparseData`` namedtuples holding
    COO-style (data, row, col) arrays, where row is the frame index, col the
    atom index and data the signed number of box lengths jumped.
    """
    prev = trajectory[0].whole
    box = prev.box.diagonal()
    SparseData = namedtuple('SparseData', ['data', 'row', 'col'])
    # One sparse accumulator per spatial dimension: 'b' = signed byte for the
    # jump counts, 'l' = long for the frame/atom indices.
    jump_data = (
        SparseData(data=array('b'), row=array('l'), col=array('l')),
        SparseData(data=array('b'), row=array('l'), col=array('l')),
        SparseData(data=array('b'), row=array('l'), col=array('l'))
    )
    for i, curr in enumerate(trajectory):
        if i % 500 == 0:
            logger.debug('Parse jumps Step: %d', i)
        # Number of box lengths each coordinate moved since the last frame;
        # non-zero entries are pbc jumps.
        delta = ((curr - prev) / box).round().astype(np.int8)
        prev = curr
        for d in range(3):
            col, = np.where(delta[:, d] != 0)
            jump_data[d].col.extend(col)
            jump_data[d].row.extend([i] * len(col))
            jump_data[d].data.extend(delta[col, d])
    return jump_data
def generate_nojump_matrixes(trajectory):
    """
    Create the matrixes with pbc jumps for a trajectory.
    """
    logger.info('generate Nojump Matrixes for: {}'.format(trajectory))
    jump_data = parse_jumps(trajectory)
    N = len(trajectory)
    M = len(trajectory[0])
    # One (frames x atoms) CSR matrix per spatial dimension.
    trajectory.frames.nojump_matrixes = tuple(
        sparse.csr_matrix((np.array(m.data), (m.row, m.col)), shape=(N, M)) for m in jump_data
    )
    # Persist next to the trajectory so later runs can reuse the data.
    save_nojump_matrixes(trajectory.frames)
def save_nojump_matrixes(reader, matrixes=None):
    """Write the nojump CSR matrixes, plus a validating checksum, to an .npz file."""
    if matrixes is None:
        matrixes = reader.nojump_matrixes
    # The checksum ties the saved data to this exact trajectory file/version.
    data = {'checksum': checksum(NOJUMP_MAGIC, checksum(reader))}
    for d, mat in enumerate(matrixes):
        data['shape'] = mat.shape
        for attr in CSR_ATTRS:
            data['{}_{}'.format(attr, d)] = getattr(mat, attr)
    np.savez(nojump_filename(reader), **data)
def load_nojump_matrixes(reader):
    """Load previously saved nojump matrixes for *reader*, if valid.

    Corrupted or outdated archive files are removed; on success the three
    CSR matrixes are attached to ``reader.nojump_matrixes``.
    """
    zipname = nojump_filename(reader)
    try:
        data = np.load(zipname)
    except (AttributeError, BadZipFile, OSError):
        # npz-files can be corrupted, propably a bug for big arrays saved with savez_compressed?
        logger.info('Removing zip-File: %s', zipname)
        os.remove(nojump_filename(reader))
        return
    try:
        # Only accept data written for exactly this trajectory file.
        if data['checksum'] == checksum(NOJUMP_MAGIC, checksum(reader)):
            reader.nojump_matrixes = tuple(
                sparse.csr_matrix(
                    tuple(data['{}_{}'.format(attr, d)] for attr in CSR_ATTRS),
                    shape=data['shape']
                )
                for d in range(3)
            )
            logger.info('Loaded Nojump Matrixes: {}'.format(nojump_filename(reader)))
        else:
            logger.info('Invlaid Nojump Data: {}'.format(nojump_filename(reader)))
    except KeyError:
        # Archive is missing expected keys: treat it as corrupt and discard.
        logger.info('Removing zip-File: %s', zipname)
        os.remove(nojump_filename(reader))
        return
def correct_nojump_matrixes_for_whole(trajectory):
    """Adjust the first row of the nojump matrixes for 'whole' molecules."""
    reader = trajectory.frames
    frame = trajectory[0]
    box = frame.box.diagonal()
    # Offset (in box lengths) introduced by making the molecules whole.
    cor = ((frame - frame.whole) / box).round().astype(np.int8)
    for d in range(3):
        reader.nojump_matrixes[d][0] = cor[:, d]
    save_nojump_matrixes(reader)
class BaseReader:
    """Base class for trajectory readers."""

    @property
    def filename(self):
        # Path of the underlying trajectory file.
        return self.rd.filename

    @property
    def nojump_matrixes(self):
        # Sparse pbc-jump matrixes; raise if they were never generated/loaded.
        if self._nojump_matrixes is None:
            raise NojumpError('Nojump Data not available: {}'.format(self.filename))
        return self._nojump_matrixes

    @nojump_matrixes.setter
    def nojump_matrixes(self, mats):
        self._nojump_matrixes = mats

    def __init__(self, rd):
        """
        Args:
            rd: An already opened low-level reader (pygmx reader or
                MDAnalysis trajectory).
        """
        self.rd = rd
        self._nojump_matrixes = None
        # Load previously computed nojump data if a matching archive exists.
        if path.exists(nojump_filename(self)):
            load_nojump_matrixes(self)

    def __getitem__(self, item):
        # Frame access is delegated to the low-level reader.
        return self.rd[item]

    def __len__(self):
        # Number of frames in the trajectory.
        return len(self.rd)

    def __checksum__(self):
        if hasattr(self.rd, 'cache'):
            # Has an pygmx reader
            return checksum(self.filename, str(self.rd.cache))
        elif hasattr(self.rd, '_xdr'):
            # Has an mdanalysis reader
            cache = array('L', self.rd._xdr.offsets.tobytes())
            return checksum(self.filename, str(cache))
        # NOTE(review): implicitly returns None when neither attribute is
        # present — confirm every supported reader exposes one of them.
class CachedReader(BaseReader):
    """A reader that has a least-recently-used cache for frames."""

    @property
    def cache_info(self):
        """Get Information about the lru cache."""
        return self._get_item.cache_info()

    def clear_cache(self):
        """Clear the cache of the frames."""
        self._get_item.cache_clear()

    def __init__(self, rd, maxsize):
        """
        Args:
            rd: An already opened low-level reader.
            maxsize: Maximum size of the lru_cache or None for infinite cache.
        """
        super().__init__(rd)
        # Wrap the instance's _get_item in an LRU cache; done per instance
        # because functools.lru_cache on the method would be shared.
        self._get_item = lru_cache(maxsize=maxsize)(self._get_item)

    def _get_item(self, item):
        """Buffer function for lru_cache, since __getitem__ can not be cached."""
        return super().__getitem__(item)

    def __getitem__(self, item):
        return self._get_item(item)
# Wrap the C-level xtc frame reader in a dask.delayed object; the `traverse`
# keyword only exists in newer dask versions. Guarded on PYGMX_AVAILABLE so
# importing this module no longer raises NameError when pygmx is missing.
# NOTE(review): the string comparison of DASK_VERSION is lexicographic
# ('0.9.0' > '0.15.0'); confirm the intended version cut-off.
if PYGMX_AVAILABLE:
    if DASK_VERSION >= '0.15.0':
        read_xtcframe_delayed = delayed(pure=True, traverse=False)(pygmx.read_xtcframe)
    else:
        read_xtcframe_delayed = delayed(pure=True)(pygmx.read_xtcframe)
class DelayedReader(BaseReader):
    """Reader that returns dask-delayed frames instead of loading them eagerly."""

    @property
    def filename(self):
        # Fall back to the stored filename once the eager reader is dropped.
        if self.rd is not None:
            return self.rd.filename
        else:
            return self._filename

    def __init__(self, filename, reindex=False, ignore_index_timestamps=False):
        # NOTE(review): BaseReader.__init__ takes an already opened reader
        # (`rd`), not a filename/reindex signature — this call looks broken
        # (TypeError at runtime); confirm against the pygmx-era history.
        super().__init__(filename, reindex=False, ignore_index_timestamps=False)
        self.natoms = len(self.rd[0].coordinates)
        # Keep only the frame-offset cache; the reader itself is released so
        # frames can be read lazily per offset.
        self.cache = self.rd.cache
        self._filename = self.rd.filename
        self.rd = None

    def __len__(self):
        return len(self.cache)

    def _get_item(self, frame):
        # Build a delayed read of a single frame from its cached file offset.
        return read_xtcframe_delayed(self.filename, self.cache[frame], self.natoms)

    def __getitem__(self, frame):
        return self._get_item(frame)
class EnergyReader:
    """A reader for Gromacs energy (.edr) files."""

    def __init__(self, edrfile):
        """
        Args:
            edrfile: Filename of the energy file.
        """
        edr = pygmx.open(edrfile)
        self.time, data = edr.read()
        # Split the (name, unit) pairs into two parallel tuples and store the
        # data with one row per energy type.
        self.types, self.units = zip(*edr.types)
        self.data = data.T

    def __getitem__(self, type):
        """Return the time series for the given energy type.

        Raises:
            KeyError: If the energy type is not present in the file.
        """
        if type not in self.types:
            raise KeyError('Energy type {} not found in Energy File.'.format(type))
        return self.data[self.types.index(type)]
| mdevaluate/mdevaluate | mdevaluate/reader.py | reader.py | py | 12,347 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "warnings.catch_warnings",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "MDAnalysis.Universe",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "num... |
8909257357 | import numpy as np
import cv2
import os
from PIL import Image
X = 10 # 0
Y = 105 # 95
WIDTH = 215 # 356
HEIGHT = 440 # 440
def process_img(original_img):
    """Convert a BGR screen capture to a 10x20 grayscale thumbnail."""
    processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
    # Downscale to the expected input size (width=10, height=20).
    processed_img = cv2.resize(processed_img, (10, 20))
    return processed_img
def grab_screen():
    """Capture the configured screen region and return it processed.

    Uses the macOS ``screencapture`` utility to write a JPEG of the region
    (X, Y, WIDTH, HEIGHT) into a named pipe, reads it back with PIL and
    returns the grayscale, resized array produced by ``process_img``.
    """
    if not os.path.isfile('./image_data/FIFO'):
        os.mkfifo('./image_data/FIFO')
    os.system('screencapture -x -tjpg -R{},{},{},{} ./image_data/FIFO'.format(X, Y, WIDTH, HEIGHT))
    with Image.open('./image_data/FIFO') as fifo:
        # Convert to an array while the file is still open: the previous
        # code used the PIL image only after the `with` block had closed it.
        processed_screen = process_img(np.array(fifo))
    return processed_screen
| sebastianandreasson/tetris_tensorflow | grab_screen.py | grab_screen.py | py | 671 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.cvtColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
... |
3721769844 | import io
import time
from openpyxl import Workbook
import openpyxl as O
import speedtest
import re
import sys
import psycopg2
from psycopg2 import Error
from datetime import datetime
import hashlib
from .connection import Connection
cn=Connection()
class SpeedPage():
    """Interactive CLI helper that runs speed tests, stores the results in a
    PostgreSQL database and mirrors them into an Excel workbook."""

    def asking_name(self):
        """Prompt for and return the operator's name."""
        print("Введите ваше ИМЯ:")
        author = input()
        return author

    def asking_serial_number(self):
        """Prompt for the device serial number, stripping the 'S/N: ' prefix."""
        print("Введите ваш Серийный номер:")
        s = input()
        serialno = s.replace("S/N: ", "")
        return serialno

    def id_of_test(self):
        """Return the next free test id (max existing id + 1) from the database."""
        # Ensure the name is bound even if connect() itself raises; the
        # previous code hit a NameError in the finally block in that case,
        # masking the real connection error.
        connection = None
        try:
            connection = psycopg2.connect(user="dssadmin",
                                          password="dssadmin",
                                          host="10.10.2.180",
                                          port="5432",
                                          database="devices")
            cursor = connection.cursor()
            cursor.execute(""" SELECT MAX(testId) FROM testhistory """)
            result = cursor.fetchall()
            for i in result:
                testId = i[0] + 1
        except (Exception, Error) as error:
            print("Ошибка при работе с PostgreSQL", error)
        finally:
            if connection:
                cursor.close()
                connection.close()
        return testId

    def port_number(self):
        """Prompt for and return the port label being tested."""
        print("Какой это порт:")
        port_number = input()
        return port_number

    def speedtest_database(self, author, serialno, testId, pretty_serialno, port_number, j):
        """Run two consecutive speed tests, logging each to Excel and the database.

        Args:
            author: Operator name.
            serialno: Device serial number.
            testId: Identifier shared by this test's measurements.
            pretty_serialno: Excel workbook file name ("<serial>.xlsx").
            port_number: Port label under test.
            j: Row offset in the Excel sheet for this test's results.
        """
        for i in range(1, 3):
            current_date = datetime.now().strftime("%Y-%m-%d")
            current_time = datetime.now().strftime("%H:%M:%S")
            start_time = datetime.now()
            st = speedtest.Speedtest(secure=True)
            st.get_best_server()
            download_number = st.download()
            print("Ваша входящяя скорость:", end=' ')
            download_speed = self.test_download_test(download_number)
            print(download_speed)
            upload_number = st.upload()
            print("Ваша исходящяя скорость:", end=' ')
            upload_speed = self.test_upload_test(upload_number)
            print(upload_speed)
            stop_time = datetime.now()
            duration = str((stop_time - start_time).seconds) + ' сек'
            self.excel_uploading(download_speed, upload_speed, j, pretty_serialno, port_number)
            print("Количество времени потраченная на тест:", end=' ')
            print(duration)
            print("Тест номер:", end=' ')
            print(testId)
            cn.connect_to_database(author, serialno, testId, download_speed, upload_speed,
                                   duration, current_date, current_time, port_number)

    def pretty_speed(self, speed):
        """Format a bit/s value as e.g. '95.30 Mbps'."""
        unit = 'bps'
        kmg = ['', 'K', 'M', 'G']
        i = 0
        # Clamp at the largest known prefix: previously speeds >= 1000 Gbps
        # walked past the end of `kmg` and raised an IndexError.
        while speed >= 1000 and i < len(kmg) - 1:
            speed /= 1000
            i += 1
        return "{:.2f}".format(speed) + ' ' + kmg[i] + unit

    def speed_measure(self, speed):
        """Scale a raw bit/s value down below 1000 (value in K/M/G units)."""
        while speed >= 1000:
            speed /= 1000
        return speed

    def pretty_file_format(self, serialno):
        """Return the Excel workbook file name for a serial number."""
        encoding = '.xlsx'
        return serialno + encoding

    def test_download_test(self, download_number):
        """Return the pretty download speed, printing FAIL for low values.

        NOTE(review): speed_measure discards the unit, so e.g. 500 Kbps
        (= 0.5 Mbps) is not flagged as FAIL — confirm threshold semantics.
        """
        download_speed = self.speed_measure(download_number)
        download_beauty_speed = self.pretty_speed(download_number)
        if (download_speed < 100):
            print("FAIL", end=' ')
        return download_beauty_speed

    def test_upload_test(self, upload_number):
        """Return the pretty upload speed, printing FAIL for low values."""
        upload_speed = self.speed_measure(upload_number)
        upload_beauty_speed = self.pretty_speed(upload_number)
        if (upload_speed < 100):
            print("FAIL", end=' ')
        return upload_beauty_speed

    def create_excel_file(self, serialno):
        """Create an empty workbook saved under the given file name."""
        wb = Workbook()
        ws = wb.active
        wb.save(filename=serialno)

    def excel_uploading(self, download_speed, upload_speed, i, serialno, port_number):
        """Write one measurement row (and the header row) into the workbook."""
        Excel_file = serialno
        Excel_worksheet = "Sheet"
        i = i + 1
        wb = O.load_workbook(Excel_file)
        ws = wb[Excel_worksheet]
        ws.cell(i, 1).value = port_number
        ws.cell(1, 1).value = 'Port Number'
        ws.cell(1, 2).value = 'Download Speed'
        ws.cell(1, 3).value = 'Upload Speed'
        ws.cell(i, 2).value = download_speed
        ws.cell(i, 3).value = upload_speed
        wb.save(Excel_file)
        wb.close()
| Astarota/SpeedTestCLI | pages/speed_page.py | speed_page.py | py | 3,737 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "connection.Connection",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "connection.cursor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "psycopg2.Err... |
6748902164 | import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
import keyboard
import os
import pandas as pd
# Path of the CSV file used to record attendance for this run.
train_data_path = os.path.join('artifacts', "attendance.csv")
os.makedirs(os.path.dirname(train_data_path), exist_ok=True)
# NOTE(review): this recreates an empty sheet (header only) on every run,
# wiping any previously recorded attendance — confirm that is intended.
columns =['Name','Time']
test = []
train = pd.DataFrame(test, columns=columns)
train.to_csv(train_data_path, index=False)
def take_attendance():
    """Run live face recognition on the webcam and record recognized names.

    Known faces are loaded from the ``uploads`` directory (one image per
    person; the file name, without extension, is the person's name). The
    loop runs until the 'q' key is pressed.
    """
    path = 'uploads'
    images = []
    class_names = []
    my_list = os.listdir(path)
    print(my_list)
    # Load every reference image; the person's name comes from the file name.
    for cl in my_list:
        cur_image = cv2.imread(f'{path}/{cl}')
        images.append(cur_image)
        class_names.append(os.path.splitext(cl)[0])
    print(class_names)
    def find_encodings(images):
        # One face encoding per reference image.
        # NOTE(review): face_encodings(...)[0] raises IndexError if an image
        # contains no detectable face — confirm inputs are curated.
        encode_list = []
        for img in images:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            encode = face_recognition.face_encodings(img)[0]
            encode_list.append(encode)
        return encode_list
    def mark_attendance(name):
        # Append "name, time" to the CSV, at most once per name per run.
        with open('artifacts/attendance.csv','r+') as f:
            my_data_list = f.readlines()
            name_list = []
            for line in my_data_list:
                entry = line.split(',')
                name_list.append(entry[0])
            if name not in name_list:
                now = datetime.now()
                dt_string = now.strftime('%H:%M:%S')
                f.writelines(f'\n{name}, {dt_string}')
    encode_list_known = find_encodings(images)
    print("Encoding complete")
    cap = cv2.VideoCapture(0)
    web=True
    # NOTE(review): cap.release() is never called; the webcam handle is only
    # freed on process exit.
    while web:
        success, img = cap.read()
        # Work on a quarter-size RGB copy for faster detection; coordinates
        # are scaled back up by 4 when drawing on the full frame.
        img_s = cv2.resize(img, (0,0), None, 0.25,0.25)
        img_s = cv2.cvtColor(img_s, cv2.COLOR_BGR2RGB)
        faces_cur_frame = face_recognition.face_locations(img_s)
        encode_cur_frame = face_recognition.face_encodings(img_s, faces_cur_frame)
        for encode_face, face_loc in zip(encode_cur_frame, faces_cur_frame):
            matches = face_recognition.compare_faces(encode_list_known, encode_face)
            face_dis = face_recognition.face_distance(encode_list_known, encode_face)
            print(face_dis)
            # Best candidate = smallest face distance.
            match_index = np.argmin(face_dis)
            if matches[match_index]:
                name = class_names[match_index].upper()
                print(name)
                # Draw a labelled box around the recognized face.
                y1,x2,y2,x1 = face_loc
                y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
                cv2.rectangle(img, (x1,y1),(x2,y2),(0,255,0),2)
                cv2.rectangle(img, (x1,y2-35),(x2,y2),(0,255,0), cv2.FILLED)
                cv2.putText(img, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
                mark_attendance(name)
        cv2.imshow('Webcam',img)
        cv2.waitKey(1)
        if keyboard.is_pressed('q'):
            web=False # if key 'q' is pressed
    cv2.destroyAllWindows()
| aruneer007/attendance | face.py | face.py | py | 2,859 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_num... |
6159961636 | import json
a = {
"name": "ivo",
"age": "22"
}
def serialize_to(path, data):
json_string = json.dumps(a, indent=4)
with open(file, "w") as f:
f.write(json_string)
def unserialize_from(path):
    """Read the JSON document stored at *path* and return the parsed object."""
    with open(path, "r") as handle:
        raw = handle.read()
    return json.loads(raw)
| Vencislav-Dzhukelov/101-3 | week3/3-Panda-Social-Network/panda_json.py | panda_json.py | py | 316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 19,
"usage_type": "call"
}
] |
35492004084 | from ninja import Router
from ninja import NinjaAPI, File
from ninja.files import UploadedFile
from django.http import HttpResponse
from RECOGNIZE.text_reader import OCR_Reader
import io
import PIL.Image as Image
import cv2
import os
import time
import json
import uuid
import requests
router = Router()
path = __file__
splited = path.split("/")
path=""
for i in splited[1:-1]:
path += "/"+i
@router.post("/recognize")
def Recognize_Plate(request,file: UploadedFile = File(...)):
    """Save an uploaded photo, crop a fixed region, and OCR the licence plate text."""
    try:
        # print(url)
        # experiment_id = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
        # os.system("wget "+url+" -O /root/LACTURE/PLATNOMER_RECOGNIZE/RECOGNIZE/img/"+experiment_id+".jpg")
        # image = cv2.imread("/root/LACTURE/PLATNOMER_RECOGNIZE/RECOGNIZE/img/"+experiment_id+".jpg")
        data = file.read()
        image = Image.open(io.BytesIO(data))
        uuids = str(uuid.uuid4())  # unique basename for this upload
        image.save(path+"/img/"+uuids+".png")
        ############## SAVE #################
        # Hard-coded crop window; presumably assumes a fixed camera position -- TODO confirm
        img = Image.open(path+"/img/"+uuids+".png")
        box = (600, 300, 1100, 700)
        img2 = img.crop(box)
        img2.save(path+"/croping/"+uuids+".png")
        ############## CROP #################
        imageread = cv2.imread(path+"/croping/"+uuids+".png")
        reader = OCR_Reader(False)
        image, text, boxes = reader.read_text(imageread)
        return {
            "message":"success",
            "data" : text,
            "name" : uuids+".png"
        }
    except BaseException as err:
        # NOTE(review): BaseException also catches KeyboardInterrupt/SystemExit;
        # Exception would be safer here.
        print(str(err))
        return {
            "message" : "error"
        }
@router.get("/img/nocrop/{name}")
def ImgaeNoCrop(request,name:str):
    """Serve the original (uncropped) uploaded image by file name."""
    try:
        with open(path+"/img/"+name, 'rb') as image_file:
            # Read the image content
            image_data = image_file.read()
            # Set the content type header
            # NOTE(review): files are saved as .png but served as image/jpeg -- confirm intended.
            response = HttpResponse(content_type='image/jpeg')
            # Set the content of the response to the image data
            response.write(image_data)
            return response
    except BaseException as err:
        return {
            "message" : "Internal server error"
        }
@router.get("/img/crop/{name}")
def ImgaeNoCrop(request,name:str):
    """Serve the cropped image by file name.

    NOTE(review): this duplicates the function name of the /img/nocrop handler;
    both routes still register (decorators run at definition time), but the
    module-level name is shadowed — consider renaming to ImageCrop.
    """
    try:
        with open(path+"/croping/"+name, 'rb') as image_file:
            # Read the image content
            image_data = image_file.read()
            # Set the content type header
            response = HttpResponse(content_type='image/jpeg')
            # Set the content of the response to the image data
            response.write(image_data)
            return response
    except BaseException as err:
        return {
            "message" : "Internal server error"
        }
| fakhrilak/image_recognize | RECOGNIZE/index.py | index.py | py | 2,687 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ninja.Router",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ninja.files.UploadedFile",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "ninja.File",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
... |
5446974377 | import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
def func2(x):
    """Return 0.0 or 1.0 uniformly at random; the argument *x* is ignored."""
    return np.round(np.random.random())
def func(x,y,z,r):
    """Indicator of the open ball of radius r: 1.0 strictly inside, else 0.0."""
    distance = np.linalg.norm(np.array([x, y, z]))
    return 1.0 if distance < r else 0.0
def normalize_signal_1d(signal):
    """Divide every sample of a 1-D signal by its maximum; returns a complex array."""
    peak = signal.max()
    out = np.zeros(signal.shape[0], dtype=complex)
    for idx in range(signal.shape[0]):
        out[idx] = signal[idx] / peak
    return out
def normalize_signal_2d(signal):
    """Divide every sample of a 2-D signal by the global maximum; returns a complex array."""
    peak = signal.max()
    rows, cols = signal.shape[0], signal.shape[1]
    out = np.zeros([rows, cols], dtype=complex)
    for row in range(rows):
        for col in range(cols):
            out[row, col] = signal[row, col] / peak
    return out
def normalize_signal_3d(signal):
    """Divide every sample of a 3-D signal by the global maximum; returns a complex array."""
    peak = signal.max()
    n0, n1, n2 = signal.shape[0], signal.shape[1], signal.shape[2]
    out = np.zeros([n0, n1, n2], dtype=complex)
    for depth in range(n2):
        for row in range(n0):
            for col in range(n1):
                out[row, col, depth] = signal[row, col, depth] / peak
    return out
def plot_fft_1d_results(vecX, signal, vecK, dft, fft):
    """Plot the 1-D signal plus real/imag/abs of both spectra for visual comparison."""
    points = vecX.shape[0]  # (unused)
    fig, axs = plt.subplots(6, 1)
    axs[0].plot(vecX, signal,'o')
    axs[1].plot(vecK, np.real(dft),'-')
    axs[1].plot(vecK, np.imag(dft),'--')
    axs[2].plot(vecK, np.real(fft),'-')
    axs[2].plot(vecK, np.imag(fft),'--')
    axs[3].plot(vecK, np.abs(dft),'-')
    axs[4].plot(vecK, np.abs(fft),'-')
    # Final panel overlays both magnitudes so discrepancies stand out.
    axs[5].plot(vecK, np.abs(dft),'-')
    axs[5].plot(vecK, np.abs(fft),'--')
    plt.show()
    return
def plot_fft_2d_results(signal, dft, fft):
    """Show the 2-D signal, both spectra, and real/imag/abs differences as images."""
    diff_abs = np.abs(dft)-np.abs(fft)
    diff_real = np.real(dft)-np.real(fft)
    diff_imag = np.imag(dft)-np.imag(fft)
    cmap = cm.PRGn
    cmap=cm.get_cmap(cmap)
    points = signal.shape[0]  # (unused)
    fig, axs = plt.subplots(3, 2)
    # Left column: magnitudes; right column: dft-vs-fft differences.
    im00 = axs[0,0].imshow(np.abs(signal), cmap=cmap)
    im10 = axs[1,0].imshow(np.abs(dft), cmap=cmap)
    im20 = axs[2,0].imshow(np.abs(fft), cmap=cmap)
    im01 = axs[0,1].imshow(diff_real, cmap=cmap)
    im11 = axs[1,1].imshow(diff_imag, cmap=cmap)
    im21 = axs[2,1].imshow(diff_abs, cmap=cmap)
    axs[0,0].set_title("signal")
    axs[1,0].set_title("dft")
    axs[2,0].set_title("fft")
    axs[0,1].set_title("real(diff)")
    axs[1,1].set_title("imag(diff)")
    axs[2,1].set_title("abs(diff)")
    fig.colorbar(im00, ax=axs[0,0])
    fig.colorbar(im10, ax=axs[1,0])
    fig.colorbar(im20, ax=axs[2,0])
    fig.colorbar(im01, ax=axs[0,1])
    fig.colorbar(im11, ax=axs[1,1])
    fig.colorbar(im21, ax=axs[2,1])
    fig.tight_layout()
    plt.show()
    return
def plot_fft_3d_results(signal, dft, fft, nimgs=1):
    """Show *nimgs* z-slices of the 3-D signal, both spectra, and their differences."""
    diff_abs = np.abs(dft)-np.abs(fft)
    diff_real = np.real(dft)-np.real(fft)
    diff_imag = np.imag(dft)-np.imag(fft)
    cmap = cm.PRGn
    cmap=cm.get_cmap(cmap)
    points = signal.shape[0]  # (unused)
    fig, axs = plt.subplots(nimgs, 6)
    img_list = []  # (unused)
    # One row per slice; columns: |signal|, |dft|, |fft|, real/imag/abs diffs.
    for im in range(nimgs):
        im00 = axs[im,0].imshow(np.abs(signal[im]), cmap=cmap)
        im10 = axs[im,1].imshow(np.abs(dft[im]), cmap=cmap)
        im20 = axs[im,2].imshow(np.abs(fft[im]), cmap=cmap)
        im01 = axs[im,3].imshow(diff_real[im], cmap=cmap)
        im11 = axs[im,4].imshow(diff_imag[im], cmap=cmap)
        im21 = axs[im,5].imshow(diff_abs[im], cmap=cmap)
        fig.colorbar(im00, ax=axs[im,0])
        fig.colorbar(im10, ax=axs[im,1])
        fig.colorbar(im20, ax=axs[im,2])
        fig.colorbar(im01, ax=axs[im,3])
        fig.colorbar(im11, ax=axs[im,4])
        fig.colorbar(im21, ax=axs[im,5])
    axs[0,0].set_title("signal")
    axs[0,1].set_title("dft")
    axs[0,2].set_title("fft")
    axs[0,3].set_title("real(diff)")
    axs[0,4].set_title("imag(diff)")
    axs[0,5].set_title("abs(diff)")
    for im in range(nimgs):
        for col in range(6):
            axs[im,col].grid(False)
            # Hide axes ticks
            axs[im,col].set_xticks([])
            axs[im,col].set_yticks([])
    # fig.tight_layout()
    plt.show()
    return
def apply_dft_1d(signal, vecx, veck, length, points):
    """Direct (slow, O(points^2)) 1-D Fourier coefficients of *signal* at wave numbers *veck*."""
    spectrum = np.zeros(points, dtype=complex)
    dx = length / (points)  # sampling cell width
    for ki in range(points):
        acc = 0.0
        for xi in range(points):
            acc += dx * signal[xi] * np.exp((-1.0j) * veck[ki] * vecx[xi])
        spectrum[ki] = (1.0 / points) * acc
    return spectrum
def apply_dft_2d(signal, vecx, vecy, veckx, vecky, area, points):
    """Direct 2-D Fourier coefficients; O(points^4), intended for validating the FFT."""
    kspec = np.zeros([points, points], dtype=complex)
    dA = area / (points**2)  # sampling cell area
    for i in range(points):
        for j in range(points):
            gsum = 0.0
            # Riemann sum over all sample points for coefficient (i, j).
            for ry in range(points):
                for rx in range(points):
                    gsum += dA * signal[ry,rx] * np.exp((-1.0j) * (veckx[j] * vecx[rx] + vecky[i] * vecy[ry]))
            kspec[i,j] = (1.0 / area) * gsum
    return kspec
def apply_dft_3d(signal, vecx, vecy, vecz, veckx, vecky, veckz, volume, points):
    """Direct 3-D Fourier coefficients; O(points^6), prints progress per coefficient."""
    kspec = np.zeros([points, points, points], dtype=complex)
    dV = volume / (points**3)  # sampling cell volume
    elems = points**3
    count = 0
    for k in range(points):
        for i in range(points):
            for j in range(points):
                count += 1
                print(":: {} fourier coefficient out of {}.".format(count, elems))
                gsum = 0.0
                # Riemann sum over all sample points for coefficient (i, j, k).
                for rz in range(points):
                    for ry in range(points):
                        for rx in range(points):
                            gsum += dV * signal[ry,rx,rz] * np.exp((-1.0j) * (veckx[j] * vecx[rx] + vecky[i] * vecy[ry] + veckz[k] * vecz[rz]))
                kspec[i,j, k] = (1.0 / volume) * gsum
    return kspec
def apply_fft_1d(signal):
    """Orthonormal 1-D FFT with the zero frequency shifted to the centre."""
    return np.fft.fftshift(np.fft.fft(signal, norm='ortho'))
def apply_fft_2d(signal):
    """Orthonormal 2-D FFT with the zero frequency shifted to the centre."""
    return np.fft.fftshift(np.fft.fft2(signal, norm='ortho'))
def apply_fft_3d(signal):
    """Orthonormal n-D FFT with the zero frequency shifted to the centre."""
    return np.fft.fftshift(np.fft.fftn(signal, norm='ortho'))
def test_fft1D():
    """Compare the direct DFT against numpy's FFT for a 1-D box signal."""
    N = 256
    a = 1.0
    size = 2*N + 1
    signal = np.zeros(size)
    # Sample points: midpoints of `size` equal cells covering [-a/2, a/2].
    vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    Kfreq = 2*np.pi/a
    [vecK] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    # Box (rect) signal centred in the window.
    for i in range(size//4):
        signal[size//2-i] = 1.0
        signal[size//2+i] = 1.0
    dft_kspec = apply_dft_1d(signal, vecX, vecK, a, size)
    fft_kspec = apply_fft_1d(signal)
    plot_fft_1d_results(vecX, signal, vecK, dft_kspec, fft_kspec)
    # BUG FIX: `normalize_signal` is undefined in this module; use the 1-D variant.
    norm_fft_kspec = normalize_signal_1d(fft_kspec)
    norm_dft_kspec = normalize_signal_1d(dft_kspec)
    plot_fft_1d_results(vecX, signal, vecK, norm_dft_kspec, norm_fft_kspec)
    return
def test_fft2D():
    """Compare the direct 2-D DFT against numpy's FFT for a disk + square signal."""
    N = 16
    a = 1.0
    area = a**2
    size = 2*N + 1
    signal = np.zeros([size, size])
    # Sample points: midpoints of `size` equal cells covering [-a/2, a/2].
    vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    vecY = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    Kfreq = 2*np.pi/a
    [vecKX] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    [vecKY] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    # BUG FIX: the original called `func(vecX[i])` with one argument, but
    # `func(x, y, z, r)` takes four and raised a TypeError. Mirror test_fft3D:
    # a disk of radius a/2 evaluated in the z=0 plane.
    for i in range(size):
        for j in range(size):
            signal[i,j] = func(vecX[j], vecY[i], 0.0, a/2)
    for i in range(size//4):
        for j in range(size//4):
            signal[size//2-i, size//2-j] = 1.0
            signal[size//2+i, size//2-j] = 1.0
    dft_kspec = apply_dft_2d(signal, vecX, vecY, vecKX, vecKY, area, size)
    fft_kspec = apply_fft_2d(signal)
    plot_fft_2d_results(signal, dft_kspec, fft_kspec)
    norm_fft_kspec = normalize_signal_2d(fft_kspec)
    norm_dft_kspec = normalize_signal_2d(dft_kspec)
    plot_fft_2d_results(signal, norm_dft_kspec, norm_fft_kspec)
    return
def test_fft3D():
    """Compare the direct 3-D DFT against numpy's FFT for a ball + cube-corner signal."""
    N = 6
    a = 1.0
    volume = a**3
    size = 2*N + 1
    signal = np.zeros([size, size, size])
    Xfreq = 0.5*a / float(N)  # (unused)
    # [x, y, z] = np.meshgrid(rfreq * Nrange, rfreq * Nrange, rfreq * Nrange)
    # Sample points: midpoints of `size` equal cells covering [-a/2, a/2].
    vecX = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    vecY = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    vecZ = np.linspace(-0.5*a, 0.5*a, 2*size + 1)[1:2*size:2]
    Kfreq = 2*np.pi/a
    [vecKX] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    [vecKY] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    [vecKZ] = np.meshgrid(Kfreq*np.arange(-N, N+1))
    # Ball of radius a/2 ...
    for k in range(size):
        for i in range(size):
            for j in range(size):
                signal[i,j, k] = func(vecX[j], vecY[i], vecZ[k], a/2)
    # ... overlaid with 1.0-valued blocks around the centre.
    for k in range(size//4):
        for i in range(size//4):
            for j in range(size//4):
                signal[size//2-i, size//2-j, size//2-k] = 1.0
                signal[size//2+i, size//2-j, size//2+k] = 1.0
    dft_kspec = apply_dft_3d(signal, vecX, vecY, vecZ, vecKX, vecKY, vecKZ, volume, size)
    fft_kspec = apply_fft_3d(signal)
    plot_fft_3d_results(signal, dft_kspec, fft_kspec, size)
    norm_fft_kspec = normalize_signal_3d(fft_kspec)
    norm_dft_kspec = normalize_signal_3d(dft_kspec)
    plot_fft_3d_results(signal, norm_dft_kspec, norm_fft_kspec, size)
    return
{
"api_name": "numpy.round",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
... |
18101264424 | from typing import List, Tuple
from unittest import TestCase, main
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s* (expand-around-centre)."""
        n = len(s)
        best = s[0]

        def expand(lo: int, hi: int, best: str) -> str:
            # Grow outwards while the window [lo, hi] stays a palindrome.
            while lo >= 0 and hi < n and s[lo] == s[hi]:
                if hi - lo + 1 > len(best):
                    best = s[lo:hi + 1]
                lo -= 1
                hi += 1
            return best

        for centre in range(n):
            best = expand(centre - 1, centre + 1, best)  # odd-length candidate
            best = expand(centre, centre + 1, best)      # even-length candidate
        return best
class Test(TestCase):
    """Unit tests for Solution.longestPalindrome."""
    s = Solution()
    # (input, expected) pairs
    data: List[Tuple[str, str]] = [
        ("cbbd", "bb"),
        ("aaaa", "aaaa"),
        ("babad", "bab"),
    ]
    def test_solution(self):
        for input, expected in self.data:
            self.assertEqual(self.s.longestPalindrome(input), expected)
if __name__ == "__main__":
main()
| hirotake111/leetcode_diary | leetcode/longest_palindromic_substring/solution.py | solution.py | py | 1,187 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_... |
35010781963 | from grafo.Grafo import *
from collections import deque
class EdmondsKarp:
    """Maximum flow via Edmonds-Karp: repeated BFS for shortest augmenting paths."""

    def __init__(self, grafo: Grafo) -> None:
        self.grafo = grafo
        # Flow per directed arc, keyed by (from_vertex_index, to_vertex_index).
        # BUG FIX: the original first built a dict comprehension keyed by Arco
        # objects and immediately overwrote it with {}; that dead assignment
        # has been removed.
        self.fluxo = {}
        for arco in self.grafo.arcos.values():
            self.fluxo[(arco.vertice1.indice, arco.vertice2.indice)] = 0
            self.fluxo[(arco.vertice2.indice, arco.vertice1.indice)] = 0
        # Add a zero-capacity reverse arc wherever one is missing, so the
        # residual graph can cancel flow.
        for arco in list(self.grafo.arcos.values()):
            if (arco.vertice2.indice, arco.vertice1.indice) not in self.grafo.arcos:
                self.grafo.arcos[(arco.vertice2.indice, arco.vertice1.indice)] = Arco(arco.vertice2, arco.vertice1, 0)
                arco.vertice2.vizinhos_saintes.append(arco.vertice1)

    def buscaLargura(self, s: int, t: int) -> dict:
        """BFS on the residual graph; returns a parent map reaching t, or None."""
        parent = {s: None}
        queue = deque([s])
        while queue:
            u = queue.popleft()
            for v in self.grafo.vizinhos_saintes(u):
                # Only traverse arcs with remaining residual capacity.
                if v.indice not in parent and self.grafo.arcos[(u, v.indice)].peso - self.fluxo[(u, v.indice)] > 0:
                    parent[v.indice] = u
                    if v.indice == t:
                        return parent
                    queue.append(v.indice)
        return None

    def execute(self) -> None:
        """Run the algorithm and print the result."""
        self.processarAlgoritmo()
        self.imprimir()

    def processarAlgoritmo(self) -> None:
        """Compute max flow from vertex 1 (source) to the last vertex (sink)."""
        s = 1
        t = len(self.grafo.vertices)
        max_flow = 0
        while True:
            parent = self.buscaLargura(s, t)
            if parent is None:
                break
            # Bottleneck capacity along the augmenting path.
            path_flow = float("Inf")
            v = t
            while v != s:
                u = parent[v]
                path_flow = min(path_flow, self.grafo.arcos[(u, v)].peso - self.fluxo[(u, v)])
                v = u
            max_flow += path_flow
            # Augment: push flow forward and cancel it on the reverse arcs.
            v = t
            while v != s:
                u = parent[v]
                self.fluxo[(u, v)] += path_flow
                self.fluxo[(v, u)] -= path_flow
                v = u
        self.max_flow = max_flow

    def imprimir(self) -> None:
        print("O fluxo máximo possível é %d" % self.max_flow)
| jdanprad0/INE5413-Grafos | Atividade-03-Grafos/algoritmos_t3/edmondsKarp/EdmondsKarp.py | EdmondsKarp.py | py | 2,190 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "grafo.Grafo",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 22,
"usage_type": "call"
}
] |
35416632908 | import logging
import battle.main
import memory.main
import screen
import xbox
FFXC = xbox.controller_handle()
logger = logging.getLogger(__name__)
def yojimbo(gil_value: int = 263000):
    """Pay Yojimbo *gil_value* gil via the overdrive battle menu (game automation)."""
    logger.info("Yojimbo overdrive")
    screen.await_turn()
    memory.main.wait_frames(6)
    if not screen.turn_aeon():
        return
    # Move the battle-menu cursor until the pay option (cursor id 35) is selected.
    while memory.main.battle_menu_cursor() != 35:
        xbox.menu_up()
        memory.main.wait_frames(6)
    xbox.menu_b()
    logger.info("Selecting amount")
    memory.main.wait_frames(15)
    battle.main.calculate_spare_change_movement(gil_value)
    logger.info(f"Amount selected: {gil_value}")
    # Multiple confirm presses to lock in the amount and start the action.
    xbox.tap_b()
    xbox.tap_b()
    xbox.tap_b()
    xbox.tap_b()
    xbox.tap_b()
    return
| coderwilson/FFX_TAS_Python | battle/overdrive.py | overdrive.py | py | 723 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "xbox.controller_handle",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "screen.await_turn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "memory.main... |
8385108161 | from __future__ import absolute_import
from __future__ import print_function
import argparse
from lxml import etree
# Parse command line: the single positional argument is the SUMO network file.
parser = argparse.ArgumentParser(
    description='Create tls links from sumo net as needed by tls_csv2SUMO.py. You have to edit the link number ' +
    'field (preset with g). The comment gives the link number shown on demand in SUMO-GUI')
parser.add_argument('net', help='Input file name')
args = parser.parse_args()
doc = etree.parse(args.net)
connections = {}
# Collect every traffic-light-controlled connection, keyed for stable sorting.
for conn in doc.xpath('//connection'):
    if 'linkIndex' in conn.attrib:
        # use traffic light id and right adjusted number for sorting and as
        # comment
        numIndex = conn.attrib['linkIndex']
        index = conn.attrib['tl'] + ';' + numIndex.zfill(3)
        connections[index] = conn.attrib['from'] + '_' + conn.attrib['fromLane'] + \
            ';' + conn.attrib['to'] + '_' + conn.attrib['toLane']
        # print record
        # print conn.attrib['from'], conn.attrib['to'],
        # conn.attrib['linkIndex']
# Emit one CSV line per connection; the trailing comment shows the link number.
for conn in sorted(connections):
    # print conn, connections[conn]
    print("link;g;{};0".format(connections[conn]).ljust(50) + '#' + str(conn).rjust(3))
| ngctnnnn/DRL_Traffic-Signal-Control | sumo-rl/sumo/tools/tls/createTlsCsv.py | createTlsCsv.py | py | 1,181 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "lxml.etree.parse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 14,
"usage_type": "name"
}
] |
13974817829 | from pydantic import BaseModel, Field
from typing import List, Union
import pydantic
from .validators import validate_polygon, validate_is_plane_orthogonal_to_polygon, validate_plane_normal_is_not_zero
class Point3DModel(BaseModel):
__root__: List[float] = Field(..., min_items=3, max_items=3)
class PlaneModel(BaseModel):
    """A plane defined by a point on it and a (non-zero) normal vector."""
    point: Point3DModel = Field()
    normal: Point3DModel = Field()
    @pydantic.validator('normal', always=True)
    @classmethod
    def validate_plane_normal_is_not_zero(cls, value):
        # Delegates to the module-level validator of the same name imported above
        # (method bodies do not see class scope, so this resolves to the import).
        validate_plane_normal_is_not_zero(value.__root__)
        return value
class Point2DModel(BaseModel):
__root__: List[float] = Field(..., min_items=2, max_items=2)
def to_3D(self) -> Point3DModel:
return Point3DModel(__root__=[*self.__root__, 0])
class EmptyPolygonModel(BaseModel):
__root__: List[Point2DModel] = Field(..., min_items=0, max_items=0)
class PolygonModel(BaseModel):
    """A convex polygon in the z=0 plane, given as a list of at least 3 2-D points."""
    __root__: List[Point2DModel] = Field(..., min_items=3)
    @classmethod
    def get_polygon_normal(cls) -> Point3DModel:
        # All polygons live in the z=0 plane, so the normal is always +z.
        return Point3DModel(__root__=[0, 0, 1])
    @classmethod
    def get_polygon_plane(cls) -> PlaneModel:
        """Return the z=0 plane every polygon lies in."""
        return PlaneModel(
            point=Point3DModel(__root__=[0, 0, 0]),
            normal=cls.get_polygon_normal()
        )
    @pydantic.validator('__root__', always=True)
    @classmethod
    def validate_is_polygon_convex(cls, value):
        points = [v.__root__ for v in value]
        # Raises (in validators module) if the points do not form a valid polygon.
        validate_polygon(points)
        return value
class SplittingRequestData(BaseModel):
    """Request body for polygon splitting: the polygon plus a cutting plane."""
    polygon: PolygonModel = Field()
    plane: PlaneModel = Field()
    @pydantic.root_validator(skip_on_failure=True)
    @classmethod
    def validate_is_plane_orthogonal_to_polygon(cls, values):
        # The cutting plane must be orthogonal to the polygon's plane (+z normal).
        plane_normal = values['plane'].normal
        polygon_normal = PolygonModel.get_polygon_normal()
        validate_is_plane_orthogonal_to_polygon(
            plane_normal.__root__,
            polygon_normal.__root__,
        )
        return values
class SplittingResponseData(BaseModel):
polygon1: PolygonModel = Field()
polygon2: Union[PolygonModel, EmptyPolygonModel] = Field()
| mikheev-dev/polygon_splitter | src/data_model.py | data_model.py | py | 2,164 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
... |
33551925024 | import requests as rq
from dotenv import load_dotenv
import os
import smtplib
import sys
class FPL:
    """Polls the public FPL bootstrap endpoint and emails when registration opens."""
    URL = 'https://fantasy.premierleague.com/api/bootstrap-static/'
    def __init__(self):
        # Fetch bootstrap data; credentials and the sent-flag come from .env.
        self.response_raw = rq.get(FPL.URL)
        load_dotenv()
        self.email_sent = os.getenv('EMAIL_SENT')  # set once a mail has been sent
        self.app_pw = os.getenv('APP_PW')  # presumably a Gmail app password -- TODO confirm
        self.email = os.getenv('EMAIL')
        if self.response_raw.status_code != 200:
            # API failure: notify by mail and flag the instance as unusable.
            self.send_email('error', str(self.response_raw.status_code) + ";" + self.response_raw.text)
            self.shit_hit_the_fan = True
            return
        self.shit_hit_the_fan = False
        self.response = self.response_raw.json()
        self.tot_players = int(self.response['total_players'])
    def send_email(self, setting: str = 'normal', error_message: str = ''):
        """Send a notification email to self via Gmail SMTP; True on success."""
        if setting == 'normal':
            message = 'Subject: {}\n\n{}'.format("FPL REGISTRATION IS OPEN", "GO GET THEM")
        elif setting == 'ping':
            message = 'Subject: {}\n\n{}'.format("PING FPL", "Script is working")
        else:
            message = 'Subject: {}\n\n{}'.format("ERROR FPL", error_message)
        try:
            s = smtplib.SMTP('smtp.gmail.com', 587)
            s.starttls()
            s.login(self.email, self.app_pw)
            s.sendmail(self.email, self.email, message)
            s.quit()
            return True
        except Exception as e:
            print(e)
            return False
    def run(self):
        """Email once when the player count suggests a fresh season (< 1M players)."""
        if 0 < self.tot_players < 1_000_000:
            if self.email_sent is None:
                if self.send_email():
                    # Persist the flag so subsequent runs do not re-send.
                    with open(".env", "a") as f:
                        f.write("EMAIL_SENT=1")
    def ping(self):
        """Health check: send a 'script is alive' email."""
        if self.send_email('ping'):
            pass
if __name__ == '__main__':
fpl = FPL()
if not fpl.shit_hit_the_fan:
if len(sys.argv) == 1:
fpl.run()
else:
fpl.ping()
| FilleDille/fpl_reg_chaser | main.py | main.py | py | 1,966 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_numbe... |
5480419217 | import os
import requests
from bs4 import BeautifulSoup
import re
import time
import sys
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
def get_music_data(url):
    """
    Fetch the song list of a NetEase Cloud Music playlist page.
    Returns a list of {'id': ..., 'name': ...} dicts.
    """
    headers = {'User-Agent':user_agent}
    webData = requests.get(url,headers=headers).text
    soup = BeautifulSoup(webData,'lxml')
    # The hidden <ul class="f-hide"> holds one <a href="/song?id=..."> per track.
    find_list = soup.find('ul',class_="f-hide").find_all('a')
    tempArr = []
    for a in find_list:
        music_id = a['href'].replace('/song?id=','')
        music_name = a.text
        tempArr.append({'id':music_id,'name':music_name})
    return tempArr
def get(values,output_path):
    """
    Download every song in *values* into *output_path*, skipping files that already exist.
    """
    downNum = 0
    rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
    for x in values:
        x['name'] = re.sub(rstr, "_", x['name'])# replace characters that are illegal in file names
        if not os.path.exists(output_path + os.sep + x['name'] + '.mp3'):
            print('[*] '+ x['name'] + '.mp3 下载中...')
            url = 'http://music.163.com/song/media/outer/url?id=' + x['id'] + '.mp3'
            try:
                save_file(url , output_path + os.sep + x['name'] + '.mp3')
                downNum = downNum + 1
                print('[+] '+ x['name'] + '.mp3 下载完成 !')
            # NOTE(review): bare except hides all errors, incl. KeyboardInterrupt.
            except:
                print('[+] '+ x['name'] + '.mp3 下载失败 !')
    print('[+] 共计下载完成歌曲 ' + str(downNum) + ' 首 !')
def save_file(url,path):
    """
    Download *url* with browser-like headers and write the body to *path*.

    BUG FIX: the original opened the file without ever closing it (resource
    leak); a context manager now guarantees the handle is flushed and closed.
    """
    headers = {'User-Agent':user_agent,'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8','Upgrade-Insecure-Requests':'1'}
    response = requests.get(url,headers=headers)
    with open(path, 'wb') as f:
        f.write(response.content)
def poc_head():
print("""
__ _______.___._____.___. __________ __ .__.__
/ \ / \__ | |\__ | | \______ \ ____ _______/ |_|__| | ____
\ \/\/ // | | / | | | _// __ \\____ \ __\ | | _/ __ \
\ / \____ | \____ | | | \ ___/| |_> > | | | |_\ ___/
\__/\ / / ______| / ______|____|____|_ /\___ > __/|__| |__|____/\___ >
\/ \/ \/ /_____/ \/ \/|__| \/
author 昊辰
博客: www.haochen1204.com
公众号: 霜刃信安
""")
def main():
    """Prompt for a playlist URL and download every track into a timestamped folder."""
    url = ''
    # Output folder: <script dir>/music_<YYYYmmddHHMM>
    output_path = sys.argv[0][0:len(sys.argv[0])-len(os.path.basename(sys.argv[0]))]+'music_'+time.strftime('%Y%m%d%H%M', time.localtime())
    poc_head()
    url = input('请输入歌单的网址:').replace("#/","")  # strip the SPA fragment from the URL
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    music_list = get_music_data(url)
    print('[+] 歌单获取成功! 共计',len(music_list),'首歌曲!')
    get(music_list,output_path)
    print('[+] 歌曲存放目录为 '+output_path+' 文件')
    print('[+] 程序运行结束 10秒后自动退出')
    time.sleep(10)
main()
| haochen1204/Reptile_WYYmusic | 网易云爬虫.py | 网易云爬虫.py | py | 3,336 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numb... |
39868308641 | from django.db import models
from main.model.playlist import Playlist
from main.model.track import Track
class PlaylistTracks(models.Model):
    """Through-table linking playlists to tracks with an explicit ordering."""
    playlist = models.ForeignKey(
        Playlist, on_delete=models.CASCADE
    )  # deleting a playlist cleans up the cross-table rows
    track = models.ForeignKey(
        Track, on_delete=models.CASCADE
    )  # deleting a track cleans up the cross-table rows
    track_order = models.PositiveIntegerField(default=0)
    def __str__(self):
        # "<pgm num>.<pos>~ <pgm name> // <artist> / <album> -- <title> -- MM:SS"
        return "%s.%02d~ %s // %s / %s -- %s -- " % (
            self.track.pgm.num,
            self.track.pos,
            self.track.pgm.name,
            self.track.artist,
            self.track.album,
            self.track.title,
        ) + "%02d:%02d" % divmod(self.track.duration, 60)
    class Meta:
        ordering = ["track_order"]
        verbose_name_plural = "Треки"
        verbose_name = "Трек"
| artemgv/spacemusic | app/main/model/playlisttracks.py | playlisttracks.py | py | 992 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": ... |
73573186747 | import requests
# Contract addresses to whitelist, mapped to a human-readable name.
addresses = {
    "TXA2WjFc5f86deJcZZCdbdpkpUTKTA3VDM": "energyRateModelContract",
    "TSe1pcCnU1tLdg69JvbFmQirjKwTbxbPrG": "sTRXImpl",
    "TU3kjFuhtEo42tsCBtfYUAZxoqQ4yuSLQ5": "sTRXProxy",
    "TNoHbPuBQrVanVf9qxUsSvHdB2eDkeDAKD": "marketImpl",
    "TU2MJ5Veik1LRAgjeSzEdvmDYx7mefJZvd": "marketProxy",
}
# Template payload; contractAddress/contractName are filled in per entry below.
json_ori = {
    "contractAddress": "",
    "contractName": "",
    "projectId": 2,
    "remark": "",
    "accessToken": "tronsmart"
}
for address, name in addresses.items():
    # NOTE(review): the local name `json` is easy to confuse with the json module.
    json = json_ori.copy()
    json['contractAddress'] = address
    json['contractName'] = name
    resp = requests.post("https://mining.ablesdxd.link" + "/admin/upsertContractIntoWhiteList", json=json)
    print(f"{address} {name} {resp.text}")
| dpneko/pyutil | contract_whitelist.py | contract_whitelist.py | py | 733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 28,
"usage_type": "call"
}
] |
37555045718 | # Low-Dose CT with a Residual Encoder-Decoder Convolutional Neural Network (RED-CNN)
# https://arxiv.org/ftp/arxiv/papers/1702/1702.00288.pdf
# reference https://github.com/SSinyu/RED-CNN
import os
import numpy as np
import torch.nn as nn
from model import common
def make_model(args, parent=False):
    """Framework factory entry point: build a RED-CNN from the run arguments."""
    return REDCNN(args)
class REDCNN(nn.Module):
    """Residual Encoder-Decoder CNN (RED-CNN) for low-dose CT restoration.

    Five 5x5 conv layers (encoder) followed by five 5x5 transposed-conv
    layers (decoder), with skip connections from the input and from two
    intermediate encoder activations.
    """

    def __init__(self, args):
        super(REDCNN, self).__init__()
        self.args = args
        out_ch = args.n_feats   # feature channels inside the network
        in_ch = args.n_colors   # image channels (in == out)
        # Encoder: each valid 5x5 conv shrinks H and W by 4.
        self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.conv3 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.conv4 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.conv5 = nn.Conv2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        # Decoder: each transposed conv grows H and W back by 4.
        self.tconv1 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.tconv2 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.tconv3 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.tconv4 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=5, stride=1, padding=0)
        self.tconv5 = nn.ConvTranspose2d(out_ch, in_ch, kernel_size=5, stride=1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Restore *x*; output has the same shape as the input."""
        skip_input = x
        # --- encoder ---
        h = self.relu(self.conv1(x))
        h = self.relu(self.conv2(h))
        skip_shallow = h
        h = self.relu(self.conv3(h))
        h = self.relu(self.conv4(h))
        skip_deep = h
        h = self.relu(self.conv5(h))
        # --- decoder with symmetric skip connections ---
        h = self.tconv1(h)
        h = h + skip_deep
        h = self.tconv2(self.relu(h))
        h = self.tconv3(self.relu(h))
        h = h + skip_shallow
        h = self.tconv4(self.relu(h))
        h = self.tconv5(self.relu(h))
        h = h + skip_input
        return self.relu(h)
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
5384553044 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
__DIR__ = os.path.abspath(os.path.dirname(__file__))
from ply import lex, yacc
from .data import Domain, MsgId, MsgStr, MsgStrPlural, MsgStrList, Message
class ParserException(Exception):
pass
DEBUG = 0
tokens = (
'COMMENT',
'DOMAIN',
'PREV_START',
'PREV_MSGCTXT',
'PREV_MSGID',
'PREV_MSGID_PLURAL',
'PREV_STRING',
'MSGCTXT',
'MSGID',
'MSGID_PLURAL',
'MSGSTR',
'NUMBER',
'STRING'
)
t_DOMAIN = r'domain'
t_MSGID = r'msgid'
t_MSGID_PLURAL = r'msgid_plural'
t_MSGSTR = r'msgstr'
t_MSGCTXT = r'msgctxt'
t_ignore = ' \t'
t_prev_ignore = t_ignore
literals = '[]'
states = (
('prev', 'exclusive'),
)
def t_PREV_START(t):
r'\#\|'
t.lexer.begin('prev')
return t
def t_COMMENT(t):
r'\#.*\n'
t.value = t.value[:-1]
return t
def t_STRING(t):
r'\"(?P<content>([^\\\n]|(\\.))*?)\"'
stval = t.lexer.lexmatch.group("content")
t.value = stval if stval else ''
return t
def t_NUMBER(t):
r'[0-9]+'
t.value = int(t.value)
return t
t_prev_NUMBER = t_NUMBER
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(t):
raise SyntaxError("Illegal character %r on %d" % (t.value[0], t.lexer.lineno))
t_prev_error = t_error
def t_prev_MSGCTXT(t):
r'msgctxt'
t.type = 'PREV_MSGCTXT'
return t
def t_prev_MSGID(t):
r'msgid'
t.type = 'PREV_MSGID'
return t
def t_prev_MSGID_PLURAL(t):
r'msgid_plural'
t.type = 'PREV_MSGID_PLURAL'
return t
def t_prev_STRING(t):
r'\"(?P<content>([^\\\n]|(\\.))*?)\"'
t.type = 'PREV_STRING'
stval = t.lexer.lexmatch.group("content")
t.value = stval if stval else ''
return t
def t_prev_newline(t):
r'\n+'
t.lexer.begin('INITIAL')
t.lexer.lineno += len(t.value)
def p_empty(p):
"empty :"
pass
def p_error(p):
    # yacc error hook: abort parsing on any syntax error.
    # BUG FIX: was `PerserException` (undefined name), which turned every
    # parse error into a NameError.
    raise ParserException(str(p))
def p_po_file(p):
"""
po_file : po_file comment
| po_file domain
| po_file message
| po_file error
| empty
"""
if len(p) == 2:
p[0] = []
else:
p[0] = p[1] + [p[2]]
def p_comment(p):
"""
comment : COMMENT
"""
p[0] = p[1]
def p_dommain(p):
"""
domain : DOMAIN STRING
"""
p[0] = Domain(p[2])
## -- message -- ##
def p_message(p):
"""
message : message_intro string_list MSGSTR string_list
"""
if p[1] and isinstance(p[1], tuple):
msgid = MsgId(p[2], ctxt=p[1][1])
prev = p[1][0]
else:
msgid = MsgId(p[2], ctxt=p[1])
prev = None
msgstr = MsgStr(p[4])
p[0] = Message(msgid, msgstr, prev=prev)
def p_message_plural(p):
"""
message : message_intro string_list msgid_pluralform pluralform_list
"""
if p[1] and isinstance(p[1], tuple):
msgid = MsgId(p[2], ctxt=p[1][1], pluralform=p[3])
prev = p[1][0]
else:
msgid = MsgId(p[2], ctxt=p[1], pluralform=p[3])
prev = None
msgstr = MsgStrList(p[4])
p[0] = Message(msgid, msgstr, prev=prev)
def p_message_no_msgstrplural(p):
    """
    message : message_intro string_list msgid_pluralform
    """
    # BUG FIX: was `PercerException` (undefined name) -> NameError instead of
    # a parse error. The docstring above is the PLY grammar rule; do not edit.
    raise ParserException("missing 'msgstr[0]' section")
def p_message_no_msgidplural(p):
    """
    message : message_intro string_list pluralform_list
    """
    # BUG FIX: was `PercerException` (undefined name) -> NameError instead of
    # a parse error. The docstring above is the PLY grammar rule; do not edit.
    raise ParserException("missing 'msgid_plural' section")
def p_message_no_msgstr(p):
    """
    message : message_intro string_list
    """
    # BUG FIX: was `PercerException` (undefined name) -> NameError instead of
    # a parse error. The docstring above is the PLY grammar rule; do not edit.
    raise ParserException("missing 'msgstr' section")
## -- message end -- ##
def p_message_intro(p):
"""
message_intro : msg_intro
| prev msg_intro
"""
if len(p)==3:
p[0] = (p[1], p[2])
else:
p[0] = p[1]
def p_prev(p):
"""
prev : prev_msg_intro prev_string_list
| prev_msg_intro prev_string_list prev_msgid_pluralform
"""
if len(p)==3:
p[0] = MsgId(p[2], ctxt=p[1])
else:
p[0] = MsgId(p[2], pluralform=p[3], ctxt=p[1])
def p_msg_intro(p):
"""
msg_intro : MSGID
| MSGCTXT string_list MSGID
"""
if len(p)==2:
return
else:
p[0] = p[2]
def p_prev_msg_intro(p):
"""
prev_msg_intro : PREV_START PREV_MSGID
| PREV_START PREV_MSGCTXT prev_string_list PREV_START PREV_MSGID
"""
if len(p)==3:
return
else:
p[0] = p[3]
def p_msgid_pluralform(p):
"""
msgid_pluralform : MSGID_PLURAL string_list
"""
p[0] = p[2]
def p_prev_msgid_pluralform(p):
"""
prev_msgid_pluralform : PREV_MSGID_PLURAL prev_string_list
"""
p[0] = p[2]
def p_pluralform_list(p):
    """
    pluralform_list : pluralform
                    | pluralform_list pluralform
    """
    # Accumulate msgstr[N] entries into a plain Python list.
    if len(p) == 3:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = [p[1]]
def p_pluralform(p):
"""
pluralform : MSGSTR '[' NUMBER ']' string_list
"""
p[0] = MsgStrPlural(number=p[3], value=p[5])
def p_string_list(p):
    """
    string_list : STRING
                | string_list STRING
    """
    # Adjacent string fragments concatenate, as in PO files.
    if len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        p[0] = p[1]
def p_prev_string_list(p):
"""
prev_string_list : PREV_STRING
| prev_string_list PREV_STRING
"""
if len(p)==2:
p[0] = p[1]
else:
p[0] = p[1] + p[2]
start = str('po_file')
lexer = lex.lex(debug=DEBUG)
parser = yacc.yacc(outputdir=__DIR__, debug=DEBUG, write_tables=False)
def parse(f):
    """Parse an open PO file object and return the list of parsed entries."""
    source = f.read()
    entries = parser.parse(source)
    # Reset parser state so a later call starts from a clean slate.
    parser.restart()
    return entries
| takada-at/ponda | ponda/parser.py | parser.py | py | 5,662 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "data.Domain",
"line_num... |
31211285971 | ''' TAREA5
Determina el tamaño de muestra requerido por cada lugar decimal
de precisión del estimado obtenido para el integral, comparando
con Wolfram Alpha para por lo menos desde uno hasta siete decimales;
representa el resultado como una sola gráfica o de tipo caja-bigote
o un diagrama de violin.
'''
from math import exp, pi
import numpy as np
def g(x):
    # Density of the hyperbolic-secant distribution:
    # 2 / (pi * (e^x + e^-x)) == sech(x) / pi.
    denominator = exp(x) + exp(-x)
    return 2 / (pi * denominator)
wolf = str(0.0488340) #con 7 decimales
vg = np.vectorize(g)
X = np.arange(-8, 8, 0.001) # ampliar y refinar
Y = vg(X) # mayor eficiencia
from GeneralRandom import GeneralRandom
generador = GeneralRandom(np.asarray(X), np.asarray(Y))
desde = 3
hasta = 7
pedazo = 10 # desde 1 hasta 1000000 hasta
cuantos = 200 # 200
def parte(replica):
    # One Monte Carlo chunk: draw `pedazo` samples from the custom
    # distribution and count how many land in [desde, hasta].
    # `replica` is just the worker index supplied by pool.map; it is unused.
    # NOTE(review): relies on module globals `generador`, `pedazo`, `desde`,
    # `hasta` -- worker processes see whatever values existed at spawn time,
    # so the `pedazo` increments in the parent may not reach workers; verify.
    V = generador.random(pedazo)[0]
    return ((V >= desde) & (V <= hasta)).sum()
import multiprocessing
if __name__ == "__main__":
    # Spanish ordinals for the progress messages, in decimal order
    # (original wording preserved, including "el segunda").
    ordinales = ["primer", "segunda", "tercer", "cuarto", "quinto",
                 "sexto", "septimo"]
    state = 0
    with multiprocessing.Pool(2) as pool:
        while True:
            montecarlo = pool.map(parte, range(cuantos))
            integral = sum(montecarlo) / (cuantos * pedazo)
            num = str((pi / 2) * integral)
            pedazo = pedazo + 100
            # The k-th decimal is reached when the first k+2 characters
            # ("0.", then k digits) of the estimate equal Wolfram's value.
            # Prefix slices replace the original char-by-char if-chain and
            # also avoid an IndexError when `num` is shorter than the
            # reference string (e.g. the estimate prints as "0.0").
            for k in range(state + 1, 8):
                if num[:k + 2] != wolf[:k + 2]:
                    break
                print("Se logra el", ordinales[k - 1], "decimal con:", pedazo,
                      "pedazos. Dado que:", num[k + 1], " es igual a", wolf[k + 1])
                state = k
            if state == 7:
                # Seventh decimal reached: stop, like the original break.
                break
            print(pedazo, num)
| Elitemaster97/Simulacion | Tarea5/Tarea5.1.py | Tarea5.1.py | py | 3,447 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "math.pi",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "math.exp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 16... |
35279730803 | from flask import Flask, url_for, render_template, request, flash, redirect, session, abort, jsonify
import RPi.GPIO as GPIO
import subprocess, os, logging
import ipdb
from config import Config
from time import sleep
'''initial VAR'''
# Light GPIO
RELAIS_4_GPIO = 2
# Water GPIO
RELAIS_WATER_GPIO = 22
logging.basicConfig(
filename='server.log',
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p'
)
app = Flask(__name__)
app.config.from_object(Config)
TOKEN = app.config['TOKEN']
'''functions'''
# Turn the light on
@app.route('/accendilucicortile', methods=['POST'])
def lights_on():
    """Switch the courtyard lights on (relay board is active-low)."""
    token = request.json.get('token', None)
    if token != TOKEN:
        logging.debug('not authorized access')
        return jsonify({"msg": "Unauthorized"}), 400
    # Token matched: drive the relay LOW to close the contact.
    logging.debug('Turn the lights on')
    GPIO.output(RELAIS_4_GPIO, GPIO.LOW)
    logging.debug('Lights are on')
    return jsonify({"msg": "Lights on"}), 200
# lights off
@app.route('/spegnilucicortile', methods=['POST'])
def lights_off():
    """Switch the courtyard lights off (relay board is active-low)."""
    token = request.json.get('token', None)
    if token != TOKEN:
        logging.debug('not authorized access')
        return jsonify({"msg": "Unauthorized"}), 400
    # Token matched: drive the relay HIGH to open the contact.
    logging.debug('Turn the lights off')
    GPIO.output(RELAIS_4_GPIO, GPIO.HIGH)
    logging.debug('Lights are off')
    return jsonify({"msg": "Lights off"}), 200
# water on
@app.route('/accendiacqua', methods=['POST'])
def water_on():
    # Open the irrigation valve (active-low relay), then read the pin back
    # after 5 s to verify that the relay actually engaged.
    token = request.json.get('token', None)
    if token != TOKEN:
        logging.debug('not authorized access')
        return jsonify({"msg": "Unauthorized"}), 400
    elif token == TOKEN:
        GPIO.output(RELAIS_WATER_GPIO, GPIO.LOW)
        logging.debug('Starting irrigation')
        sleep(5)
        # Pin still HIGH means the output did not switch.
        if GPIO.input(RELAIS_WATER_GPIO):
            logging.error('Irrigation not started')
        else:
            logging.debug('Irrigation correctly started')
        # NOTE(review): returns raw HTML while the light handlers return
        # JSON -- confirm whether clients depend on this inconsistency.
        return "<h1>Irrigation is on</h1>"
    else:
        # Unreachable: token is either equal or not equal to TOKEN.
        return jsonify({"msg": "This should never happen"}), 200
# water off
@app.route('/spegniacqua', methods=['POST'])
def water_off():
token = request.json.get('token', None)
if token != TOKEN:
logging.debug('not authorized access')
return jsonify({"msg": "Unauthorized"}), 400
elif token == TOKEN:
GPIO.output(RELAIS_WATER_GPIO, GPIO.HIGH)
logging.debug('Stopping Irrigation')
sleep(5)
if GPIO.input(RELAIS_WATER_GPIO):
logging.debug('Irrigation correctly stopped')
else:
logging.error('Irrigation not stopped')
return "<h1>Irrigation is off</h1>"
else:
return jsonify({"msg": "This should never happen"}), 200
if __name__ == '__main__':
    logging.info('starting up')
    GPIO.setmode(GPIO.BCM)
    # Relays are active-low, so both channels start HIGH (= off).
    GPIO.setup(RELAIS_4_GPIO, GPIO.OUT, initial=GPIO.HIGH)  # lights off
    if GPIO.input(RELAIS_4_GPIO):
        logging.debug('Luce spenta')
    else:
        logging.debug('Luce accesa')
    GPIO.setup(RELAIS_WATER_GPIO, GPIO.OUT, initial=GPIO.HIGH)  # water off
    if GPIO.input(RELAIS_WATER_GPIO):
        logging.debug('Irrigazione spenta')
    else:
        logging.debug('Irrigazione accesa')
    app.secret_key = os.urandom(12)
    try:
        app.run(
            debug=True,
            host='0.0.0.0',
            port=5000
        )
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and hid the actual error; log the traceback.
        logging.exception('unhandled exception in app.run')
    finally:
        # Always leave the irrigation valve closed and release the pins.
        GPIO.output(RELAIS_WATER_GPIO, GPIO.HIGH)
        GPIO.cleanup()
| oldgiova/python-api-webservice-lightscontrol | main.py | main.py | py | 3,747 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "config.Config",
... |
17176402024 | import datetime
import h5py
import librosa
import numpy as np
import os
import pandas as pd
import soundfile as sf
import sys
import time
import localmodule
# Define constants.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
orig_sr = localmodule.get_sample_rate()
negative_labels = localmodule.get_negative_labels()
clip_length = int(0.500 * orig_sr) # a clip lasts 500 ms
args = sys.argv[1:]
unit_str = args[0]
units = localmodule.get_units()
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Generating " + dataset_name + " clips for " + unit_str + ".")
print("h5py version: {:s}".format(h5py.__version__))
print("librosa version: {:s}".format(librosa.__version__))
print("numpy version: {:s}".format(np.__version__))
print("pandas version: {:s}".format(pd.__version__))
print("soundfile version: {:s}".format(sf.__version__))
print("")
# Create directory for original (i.e. non-augmented) clips.
predictions_name = "_".join([dataset_name, "baseline-predictions"])
predictions_dir = os.path.join(data_dir, predictions_name)
recordings_name = "_".join([dataset_name, "full-audio"])
recordings_dir = os.path.join(data_dir, recordings_name)
annotations_name = "_".join([dataset_name, "annotations"])
annotations_dir = os.path.join(data_dir, annotations_name)
dataset_wav_name = "_".join([dataset_name, "audio-clips"])
dataset_wav_dir = os.path.join(data_dir, dataset_wav_name)
os.makedirs(dataset_wav_dir, exist_ok=True)
original_dataset_wav_dir = os.path.join(dataset_wav_dir, "original")
os.makedirs(original_dataset_wav_dir, exist_ok=True)
# Create directory corresponding to the recording unit.
unit_dir = os.path.join(original_dataset_wav_dir, unit_str)
os.makedirs(unit_dir, exist_ok=True)
# Open full night recording.
samples = []
annotation_name = unit_str + ".txt"
annotation_path = os.path.join(annotations_dir, annotation_name)
df = pd.read_csv(annotation_path, sep='\t')
recording_name = unit_str + ".flac"
recording_path = os.path.join(recordings_dir, recording_name)
full_night = sf.SoundFile(recording_path)
n_positive_samples = 0
n_negative_samples = 0
# Export every annotation either as positive (flight call) or negative (alarm).
# Export one 500 ms clip per annotation; the clip file name encodes
# unit, center sample, center frequency, and the positive/negative label:
# <unit>_<sample:09d>_<freq:05d>_<label>_original.wav
for index, row in df.iterrows():
    # Compute center time of the annotation bounding box.
    begin_time = float(row["Begin Time (s)"])
    end_time = float(row["End Time (s)"])
    mid_time = 0.5 * (begin_time + end_time)
    sample = int(orig_sr * mid_time)
    sample_str = str(sample).zfill(9)
    # Compute center frequency of the annotation bounding box.
    low_freq = float(row["Low Freq (Hz)"])
    high_freq = float(row["High Freq (Hz)"])
    mid_freq = 0.5 * (low_freq + high_freq)
    freq_str = str(int(mid_freq)).zfill(5)
    # Annotations whose "Calls" value is a negative label (e.g. alarms)
    # become label 0; everything else is a positive flight call (label 1).
    if "Calls" in row and row["Calls"] in negative_labels:
        label_str = "0"
        n_negative_samples = n_negative_samples + 1
    else:
        label_str = "1"
        n_positive_samples = n_positive_samples + 1
    clip_list = [unit_str, sample_str, freq_str, label_str, "original.wav"]
    clip_str = "_".join(clip_list)
    # Read a clip_length window centered on the annotation.
    # NOTE(review): sample_start can go negative for annotations within
    # 250 ms of the recording start -- confirm SoundFile.seek handling.
    sample_start = sample - int(0.5 * clip_length)
    full_night.seek(sample_start)
    data = full_night.read(clip_length)
    # Export.
    clip_path = os.path.join(unit_dir, clip_str)
    sf.write(clip_path, data, orig_sr)
    samples.append(sample)
# The number of false positives to be added to the dataset is equal to the
# difference between the number of annotated positives and
# the number of annotated negatives.
n_false_positives = n_positive_samples - n_negative_samples
print("Number of positives: " + str(n_positive_samples) + ".")
print("Number of negatives: " + str(n_negative_samples) + ".")
print("Number of false positives (clips fooling baseline detector): "
+ str(n_false_positives) + ".")
print("Total number of clips: " + str(2*n_positive_samples) + ".")
print("")
# Load probabilities of the baseline prediction model.
prediction_name = unit_str + ".npy"
prediction_path = os.path.join(predictions_dir, prediction_name)
prob_matrix = np.load(prediction_path)
# Retrieve timestamps corresponding to decreasing confidences.
prob_samples = (prob_matrix[:, 0] * orig_sr).astype('int')
probs = prob_matrix[:, 1]
sorting_indices = np.argsort(probs)[::-1]
sorted_probs = probs[sorting_indices]
sorted_prob_samples = prob_samples[sorting_indices]
sorted_prob_samples = sorted_prob_samples
# The exported false positives correspond to the timestamps with highest
# confidences under the condition that they are 12000 samples (500 ms) apart
# from all previously exported clips.
prob_counter = 0
false_positive_counter = 0
while false_positive_counter < n_false_positives:
prob_sample = sorted_prob_samples[prob_counter]
dists = [np.abs(sample-prob_sample) for sample in samples]
min_dist = np.min(dists)
if min_dist > clip_length:
# Append sample to growing list.
samples.append(prob_sample)
sample_str = str(prob_sample).zfill(9)
# By convention, the frequency of a false positive example is 0 Hz.
freq_str = str(0).zfill(5)
clip_list = [unit_str, sample_str, freq_str, "0", "original.wav"]
false_positive_counter = false_positive_counter + 1
clip_str = "_".join(clip_list)
# Read.
sample_start = prob_sample - int(0.5 * clip_length)
full_night.seek(sample_start)
data = full_night.read(clip_length)
# Export.
clip_path = os.path.join(unit_dir, clip_str)
sf.write(clip_path, data, orig_sr)
prob_counter = prob_counter + 1
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| BirdVox/bv_context_adaptation | src/001_generate-audio-clips.py | 001_generate-audio-clips.py | py | 6,126 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "localmodule.get_data_dir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "localmodule.get_dataset_name",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "localmodule.get_sample_rate",
"line_number": 17,
"usage_type": "call"
},
{
... |
30039234138 | from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from django.http import HttpResponse
from .service.login import Login
from .service.report import uploadData
from .service.getdata import getRoadMap
import json
# Create your views here.
@require_http_methods(["GET", "POST"])
def wx_test(request):
    """Connectivity-test endpoint: always replies {'ok': 1}.

    The original request-body parsing was commented out, leaving a
    try/except around a dict assignment that can never raise; the dead
    code and the vacuous exception handler have been removed.
    """
    return JsonResponse({'ok': 1})
# 登录函数
@require_http_methods(["GET"])
def login(request):
    """Exchange a WeChat login `code` (GET query param) for a session payload."""
    try:
        wx_code = request.GET.get('code')
        payload = Login(wx_code)
    except Exception as exc:
        # Report failures in the same JSON envelope the client expects.
        payload = {'msg': str(exc), 'ok': 0}
    return JsonResponse(payload)
# 上传数据函数
@require_http_methods(["POST"])
def uploaddata(request):
    """Store road-condition measurements posted as JSON ({'data': ...})."""
    try:
        body = json.loads(request.body)
        payload = uploadData(body['data'])
    except Exception as exc:
        # Malformed JSON, missing 'data' key, or service errors all land here.
        payload = {'msg': str(exc), 'ok': 0}
    return JsonResponse(payload)
# 获取路线图函数
@require_http_methods(["GET"])
def getroadmap(request):
    # Return the stored road-roughness route map as JSON.
    response = {}
    try:
        response = getRoadMap()
    except Exception as e:
        # On failure, fall back to an error envelope in the empty dict.
        response['msg'] = str(e)
        response['ok'] = 0
return JsonResponse(response) | luzy99/road_smoothness_detection | road_detect_server/my_server/wx/views.py | views.py | py | 1,542 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.http.JsonResponse",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_http_methods",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "service.login.Login",
"line_number": 35,
"usage_type": "call"
... |
14279541024 | from collections import defaultdict
from intcode.intcode import read_program, VM
import matplotlib.pyplot as plt
DIRECTION_LEFT = (-1, 0)
DIRECTION_RIGHT = (1, 0)
DIRECTION_UP = (0, -1)
DIRECTION_DOWN = (0, 1)
TURN_LEFT = 0
TURN_RIGHT = 1
COLOR_BLACK = 0
COLOR_WHITE = 1
next_direction_left = {
DIRECTION_UP: DIRECTION_LEFT,
DIRECTION_LEFT: DIRECTION_DOWN,
DIRECTION_DOWN: DIRECTION_RIGHT,
DIRECTION_RIGHT: DIRECTION_UP
}
next_direction_right = {
DIRECTION_UP: DIRECTION_RIGHT,
DIRECTION_RIGHT: DIRECTION_DOWN,
DIRECTION_DOWN: DIRECTION_LEFT,
DIRECTION_LEFT: DIRECTION_UP
}
def compute_position_to_color(initial_state, inital_color):
    """Run the paint-robot intcode program and return {position: color}
    for every panel it touched, starting on a panel of `inital_color`."""
    painted = defaultdict(lambda: COLOR_BLACK)
    pos = (0, 0)
    heading = DIRECTION_UP
    painted[pos] = inital_color
    robot = VM(initial_state)
    while True:
        try:
            # Feed the current panel color; read back paint + turn commands.
            robot.send_input(painted[pos])
            paint_color = robot.get_output()
            turn = robot.get_output()
        except StopIteration:
            # Program halted: no more paint instructions.
            break
        painted[pos] = paint_color
        heading = next_direction(heading, turn)
        pos = next_position(pos, heading)
    return painted
def next_direction(current_direction, turn):
    """Rotate `current_direction` 90 degrees per the turn command."""
    table = next_direction_left if turn == TURN_LEFT else next_direction_right
    return table[current_direction]
def next_position(position, direction):
    """Translate `position` one step along the `(dx, dy)` direction."""
    return (position[0] + direction[0], position[1] + direction[1])
def draw(position_to_color):
    # Scatter-plot only the white panels (the painted registration code).
    # The y limits run high-to-low because grid rows grow downward while
    # pyplot's y axis grows upward.
    white_positions = [position for position, color in position_to_color.items() if color == COLOR_WHITE]
    x, y = zip(*white_positions)
    plt.scatter(x, y, s=20)
    plt.ylim((10, -20))
    plt.show()
if __name__ == "__main__":
initial_state = read_program("input.txt")
position_to_color = compute_position_to_color(initial_state, COLOR_BLACK)
print("Part 1:", len(position_to_color))
position_to_color = compute_position_to_color(initial_state, COLOR_WHITE)
draw(position_to_color)
| bwdvolde/advent-of-code-2019 | day11/solution.py | solution.py | py | 2,178 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "intcode.intcode.VM",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "... |
25205799104 | # -*- coding: utf-8 -*-
"""
Abstract class for detectors
"""
import abc
class Embedder(abc.ABC):
    """Abstract interface for feature embedders."""

    @abc.abstractmethod
    def embed(self):
        """Return embedded features."""
        return NotImplemented

    @abc.abstractmethod
    def get_input_shape(self):
        """Return the expected input shape."""
        return NotImplemented
from easydict import EasyDict as edict
import mxnet as mx
import numpy as np
import cv2
from skimage import transform as trans
class ArcFace_Embedder(Embedder):
    def get_input_shape(self):
        # Interface hook from Embedder; unused here -- the ArcFace input
        # size is fixed at 3x112x112 (see image_shape in __init__).
        pass
def do_flip(self, data):
for idx in range(data.shape[0]):
data[idx,:,:] = np.fliplr(data[idx,:,:])
return data
def __init__(self):
modeldir = './model/insight_face/model-r50-am-lfw/model'
gpuid = 0
ctx = mx.gpu(gpuid)
self.nets = []
image_shape = [3, 112, 112]
modeldir_=modeldir+',0'
for model in modeldir_.split('|'):
vec = model.split(',')
assert len(vec)>1
prefix = vec[0]
epoch = int(vec[1])
print('loading',prefix, epoch)
net = edict()
net.ctx = ctx
net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
#net.arg_params, net.aux_params = ch_dev(net.arg_params, net.aux_params, net.ctx)
all_layers = net.sym.get_internals()
net.sym = all_layers['fc1_output']
net.model = mx.mod.Module(symbol=net.sym, context=net.ctx, label_names = None)
net.model.bind(data_shapes=[('data', (1, 3, image_shape[1], image_shape[2]))])
net.model.set_params(net.arg_params, net.aux_params)
#_pp = prefix.rfind('p')+1
#_pp = prefix[_pp:]
#net.patch = [int(x) for x in _pp.split('_')]
#assert len(net.patch)==5
#print('patch', net.patch)
self.nets.append(net)
def align(self, detections):
warped_images=[]
for det in detections:
raw_face_image = det['face_img']
#plt.imshow(raw_face_image)
#plt.show()
image_size = [112,112]
src = np.array([
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041] ], dtype=np.float32 )
if image_size[1]==112:
src[:,0] += 8.0
offset = ([
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]],
[det['face_bbox'][0],det['face_bbox'][1]]
])
npoint= np.array(det['face_keypoint']) - np.array(offset)
dst = npoint#.reshape( (2,5) ).T
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2,:]
warped = cv2.warpAffine(raw_face_image,M,(image_size[1],image_size[0]), borderValue = 0.0)
#plt.imshow(warped)
warped_images.append(warped)
return warped_images
def embed(self, detections):
det_with_face = [ det for det in detections if det['face_img'] is not None]
if len(det_with_face)==0:
return detections
aligned_face_images = self.align(det_with_face)
embeds =[]
# Image_based Detection time per face : 0.018270
# for image in aligned_face_images:
# image = np.transpose( image, (2,0,1) )
# F = None
# for net in self.nets:
# embedding = None
# #ppatch = net.patch
# for flipid in [0,1]:
# _img = np.copy(image)
# if flipid==1:
# #plt.imshow(np.transpose( _img, (1,2,0) )[:,:,::-1])
# #plt.show()
# _img = self.do_flip(_img)
# #plt.imshow(np.transpose( _img, (1,2,0) )[:,:,::-1])
# #plt.show()
# input_blob = np.expand_dims(_img, axis=0)
# data = mx.nd.array(input_blob)
# db = mx.io.DataBatch(data=(data,))
# net.model.forward(db, is_train=False)
# _embedding = net.model.get_outputs()[0].asnumpy().flatten()
# #print(_embedding.shape)
# if embedding is None:
# embedding = _embedding
# else:
# embedding += _embedding
# _norm=np.linalg.norm(embedding)
# embedding /= _norm
# if F is None:
# F = embedding
# else:
# F += embedding
# #F = np.concatenate((F,embedding), axis=0)
# _norm=np.linalg.norm(F)
# F /= _norm
# embeds.append(F)
# Batch_based Detection time per face : 0.004155
batch_images = []
for image in aligned_face_images:
image = np.transpose( image, (2,0,1) )
for flipid in [0,1]:
_img = np.copy(image)
if flipid==1:
_img = self.do_flip(_img)
batch_images.append(_img)
input_blob = np.array(batch_images)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
for net in self.nets:
net.model.forward(db, is_train=False)
_embedding = net.model.get_outputs()[0].asnumpy()#.flatten()
tmp = []
for i in range(0,len(_embedding),2):
mean_flip = (_embedding[i]+_embedding[i+1])/2
_norm=np.linalg.norm(mean_flip)
mean_flip/= _norm
tmp.append( mean_flip )
embeds.append(tmp)
# Instead of adding up, we temporary replace with mean
embeds = np.mean(embeds,axis=0)
for det, emb in zip(det_with_face, embeds):
det['face_embed'] = emb
return detections
def embed_imgs(self, images):
aligned_face_images = images
embeds =[]
# Batch_based Detection time per face : 0.004155
batch_images = []
for image in aligned_face_images:
image = np.transpose( image, (2,0,1) )
for flipid in [0,1]:
_img = np.copy(image)
if flipid==1:
_img = self.do_flip(_img)
batch_images.append(_img)
input_blob = np.array(batch_images)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
for net in self.nets:
net.model.forward(db, is_train=False)
_embedding = net.model.get_outputs()[0].asnumpy()#.flatten()
tmp = []
for i in range(0,len(_embedding),2):
mean_flip = (_embedding[i]+_embedding[i+1])/2
_norm=np.linalg.norm(mean_flip)
mean_flip/= _norm
tmp.append( mean_flip )
embeds.append(tmp)
return embeds
# TEST CODES
if __name__ == '__main__':
import time
import matplotlib.pyplot as plt
import glob
import os
os.chdir('../../')
import src.detector.detectors as detectors
# Detector and Embedder
Y_MTCNN = detectors.Yolov2_MTCNN()
embed=ArcFace_Embedder()
# Load Images
paths = glob.glob('./src/face_reid/test_images/*.jpg')
paths.sort()
dets = []
for img_path in paths:
test_img=cv2.imread(img_path)
s = time.time()
result_Y = Y_MTCNN.predict(test_img)
dets.append(result_Y[0])
e = time.time()
print('Detection time per frame : %f'%(e-s))
vis_img = test_img.copy()
for track in result_Y:
x1, y1, x2, y2 = track['person_bbox']
color = np.random.randint(low=0,high=255,size=3)
color = (int(color[0]),int(color[1]),int(color[2]))
cv2.rectangle(vis_img,(x1, y1), (x2, y2),color,5)
fx1, fy1, fx2, fy2 = track['face_bbox']
cv2.rectangle(vis_img, (x1+fx1, y1+fy1), (x1+fx2, y1+fy2), color, 5)
for pt in track['face_keypoint']:
cv2.circle(vis_img, (x1+pt[0], y1+pt[1]), 5, color,5 ,1)
plt.imshow(vis_img[:,:,::-1])
plt.show()
# Test Code
s = time.time()
dets = embed.embed(dets)
embed_features = [det['face_embed'] for det in dets]
e = time.time()
print('Detection time per face : %f'%((e-s)/len(dets)))
dis_chart = np.zeros((len(embed_features),len(embed_features)))
for i in range(len(embed_features)):
for j in range(len(embed_features)):
dis_chart[i,j]= np.sqrt( np.sum( np.square(embed_features[i] - embed_features[j]))+1e-12 )
sim_chart = np.zeros((len(embed_features),len(embed_features)))
for i in range(len(embed_features)):
for j in range(len(embed_features)):
sim_chart[i,j]= np.dot( embed_features[i], embed_features[j].T )
'''
if len(detections)>0:
have_face_indexs =[]
input_dets =[]
for idx,det in enumerate(detections):
if det['face_img'] is not None:
have_face_indexs.append(idx)
input_dets.append(det)
if len(input_dets)>0:
emb_results = self.FACE_EMBEDDER.embed(input_dets)
for i,e in zip(have_face_indexs,emb_results):
detections[i]['face_embed'] = e
''' | chunhanl/ElanGuard_Public | src/face_reid/embedders.py | embedders.py | py | 10,556 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "abc.ABC",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.flip... |
21107944564 | from classification.image_to_array import imageTonpv
from classification.cnn_class import cnn_class
import csv
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
def finalReport(label_ids=None, cids_test=None, class_count=None,
                class_viability=None, path='', model=''):
    """Aggregate per-patch predictions per image and append a CSV report.

    Args:
        label_ids: image id for every classified patch.
        cids_test: unused; kept for interface compatibility.
        class_count: predicted cell count per patch.
        class_viability: predicted viable-cell count per patch; entries are
            clamped in place so viability never exceeds the count (downstream
            code reuses the clamped list).
        path: dataset root; the CSV lands under `<path>/classification/`.
        model: model tag embedded in the report file name.

    Writes four rows: image labels, viability ratios, total counts, and
    total viable counts.
    """
    # Fresh lists instead of mutable default arguments (shared-state bug).
    label_ids = [] if label_ids is None else label_ids
    class_count = [] if class_count is None else class_count
    class_viability = [] if class_viability is None else class_viability
    viability_ratio = []
    num_counting = []
    num_viability = []
    label_set = sorted(set(label_ids))
    for img_ids in label_set:
        count = 0
        viability = 0
        for index, ids in enumerate(label_ids):
            if ids == img_ids:
                # Clamp in place: viability can never exceed the count.
                if class_count[index] < class_viability[index]:
                    class_viability[index] = class_count[index]
                count = count + class_count[index]
                viability = viability + class_viability[index]
        if count < viability:
            viability = count
        # Avoid division by zero for images with no detected cells.
        if count == 0:
            viability_ratio.append(0)
        else:
            viability_ratio.append(float(viability / count))
        num_counting.append(count)
        num_viability.append(viability)
    label_format = [str(format(ids, '05d')) + '_' for ids in label_set]
    import time
    timestr = time.strftime("%Y%m%d-%H%M%S")
    report_path = (path + '/classification/' + str(timestr) + 'FINAL_REPORT'
                   + model + '_' + 'CNN_csv.csv')
    # `with` guarantees the handle is closed (the original leaked it).
    with open(report_path, 'a', newline='') as out:
        csv_write = csv.writer(out, dialect='excel')
        csv_write.writerow(label_format)
        csv_write.writerow(viability_ratio)
        csv_write.writerow(num_counting)
        csv_write.writerow(num_viability)
def saveasSpectrum(label_ids = [], cids_test = [], class_count=[], class_viability =[],path = '', model='' ):
    # Overlay a live/dead color spectrum on each raw image: detected pixels
    # are tinted from red (all dead) to green (all viable); cyan marks
    # patches with a zero count. Output PNGs go to <path>/classification/.
    # NOTE(review): mutable default arguments and the '\\' path separator
    # (Windows-only) are pre-existing quirks; confirm before reuse.
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    save_path = path + '/classification/'
    # Per-patch pixel coordinates saved by the prediction step.
    DETECTION_test = np.load('classification/npv/VIABILITY/' + model + '/detection.npy')
    label_set = sorted(set(label_ids))
    for index, folder in enumerate(onlyfiles):
        raw = cv2.imread(path + '\\' + folder)
        ind = folder.split('RGB')[0]
        new_imageBGR = raw
        # assumes the i-th file in listdir order corresponds to the i-th
        # sorted label id -- TODO confirm this pairing is stable.
        for index_ids, ids in enumerate(label_ids):
            if list(label_set)[index] == ids and not len(DETECTION_test[index_ids]) == 0:
                count = class_count[index_ids]
                viability = class_viability[index_ids]
                if count < viability:
                    viability = count
                if count == 0:
                    green_level = 0
                    red_level = 255
                    blue_level = 255
                else:
                    green_level = int(255 * viability / count)
                    red_level = int(255 * (1 - viability / count))
                    blue_level = 0
                # OpenCV images are BGR, hence the channel order here.
                color_spectrum = (blue_level, green_level, red_level)
                for position in DETECTION_test[index_ids]:
                    new_imageBGR[position[0], position[1]] = color_spectrum
        cv2.imwrite(save_path + ind + model + '.png', new_imageBGR)
def classifyMain(folder_test='', folder_train='', analysis_type=None):
    """Run segmentation preprocessing + CNN classification on `folder_test`.

    Args:
        folder_test: directory of images to classify.
        folder_train: accepted for interface compatibility; unused here.
        analysis_type: dict with
            'predict_type': 0 skips prediction entirely;
            'model_type': 0 = watershed, 1 = U-NET, 2 = U-NET + watershed.
    """
    # Avoid the mutable default argument; behavior for a missing dict is
    # unchanged (lookups below still raise KeyError).
    if analysis_type is None:
        analysis_type = {}
    # model_type -> (model tag for preprocessing/CNN, spectrum overlay tag,
    # completion message). The original duplicated the whole pipeline once
    # per branch; note the pre-existing 'watershed'/'WATERSHED' casing.
    model_table = {
        1: ('UNET', 'UNET', 'U-NET complete'),
        0: ('watershed', 'WATERSHED', 'watershed complete'),
        2: ('UW', 'UW', 'U-NET watershed complete'),
    }
    if not analysis_type["predict_type"] == 0:
        selected = model_table.get(analysis_type["model_type"])
        if selected is not None:
            model, spectrum_tag, message = selected
            # step 1.0 background preprocess and npv convert
            imageTonpv(folder_test, model)
            # step 2.0 load model weight and predict
            label_ids, cids_test, class_count = cnn_class(folder_test, model, 'COUNTING')
            label_ids, cids_test, class_viability = cnn_class(folder_test, model, 'VIABILITY')
            # step 3.0 save final csv results and live-dead markers
            finalReport(label_ids, cids_test, class_count, class_viability, folder_test, model)
            saveasSpectrum(label_ids, cids_test, class_count, class_viability, folder_test, spectrum_tag)
            print(message)
print("classify complete") | chenxun511happy/Cartilage-Net | classification/ClassifyMain.py | ClassifyMain.py | py | 5,545 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "time.strftime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number... |
26969763906 | import os
from utils.util import make_dir_under_root, read_dirnames_under_root
OUTPUT_ROOT_DIR_NAMES = [
'masked_frames',
'result_frames',
'optical_flows'
]
class RootInputDirectories:
    """Input locations: root directories of video frames and of masks.

    If `video_names_filename` is given, only the videos named in that file
    (first whitespace-separated token per line) are used; otherwise every
    directory under `root_videos_dir` is taken.
    """
    def __init__(
        self,
        root_videos_dir,
        root_masks_dir,
        video_names_filename=None
    ):
        self.root_videos_dir = root_videos_dir
        self.root_masks_dir = root_masks_dir
        if video_names_filename is None:
            self.video_dirnames = read_dirnames_under_root(root_videos_dir)
        else:
            with open(video_names_filename, 'r') as fin:
                names = [line.split()[0] for line in fin.readlines()]
            self.video_dirnames = [
                os.path.join(root_videos_dir, name) for name in names
            ]
        self.mask_dirnames = read_dirnames_under_root(root_masks_dir)

    def __len__(self):
        return len(self.video_dirnames)
class RootOutputDirectories:
    """Creates the output sub-directories and exposes them as attributes."""
    def __init__(
        self, root_outputs_dir,
    ):
        # One directory per known output kind, created eagerly.
        self.output_root_dirs = {
            name: make_dir_under_root(root_outputs_dir, name)
            for name in OUTPUT_ROOT_DIR_NAMES
        }

    def __getattr__(self, attr):
        # Attribute access falls back to the directory map.
        if attr not in self.output_root_dirs:
            raise KeyError(
                f"{attr} not in root_dir_names {self.output_root_dirs}")
        return self.output_root_dirs[attr]
class VideoDirectories:
    """Resolves every per-(video, mask) path used by the pipeline."""
    def __init__(
        self, root_inputs_dirs, root_outputs_dirs, video_name, mask_name
    ):
        # Unique run name combining the video and the mask identities.
        self.name = f"video_{video_name}_mask_{mask_name}"
        inputs = root_inputs_dirs
        outputs = root_outputs_dirs
        join = os.path.join
        self.frames_dir = join(inputs.root_videos_dir, video_name)
        self.mask_dir = join(inputs.root_masks_dir, mask_name)
        self.masked_frames_dir = join(outputs.masked_frames, self.name)
        self.results_dir = join(outputs.result_frames, self.name)
        # Optical flows are keyed by the video alone (mask-independent).
        self.flows_dir = join(outputs.optical_flows, video_name)
| amjltc295/Free-Form-Video-Inpainting | src/utils/directory_IO.py | directory_IO.py | py | 2,029 | python | en | code | 323 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "utils.util.read_dirnames_under_root",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.u... |
29272947400 | import pymongo
import json
from pymongo import MongoClient
from bson import json_util
def StatImages():
    """Aggregate statistics over the image documents stored in MongoDB.

    Counts the images, sums their weight, tallies images per format, and
    groups duplicate images (identical md5) by id into copiesId /
    copiesIdList.
    """
    client = MongoClient('mongodb://0.0.0.0:27017/')
    db = client['diplom_mongo_1']
    posts = db.posts
    # BUG FIX: materialize the cursor. The nested loop below iterates the
    # result repeatedly, and a pymongo cursor is exhausted after one pass,
    # so the duplicate detection never saw the data a second time.
    data = list(posts.find({"type": "image"}))
    count = 0
    weight = 0
    copies = 0
    copiesId = {}
    copiesIdList = []
    imgFormat = {}
    for item in data:
        count += 1
        weight += item['weight']
        # BUG FIX: the per-format tally used the whole document as the
        # dict key (imgFormat.get(item)) — dicts are unhashable, which
        # raises TypeError. Tally by the 'format' field instead.
        fmt = item['format']
        imgFormat[fmt] = imgFormat.get(fmt, 0) + 1
        flag = False
        for item1 in data:
            if item == item1:
                continue
            # Same content (md5) and not already recorded as a copy.
            if item['md5'] == item1['md5'] and item1['id'] not in copiesIdList:
                if flag:
                    # Further duplicates of an already-registered original.
                    copies += 1
                    copiesId[item['id']].append(item1['id'])
                    copiesIdList.append(item1['id'])
                else:
                    # First duplicate found: both original and copy count.
                    copies += 2
                    copiesId[item['id']] = [item1['id']]
                    flag = True
                    copiesIdList.append(item['id'])
                    copiesIdList.append(item1['id'])
print(copiesIdList) | dethdiez/viditory_analyzer | api/stat.py | stat.py | py | 964 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
}
] |
14351509220 |
import mercantile
def get_blank_feature_json(lat, lon):
    """Build a minimal GeoJSON Point Feature for (lat, lon).

    Coordinates are ordered [lon, lat] per the GeoJSON convention.
    """
    return {
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [lon, lat]},
    }
# GET QUADHASH TILE OF A GIVEN COORDINATE
def get_quad_tile(lat, lon, precision):
    """Return the mercantile Tile containing (lat, lon) at zoom *precision*."""
    return mercantile.tile(lon, lat, precision)
def get_quad_key_from_tile(x, y, zoom):
    """Return the quadkey string for tile (x, y) at *zoom*."""
    key = mercantile.quadkey(x, y, zoom)
    return key
# GIVEN A QUAD_KEY, GET THE CORRESPONDING QUAD TILE
def get_tile_from_key(key):
    """Return the mercantile Tile named by quadkey *key*."""
    tile = mercantile.quadkey_to_tile(key)
    return tile
# GET QUADHASH STRING OF A GIVEN COORDINATE
def get_quad_key(lat, lon, zoom):
    """Quadkey string of the tile containing (lat, lon) at *zoom*."""
    t = get_quad_tile(lat, lon, precision=zoom)
    return get_quad_key_from_tile(t.x, t.y, t.z)
#GIVEN A ZOOM LEVEL, WHAT IS THE MAX POSSIBLE TILE NUMBER HERE?
def get_max_possible_xy(zoom):
    """Largest valid tile x/y index at *zoom*: 2**zoom - 1 (0 at zoom 0)."""
    return 0 if zoom == 0 else 2 ** zoom - 1
# GIVEN A TILE, VERIFY IT IS VALID
def validate_tile(tile):
    """True when the tile's x and y lie within [0, max] for its zoom level."""
    upper = get_max_possible_xy(tile.z)
    return 0 <= tile.x <= upper and 0 <= tile.y <= upper
# GIVEN A BOX, FIND ALL TILES THAT LIE INSIDE THAT COORDINATE BOX
def find_all_inside_box(lat1, lat2, lon1, lon2, zoom):
    """All tiles at *zoom* covering the box.

    lat1/lon2 is the bottom-right corner, lat2/lon1 the top-left one.
    """
    top_left = get_quad_tile(lat2, lon1, zoom)
    bottom_right = get_quad_tile(lat1, lon2, zoom)
    print("TOP_LEFT & BOTTOM_RIGHT: ", top_left, bottom_right)
    tiles = []
    for x in range(top_left.x, bottom_right.x + 1):
        for y in range(top_left.y, bottom_right.y + 1):
            tiles.append(mercantile.Tile(x=x, y=y, z=zoom))
    return tiles
#GIVEN A TILE, FIND THE SMALLER TILES THAT LIE INSIDE
def get_inner_tiles(tile_string):
    """The four child tiles one zoom level below quadkey *tile_string*.

    Children are produced in quadrant order 0..3.
    """
    return [get_tile_from_key(tile_string + str(q)) for q in range(4)]
#GIVEN A QUAD_TILE, GET ITS LAT-LNG BOUNDS
def get_bounding_lng_lat(tile_key):
    """(north, south, east, west) geographic bounds of quadkey *tile_key*."""
    b = mercantile.bounds(get_tile_from_key(tile_key))
    return (b.north, b.south, b.east, b.west)
if __name__ == '__main__':
    # Ad-hoc demo / sanity checks.
    tile_key = "02132333222"
    tl = get_quad_tile(39.800137, -105.002746, 11)
    # BUG FIX: the original passed (tl.z, tl.y, tl.z), printing the quadkey
    # of the wrong tile; the tile's x coordinate belongs first.
    print(get_quad_key_from_tile(tl.x, tl.y, tl.z))
    '''for t in find_all_inside_box(40.33, 40.866726, -105.31, -104.96, 11):
        print(get_quad_key_from_tile(t.x, t.y, t.z))'''
    bounds = get_bounding_lng_lat("0231")
    print(bounds)
    print("BOUNDS>>", bounds[0], ",", bounds[3], bounds[1], ",", bounds[2])
    # Scale factors relating the tile extent to a fixed lat/lon span.
    xs = (49.2 - 25.86) / (bounds[0] - bounds[1])
    ys = (124.4 - 73.56) / (bounds[2] - bounds[3])
    print(xs, ys)
print(xs,ys)
| InsertCoolNameHere/Quby | geo_utils/quadtile_utils.py | quadtile_utils.py | py | 2,812 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mercantile.tile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mercantile.quadkey",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mercantile.quadkey_to_tile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "mercant... |
37970241909 | import cv2
import numpy as np
# frame = np.full((360, 480, 3), 0, dtype=int)
frame = cv2.imread("/home/pi/Pictures/2020-07-20_1439.jpg")
cv2.imshow("Frame", frame)
# Report every key code seen until 'q' quits the loop.
while True:
    pressed = cv2.waitKey(1)
    if pressed != -1:
        print("Key", pressed)
        if pressed == ord("q"):
            break
| webbhm/FlaskExperiment | python/test_key.py | test_key.py | py | 295 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 9,
"usage_type": "call"
}
] |
74306420987 | from dataclasses import dataclass
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer,
MetaData, Numeric, String, Table, create_engine)
# NOTE(review): this module-level metadata appears unused — each IOLModel
# instance builds its own MetaData in __post_init__; confirm before removing.
metadata = MetaData()
@dataclass
class IOLModel:
    """Declarative SQLite schema for IOL (InvertirOnline) market data.

    On construction it models every table, creates an SQLite engine at
    ``sql_path`` and materializes the schema.
    """
    # Filesystem path of the SQLite database file.
    sql_path: str
    def __post_init__(self):
        """Model the tables, build the engine and create the database."""
        self.metadata = MetaData()
        self.model_tables()
        self.create_engine()
        self.create_database()
    def model_tables(self):
        """Create table models"""
        # Mapping of asset classes to countries.
        self.asset_class_country = Table(
            'asset_class_country', self.metadata,
            Column('id', Integer(), primary_key=True, autoincrement = True),
            Column('asset_class', String(20), nullable=False),
            Column('country', String(20), nullable=False)
        )
        # Static descriptive data of FCI (mutual) funds.
        self.fci_info = Table(
            'fci_info', self.metadata,
            Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
            Column('desc', String(50)),
            Column('type', String(20)),
            Column('adm_type', String(20)),
            Column('horizon', String(20)),
            Column('profile', String(20)),
            Column('yearly_var', Numeric(5,5)),
            Column('monthly_var', Numeric(5,5)),
            Column('investment', String(300)),
            Column('term', String(2)),
            Column('rescue', String(2)),
            Column('report', String(250)),
            Column('regulation', String(250)),
            Column('currency', String(20)),
            Column('country', String(20)),
            Column('market', String(20)),
            Column('bloomberg', String(20)),
        )
        # Latest quote per symbol as shown on a market screen.
        self.screen_last_price = Table(
            'screen_last_price', self.metadata,
            Column('country', String(20), nullable=False),
            Column('asset_class', String(20), nullable=False),
            Column('screen', String(20), nullable=False),
            Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
            Column('desc', String(50)),
            Column('date_time', DateTime()),
            Column('open', Numeric(12,2)),
            Column('high', Numeric(12,2)),
            Column('low', Numeric(12,2)),
            Column('close', Numeric(12,2)),
            Column('bid_q', Numeric(12,2)),
            Column('bid_price', Numeric(12,2)),
            Column('ask_price', Numeric(12,2)),
            Column('ask_q', Numeric(12,2)),
            Column('vol', Numeric(12,2)),
        )
        # Which screens exist per country / asset class.
        self.screens_country_instrument = Table(
            'screens_country_instrument', self.metadata,
            Column('id', Integer(), primary_key=True, autoincrement = True),
            Column('country', String(20), nullable=False),
            Column('asset_class', String(20), nullable=False),
            Column('screen', String(20), nullable=False),
        )
        # Daily OHLCV history per symbol and market.
        self.symbol_daily = Table(
            'symbol_daily', self.metadata,
            Column('id', Integer(), primary_key=True, autoincrement = True),
            Column('symbol', String(20), nullable=False),
            Column('market', String(20), nullable=False),
            Column('date', DateTime()),
            Column('open', Numeric(12,2)),
            Column('high', Numeric(12,2)),
            Column('low', Numeric(12,2)),
            Column('close', Numeric(12,2)),
            Column('vol', Numeric(12,2)),
        )
        # Static descriptive data per symbol.
        self.symbol_info = Table(
            'symbol_info', self.metadata,
            Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
            Column('market', String(20)),
            Column('desc', String(50)),
            Column('country', String(20)),
            Column('type', String(20)),
            Column('term', String(2)),
            Column('currency', String(20)),
        )
        # Latest full quote (incl. order-book top and trading flags) per symbol.
        self.symbol_last_price = Table(
            'symbol_last_price', self.metadata,
            Column('id', Integer(), primary_key=True, autoincrement = True),
            Column('symbol', String(20)),
            Column('type', String(20)),
            Column('date_time', DateTime()),
            Column('open', Numeric(12,2)),
            Column('high', Numeric(12,2)),
            Column('low', Numeric(12,2)),
            Column('close', Numeric(12,2)),
            Column('bid_q', Numeric(12,2)),
            Column('bid_price', Numeric(12,2)),
            Column('ask_price', Numeric(12,2)),
            Column('ask_q', Numeric(12,2)),
            Column('vol', Numeric(12,2)),
            Column('desc', String(50)),
            Column('market', String(20)),
            Column('currency', String(20)),
            Column('country', String(20)),
            Column('term', String(2)),
            Column('lote', Numeric(12,2)),
            Column('lamina_min', Numeric(12,2)),
            Column('q_min', Numeric(12,2)),
            Column('shown', Boolean()),
            Column('buyable', Boolean()),
            Column('sellable', Boolean()),
        )
        # Option chains per underlying (several columns intentionally disabled).
        self.symbol_options = Table(
            'symbol_options', self.metadata,
            Column('underlying', String(20)),
            Column('date_time', DateTime()),
            Column('symbol', String(20), primary_key=True, unique=True, nullable=False),
            Column('type', String(20)),
            Column('expire', DateTime()),
            Column('days_expire', Numeric(3)),
            Column('desc', String(50)),
            Column('strike', Numeric(12,2)),
            Column('open', Numeric(12,2)),
            Column('high', Numeric(12,2)),
            Column('low', Numeric(12,2)),
            Column('close', Numeric(12,2)),
            Column('bid_ask', Numeric(12,2)),
            # Column('bid_price', Numeric(12,2)),
            # Column('ask_price', Numeric(12,2)),
            # Column('ask_q', Numeric(12,2)),
            Column('vol', Numeric(12,2)),
            Column('var', Numeric(12,2)),
            # Column('market', String(20)),
            # Column('currency', String(20)),
            Column('country', String(20)),
            # Column('term', String(2)),
            # Column('lote', Numeric(12,2)),
            # Column('lamina_min', Numeric(12,2)),
            # Column('q_min', Numeric(12,2)),
            # Column('shown', Boolean()),
            # Column('buyable', Boolean()),
            # Column('sellable', Boolean()),
        )
    def create_engine(self):
        """Create an SQLite DB engine.

        NOTE: the method name shadows sqlalchemy.create_engine inside the
        class namespace; the unqualified call below still resolves to the
        imported module-level function.
        """
        self.engine = create_engine(f'sqlite:///{self.sql_path}')
    def create_database(self):
        """Materialize all modeled tables on the engine (CREATE TABLE IF NOT EXISTS)."""
        self.metadata.create_all(self.engine)
# cookies = Table('cookies', metadata,
# Column('cookie_id', Integer(), primary_key=True),
# Column('cookie_name', String(50), index=True),
# Column('cookie_recipe_url', String(255)),
# Column('cookie_sku', String(55)),
# Column('quantity', Integer()),
# Column('unit_cost', Numeric(12, 2))
# )
# users = Table('users', metadata,
# Column('user_id', Integer(), primary_key=True),
# Column('customer_number', Integer(), autoincrement=True),
# Column('username', String(15), nullable=False, unique=True),
# Column('email_address', String(255), nullable=False),
# Column('phone', String(20), nullable=False),
# Column('password', String(25), nullable=False),
# Column('created_on', DateTime(), default=datetime.now),
# Column('updated_on', DateTime(), default=datetime.now, onupdate=datetime.now)
# )
# orders = Table('orders', metadata,
# Column('order_id', Integer(), primary_key=True),
# Column('user_id', ForeignKey('users.user_id'))
# )
# line_items = Table('line_items', metadata,
# Column('line_items_id', Integer(), primary_key=True),
# Column('order_id', ForeignKey('orders.order_id')),
# Column('cookie_id', ForeignKey('cookies.cookie_id')),
# Column('quantity', Integer()),
# Column('extended_cost', Numeric(12, 2))
# )
# engine = create_engine('sqlite:///:memory:')
# metadata.create_all(engine) | fscorrales/apys | src/apys/models/iol_model.py | iol_model.py | py | 7,935 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.MetaData",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.MetaData",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Co... |
25585522305 | import os
import requests
import re
import yaml
from packaging import version
# Update the avalanchego_vms_list variable in roles/node/vars with new VM
# versions available and their compatibility with AvalancheGo.
GITHUB_RAW_URL = 'https://raw.githubusercontent.com'
GITHUB_API_URL = 'https://api.github.com'
# Path of the Ansible vars file, relative to this script's directory.
VARS_YAML_PATH = '../roles/node/vars/main.yml'
# Number of leading lines to preserve verbatim (yaml.dump would drop them).
VARS_YAML_HEADER_SIZE = 3
# VM name -> GitHub "owner/repo" whose README documents compatibility.
VMS_REPOS = {
    'subnet-evm': 'ava-labs/subnet-evm',
}
# Ignore compatibility entries older than this AvalancheGo version.
MIN_AVAX_VERSION = '1.9.6'
vms_versions_comp = {}
# For each VM, fetch AvalancheGo compatibility info from the repo README.
for vm, repo in VMS_REPOS.items():
    repo_info = requests.get(f'{GITHUB_API_URL}/repos/{repo}')
    default_branch = repo_info.json()['default_branch']
    readme_url = f'{GITHUB_RAW_URL}/{repo}/{default_branch}/README.md'
    readme_raw = requests.get(readme_url)
    # Lines look like "[v0.4.0-v0.4.2] AvalancheGo@v1.9.6-v1.9.8";
    # the end versions are optional.
    compatibility_specs = list(
        re.finditer(
            r'^\[v(?P<vm_start_ver>\d+\.\d+\.\d+)-?v?(?P<vm_end_ver>\d+\.\d+\.\d+)?\] '
            r'AvalancheGo@v(?P<avax_start_ver>\d+\.\d+\.\d+)-?v?(?P<avax_end_ver>\d+\.\d+\.\d+)?',
            readme_raw.text,
            flags=re.MULTILINE,
        )
    )
    # Expand each spec's VM version range into individual "X.Y.Z" entries.
    versions_comp = {}
    for c in compatibility_specs:
        vm_start_ver = version.parse(c.group('vm_start_ver'))
        vm_end_ver = version.parse(c.group('vm_end_ver') or c.group('vm_start_ver'))
        for major in range(vm_start_ver.major, vm_end_ver.major + 1):
            for minor in range(vm_start_ver.minor, vm_end_ver.minor + 1):
                for micro in range(vm_start_ver.micro, vm_end_ver.micro + 1):
                    if version.parse(c.group('avax_start_ver')) >= version.parse(
                        MIN_AVAX_VERSION
                    ):
                        versions_comp.update(
                            {
                                f'{major}.{minor}.{micro}': {
                                    'ge': c.group('avax_start_ver'),
                                    'le': c.group('avax_end_ver')
                                    or c.group('avax_start_ver'),
                                }
                            }
                        )
    vms_versions_comp.update({vm: versions_comp})
# Resolve the vars file relative to this script, not the CWD.
vars_yaml_abs_path = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), VARS_YAML_PATH
)
with open(vars_yaml_abs_path) as vars_yaml:
    # Keep the raw header lines, parse the rest as YAML.
    vars_header = ''.join([vars_yaml.readline() for l in range(VARS_YAML_HEADER_SIZE)])
    vars_obj = yaml.load(vars_yaml, Loader=yaml.CLoader)
# Enrich the avalanchego_vms_list with the updated versions_comp maps.
for vm, v_comp in vms_versions_comp.items():
    vars_obj['avalanchego_vms_list'][vm]['versions_comp'] = v_comp
# Write next to the original as "<file>.updated" for manual review.
with open(vars_yaml_abs_path + '.updated', 'w') as vars_yaml:
    vars_yaml.write(vars_header + yaml.dump(vars_obj, Dumper=yaml.CDumper))
| AshAvalanche/ansible-avalanche-collection | scripts/update_vm_versions.py | update_vm_versions.py | py | 2,832 | python | en | code | 10 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.finditer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number... |
28923395310 | from enums import PositionX, PositionY
from constants import AmmoIndicator as Properties
from functions import get_surface
from models.GameObject import PositionalGameObject
import pygame as pg
class AmmoIndicator(PositionalGameObject):
    """HUD widget drawing one icon per round, full or empty, in a grid."""
    GROUP_NAME = 'ammo_indicator'
    def __init__(self, scene, *groups, position_x=PositionX.RIGHT, position_y=PositionY.BOTTOM):
        super().__init__(scene, position_x, position_y, *groups)
        # No weapon attached yet: zero capacity / zero rounds.
        self.max = 0
        self.current = 0
        self.full_image = pg.image.load(Properties.FULL_IMAGE_PATH)
        self.empty_image = pg.image.load(Properties.EMPTY_IMAGE_PATH)
        self.column_count = 0
        self.row_count = 0
        # NOTE(review): this triggers update_surface, which reads
        # self.width/self.height; those are first assigned in attach_weapon,
        # so the base class presumably provides defaults — confirm.
        self.set_current(self.max)
    def attach_weapon(self, weapon):
        """Size the grid for *weapon* and show its remaining ammo."""
        self.max = weapon.CAPACITY
        self.column_count = weapon.AMMO_INDICATOR_COLUMN_SIZE
        # Ceiling division: rows needed to fit `max` icons.
        self.row_count = (self.max + self.column_count - 1) // self.column_count
        # Total size = icons plus inter-icon offsets (offsets only if max > 0).
        self.width = Properties.WIDTH * min(self.max, self.column_count)
        self.width += Properties.OFFSET_X * (min(self.max, self.column_count) - 1) * int(self.max != 0)
        self.height = Properties.HEIGHT * self.row_count
        self.height += Properties.OFFSET_Y * (self.row_count - 1) * int(self.max != 0)
        self.set_current(weapon.get_remaining())
    def update_surface(self):
        """Redraw the grid: first `current` icons full, the rest empty."""
        image = get_surface(self.width, self.height)
        for i in range(self.max):
            if i < self.current:
                img = self.full_image
            else:
                img = self.empty_image
            # Grid placement: column = i % columns, row = i // columns.
            x = (Properties.WIDTH + Properties.OFFSET_X) * (i % self.column_count)
            y = (Properties.HEIGHT + Properties.OFFSET_Y) * (i // self.column_count)
            image.blit(img, (x, y))
        self.set_image(image)
        self.update_position()
    def set_current(self, value):
        """Set the displayed round count; out-of-range values are ignored."""
        if 0 <= value <= self.max:
            self.current = value
            self.update_surface()
| Thavin2147483648/shoot_platform | objects/AmmoIndicator.py | AmmoIndicator.py | py | 1,946 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.GameObject.PositionalGameObject",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "enums.PositionX.RIGHT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "enums.PositionX",
"line_number": 11,
"usage_type": "name"
},
{
"... |
6757765764 | from django.shortcuts import render
import subprocess
def index(request):
    """Download a torrent, convert the payload to MP4 and publish it.

    POST parameters:
        link: magnet/torrent URL handed to transmission-cli.
        cont: basename (without extension) for the published MP4.
    """
    if request.method == "POST":
        link = request.POST["link"]
        cont = request.POST["cont"]

        # Download the torrent (blocks until transmission-cli exits).
        subprocess.run(["transmission-cli", "-w", "./", link])

        # BUG FIX: subprocess.run with an argument list does NOT expand
        # shell wildcards, so "*.webm" etc. were passed literally and every
        # conversion failed. Expand the patterns with glob instead.
        import glob
        for pattern, out_name in (("*.webm", "webm.mp4"), ("*.mkv", "mkv.mp4"),
                                  ("*.avi", "avi.mp4"), ("*.ts", "ts.mp4")):
            for src in glob.glob(pattern):
                subprocess.run(["ffmpeg", "-i", src, "-c:v", "copy",
                                "-c:a", "copy", out_name])

        # Rename the converted file(s) (same wildcard fix as above).
        for mp4 in glob.glob("*.mp4"):
            subprocess.run(["mv", mp4, f"{cont}.mp4"])

        # Move the final file into the Apache2 document root.
        subprocess.run(["sudo", "mv", f"{cont}.mp4", "/var/www/html/"])

        # Report success back through the template.
        success_message = f"Torrent baixado com sucesso! Arquivo final: {cont}.mp4"
        return render(request, "index.html", {'success_message': success_message})
    return render(request, "index.html")
| SrTristeSad/Download-torrent | views.py | views.py | py | 1,169 | python | vi | code | 0 | github-code | 6 | [
{
"api_name": "subprocess.run",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"li... |
21071664643 | from dataclasses import dataclass
from typing import Union
import numpy as np
from matplotlib import pyplot as plt
@dataclass
class SpeakerSegment:
    # One detected speech span, measured in audio samples (16 kHz).
    start: int = 0
    # End sample; None while the segment is still open.
    end: Union[int, None] = None
@dataclass
class SplitStuff4Tw:
    # Candidate split point: mean VAD activation around the candidate index.
    threshold_value: float
    # Step index (not samples) at which the buffer could be split.
    split_index: int
class CustomSegmentationStrategy:
    """Turns a per-step VAD probability track into SpeakerSegment spans.

    One VAD value covers ``step_size`` (200) samples of 16 kHz audio;
    segment boundaries are reported in samples.
    """
    def __init__(self, analyze_only_seconds: int = 30):
        # Only this many seconds are considered when plotting.
        self.analyze_only_seconds = analyze_only_seconds
        # Preferred segment length bounds, in seconds.
        self.min_seconds_preferred = 6
        self.max_seconds_preferred = 15
        self.sampling_rate = 16000
        # Audio samples represented by one VAD step.
        self.step_size = 200
        self.step_size_seconds = self.step_size / 16000
        self.number_steps = int(self.analyze_only_seconds / self.step_size_seconds)
        self.median_probs = None
        # Activation threshold; derived from the track's median in segment().
        self.trig_sum = None
        # Window length (seconds / steps) for the silence decision.
        self.silence_seconds = 0.3
        self.silence_window_nr_steps = int(self.silence_seconds / self.step_size_seconds)
        # Rolling window (seconds / steps) for the speech-start decision.
        self.trigger_window_seconds = 4.0
        self.trigger_window_nr_steps = int(self.trigger_window_seconds / self.step_size_seconds)
    def is_silence(self, buffer_window):
        """True when the mean activation of *buffer_window* is below trig_sum."""
        if np.mean(buffer_window) < self.trig_sum:
            return True
        return False
    def is_above_threshold(self, buffer_window):
        """True when the mean activation of *buffer_window* exceeds trig_sum."""
        if np.mean(buffer_window) > self.trig_sum:
            return True
        return False
    def convert_steps_to_samples(self, steps):
        # 1 step is 200 samples or self.step_size
        return steps * self.step_size
    def create_better_split_long_length(self, buffer):
        """Pick a split index near the middle of *buffer* where VAD is lowest.

        Scans +/- 2 seconds around the midpoint in silence-window strides
        and returns the candidate step index with the smallest activation.
        """
        mid_of_clip = int(len(buffer) / 2)
        # 2 seconds each side
        two_seconds = 2 * 16000 / self.step_size
        thresholds = []
        for step_range in range(int(mid_of_clip - two_seconds), int(mid_of_clip + two_seconds),
                                self.silence_window_nr_steps):
            # NOTE(review): this is the mean of a SINGLE element
            # (buffer[i + k]); a slice buffer[i:i + k] looks intended — confirm.
            threshold_value = np.mean(buffer[step_range + self.silence_window_nr_steps])
            thresholds.append(SplitStuff4Tw(split_index=int(step_range + self.silence_window_nr_steps / 2),
                                            threshold_value=threshold_value))
        best_split = sorted(thresholds, key=lambda x: x.threshold_value, reverse=False)[0].split_index
        return best_split
    def create_better_split_short_length(self):
        # Placeholder: short-segment re-splitting is not implemented.
        pass
    def segment(self, speaker_vads: np.ndarray):
        """Scan the VAD track and return a list of SpeakerSegment spans."""
        # Threshold is anchored to the track's median activation.
        self.median = np.median(speaker_vads)
        self.trig_sum = 0.89 * self.median + 0.08
        final_segments = []
        is_speech = False
        current_buffer = []
        temp_speaker_values = None
        for i in range(len(speaker_vads)):
            current_activation = speaker_vads[i]
            current_buffer.append(current_activation)
            # Wait until the rolling window is fully populated.
            if not len(current_buffer) >= self.trigger_window_nr_steps:
                continue
            if not is_speech and self.is_above_threshold(current_buffer):
                # Speech onset: open a segment at the start of the window.
                is_speech = True
                temp_speaker_values = SpeakerSegment(start=self.convert_steps_to_samples(i - len(current_buffer) + 1))
            elif is_speech:
                # NOTE(review): the slice drops the LAST silence window
                # (buffer[:-k]) rather than selecting it (buffer[-k:]), and
                # the segment is closed on the non-silent branch below once
                # the minimum length is reached — verify the intended polarity.
                if self.is_silence(buffer_window=current_buffer[:-self.silence_window_nr_steps]):
                    if len(current_buffer) > self.sampling_rate * self.max_seconds_preferred / self.step_size:
                        # Segment too long: split it at a low-activation point
                        # near the middle. Todo: Do this recursively.
                        split_index = self.create_better_split_long_length(buffer=current_buffer)
                        temp_speaker_values.end = self.convert_steps_to_samples(
                            i - (len(current_buffer) - split_index) - 1)
                        final_segments.append(temp_speaker_values)
                        temp_speaker_values = SpeakerSegment(
                            start=self.convert_steps_to_samples(i - (len(current_buffer) - split_index) + 1),
                            end=self.convert_steps_to_samples(i))
                        final_segments.append(temp_speaker_values)
                        temp_speaker_values = None
                        is_speech = False
                        current_buffer = []
                elif len(current_buffer) < self.sampling_rate * self.min_seconds_preferred / self.step_size:
                    pass  # Below the minimum length: keep accumulating.
                else:
                    # Close the segment at the current step.
                    temp_speaker_values.end = self.convert_steps_to_samples(i)
                    final_segments.append(temp_speaker_values)
                    temp_speaker_values = None
                    is_speech = False
                    current_buffer = []
            else:
                # Not in speech: slide the window by dropping the oldest step.
                current_buffer.pop(0)
        return final_segments
    def plot_VAD(self, array_yo):
        """Plot the first analyze_only_seconds of the VAD track vs. time."""
        x = [self.step_size_seconds * i for i in range(self.number_steps)]
        plt.plot(x, array_yo[:self.number_steps])
        plt.show()
| centre-for-humanities-computing/Gjallarhorn | data_processing/custom_segmentation.py | custom_segmentation.py | py | 5,069 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
... |
72612614909 | """trt_face_detection.py
This script demonstrates how to do real-time face detection with
TensorRT optimized retinaface engine.
"""
import os
import cv2
import time
import argparse
import pycuda.autoinit # This is needed for initializing CUDA driver
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.face_detection import TRT_RetinaFace
from utils.prior_box import PriorBox
from data import cfg_mnet
WINDOW_NAME = 'Face_detection'
def parse_args():
    """Parse the command line: camera options plus the required --model."""
    desc = ('Capture and display live camera video, while doing '
            'real-time face detection with TensorRT optimized '
            'retinaface model on Jetson')
    parser = add_camera_args(argparse.ArgumentParser(description=desc))
    parser.add_argument(
        '-m', '--model', type=str, required=True,
        help=('[retinaface]-'
              '[{dimension}], where dimension could be a single '
              'number (e.g. 320, 640)'))
    return parser.parse_args()
def loop_and_detect(cam, trt_retinaface, priors, cfg):
    """Continuously capture images from camera and do face detection.

    # Arguments
      cam: the camera instance (video source).
      trt_retinaface: the TRT_RetinaFace face detector instance.
      priors: priors boxes with retinaface model
      cfg: retinaface model parameter configure
    """
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        # Stop when the user closed the display window.
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            break
        img = cam.read()
        if img is None:
            break
        facePositions, landmarks = trt_retinaface.detect(priors, cfg, img)
        # Draw one bounding box and five landmark dots (distinct colors,
        # BGR order) per detected face.
        for (x1, y1, x2, y2), landmark in zip(facePositions, landmarks):
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
            cv2.circle(img, (landmark[0], landmark[1]), 1, (0, 0, 255), 2)
            cv2.circle(img, (landmark[2], landmark[3]), 1, (0, 255, 255), 2)
            cv2.circle(img, (landmark[4], landmark[5]), 1, (255, 0, 255), 2)
            cv2.circle(img, (landmark[6], landmark[7]), 1, (0, 255, 0), 2)
            cv2.circle(img, (landmark[8], landmark[9]), 1, (255, 0, 0), 2)
        img = show_fps(img, fps)
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
        tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
def main():
    """Load the TRT retinaface engine and run the live detection loop."""
    args = parse_args()
    # The serialized engine must exist under retinaface/ before running.
    if not os.path.isfile('retinaface/%s.trt' % args.model):
        raise SystemExit('ERROR: file (retinaface/%s.trt) not found!' % args.model)
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    cfg = cfg_mnet
    # Model name ends with the (square) input dimension, e.g. "...-320".
    input_size = args.model.split('-')[-1]
    input_shape = (int(input_size), int(input_size))
    # Precompute the prior (anchor) boxes for this input shape.
    priorbox = PriorBox(cfg, input_shape)
    priors = priorbox.forward()
    trt_retinaface = TRT_RetinaFace(args.model, input_shape)
    open_window(
        WINDOW_NAME, 'Camera TensorRT Face Detection Demo',
        cam.img_width, cam.img_height)
    loop_and_detect(cam, trt_retinaface, priors, cfg)
    # Release the camera and close the display window on exit.
    cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| d246810g2000/tensorrt | face_recognition/trt_face_detection.py | trt_face_detection.py | py | 3,588 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "utils.camera.add_camera_args",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.ge... |
32124013729 |
import os
from gpt_interaction import process_content_with_gpt
from web_scraping import scrape_web_content
from utils import read_file, convert_pdf_to_text, convert_docx_to_text, convert_excel_to_csv
def process_files(input_dir, tmp_dir, output_dir):
    """Convert every file under *input_dir* to text, run it through the
    GPT-4 helper, and write the resulting JSON into *output_dir*."""
    converters = {
        '.pdf': convert_pdf_to_text,
        '.docx': convert_docx_to_text,
        '.xlsx': convert_excel_to_csv,
    }
    for root, dirs, files in os.walk(input_dir):
        for file in files:
            file_path = os.path.join(root, file)
            # Pick the converter matching the extension; plain read otherwise.
            for ext, convert in converters.items():
                if file.endswith(ext):
                    text_content = convert(file_path, tmp_dir)
                    break
            else:
                text_content = read_file(file_path)
            # Process text content with GPT-4 API.
            json_content, json_filename = process_content_with_gpt(text_content)
            # Save JSON content to the output directory.
            with open(os.path.join(output_dir, json_filename), 'w') as json_file:
                json_file.write(json_content)
def process_links(links_file, tmp_dir, output_dir):
    """Scrape each URL listed in *links_file* (one per line), convert the
    page text to JSON via GPT-4, and save it under *output_dir*."""
    with open(links_file, 'r') as fh:
        urls = fh.readlines()
    for url in urls:
        page_text = scrape_web_content(url.strip(), tmp_dir)
        # Process text content with GPT-4 API.
        json_content, json_filename = process_content_with_gpt(page_text)
        # Save JSON content to the output directory.
        out_path = os.path.join(output_dir, json_filename)
        with open(out_path, 'w') as json_file:
            json_file.write(json_content)
| vontainment/v-openai-data2json | file_handling.py | file_handling.py | py | 1,643 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "utils.convert_pdf_to_text",
"lin... |
43464163341 |
from datetime import timezone
import datetime
import pytz
from .send_mail_view import SendMailView
from django.test import RequestFactory
import pytest
class TestSendMailView:
    """Unit tests for SendMailView."""

    # Test that sending an email with correct parameters returns a 200 OK response.
    def test_send_mail_with_correct_parameters(self):
        view = SendMailView()
        request = RequestFactory().get('/')
        response = view.get(request)
        assert response.status_code == 200

    # Test that sending an email with incorrect parameters returns a 400
    # BAD REQUEST status code.  (BUG FIX: the original defined this method
    # twice with the same name; the duplicate silently shadowed the first.)
    def test_send_mail_with_incorrect_parameters_returns_bad_request(self):
        view = SendMailView()
        request = RequestFactory().get('/')
        response = view.enviar_correo(request)
        assert response.status_code == 400

    # Test that the conversion of UTC time to local time returns the correct time.
    def test_converting_utc_to_local_time(self):
        send_mail_view = SendMailView()
        # BUG FIX: `datetime(2022, ...)` called the *module* (TypeError);
        # the datetime class must be used.
        utc_time = datetime.datetime(2022, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
        local_time = send_mail_view.get_local_hour(utc_time)
        assert local_time == ('2022-01-01 07:00:00', 0, 0)

    # Test that the Fibonacci series generated from the local time is correct.
    def test_fibonacci_series_from_local_time(self):
        # BUG FIX: `timezone.now()` does not exist on datetime.timezone;
        # build an aware "now" with the stdlib instead.
        now = datetime.datetime.now(timezone.utc)
        view = SendMailView()
        fibonacci_result = view.get_fibo_fron_local_hour(now)
        assert fibonacci_result == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89,
                                    144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657]
| segpy/technical-tests | prote/drf-prote-test/apps/prote_test/views/test_send_mail_view.py | test_send_mail_view.py | py | 2,162 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "send_mail_view.SendMailView",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.test.RequestFactory",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "send_mail_view.SendMailView",
"line_number": 26,
"usage_type": "call"
},
{
... |
12177475498 | from django.urls import path
from . import views
urlpatterns = [
    # Page views (HTML).
    path('', views.homepage, name='home'),
    path('id<int:id>', views.profile, name='profile'),
    path('friends<int:user_id>', views.FriendsView.as_view(), name='friends'),
    path('edit', views.edit_profile, name='edit_profile'),
    path('friendship_request/<int:id>', views.friendship_request, name='friendship_request'),
    path('telegraph', views.DialogsView.as_view(), name='dialogs'),
    path('telegraph/dialogue/start/<user_id>', views.CreateDialogView.as_view(), name='start_dialog'),
    path('telegraph/dialogue/<chat_id>', views.MessagesView.as_view(), name='messages'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    # api
    # user posts
    path('api/add_post', views.PostAPIAdd.as_view(), name='post_create'),
    path('post_<int:postid>/update/<post_text>', views.PostAPIUpdate.as_view(), name='post_update'),
    path('post_<int:postid>/remove', views.PostAPIRemove.as_view(), name='post_remove'),
    path('post_<int:postid>/like', views.PostLikeAPIToggle.as_view(), name='post_like'),
    path('post_<int:postid>/comment', views.PostAPIAddComment.as_view(), name='post_comment'),
    # user
    path('user_create/<username>_<email>_<password>_<first_name>_<last_name>', views.UserAPICreate.as_view(), name='create_user'),
    path('user_update', views.UserAPIUpdate.as_view(), name='update_user'),
    path('user_update_status', views.UserAPIUpdateStatus.as_view(), name='update_status_user'),
    path('user_friendlist', views.UserAPIFriends.as_view(), name='friends_user'),
    # chat
    path('api/read/<chat_id>', views.ChatAPIMessagesRead.as_view(), name='api_read_messages'),
    path('api/get_unreaded/<chat_id>', views.ChatAPIMessagesUnreaded.as_view(), name='api_unreaded_messages'),
    path('api/send_message', views.ChatAPIMessagesSend.as_view(), name='api_send_message'),
    path('api/remove_message/<message_id>', views.ChatAPIMessagesRemove.as_view(), name='api_remove_message'),
    path('api/get_last_unreaded_message', views.ChatAPIMessagesGetUnreaded.as_view(), name='api_getlast_message'),
    # Disabled routes kept for reference:
    #path('get_mark_status/<int:id>', views.get_mark_status),
    #path('get_marks_count/<int:id>', views.get_marks_count),
    #path('/mail<int:id>', views.profile, name='mail'),
] | synchro123/LetsTalk | social/apps/main/urls.py | urls.py | py | 2,226 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
19773599717 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Runs IBIES gui
"""
from __future__ import absolute_import, division, print_function
import multiprocessing
import utool as ut
import ibeis # NOQA
import sys
# True when '--cmd' was passed; used after the GUI main loop to drop into an
# interactive debug exec of the loop's command string (see run_ibeis).
CMD = ut.get_argflag('--cmd')
# For Pyinstaller
#from ibeis.all_imports import * # NOQA
def dependencies_for_myprogram():
    """ Let pyinstaller find these modules.

    The imports below are never used at runtime; they exist only so that
    PyInstaller's static analysis bundles the named packages into the frozen
    executable.
    References:
        http://stackoverflow.com/questions/18596410/importerror-no-module-named-mpl-toolkits-with-maptlotlib-1-3-0-and-py2exe
    """
    from guitool.__PYQT__ import QtCore, QtGui  # Pyinstaller hacks # NOQA
    from PyQt4 import QtCore, QtGui  # NOQA
    #from PyQt4 import QtCore, QtGui # NOQA
    from scipy.sparse.csgraph import _validation  # NOQA
    from scipy.special import _ufuncs_cxx  # NOQA
    from mpl_toolkits.axes_grid1 import make_axes_locatable  # NOQA
    #import lru  # NOQA
    # Workaround for mpl_toolkits: touching __path__ presumably forces the
    # namespace package to resolve inside the frozen app (see SO link above).
    import importlib
    importlib.import_module('mpl_toolkits').__path__
def run_ibeis():
    r"""
    Main entry point: dispatches special command-line modes (rsync, dev
    shell, test runners, doctests) and otherwise starts the IBEIS GUI
    main loop.  Most branches terminate the process via sys.exit.

    CommandLine:
        python -m ibeis
        python -m ibeis find_installed_tomcat
        python -m ibeis get_annot_groundtruth:1
    """
    #ut.set_process_title('IBEIS_main')
    #main_locals = ibeis.main()
    #ibeis.main_loop(main_locals)
    #ut.set_process_title('IBEIS_main')
    # 'ibeis rsync ...' delegates to the database-sync script and exits.
    cmdline_varags = ut.get_cmdline_varargs()
    if len(cmdline_varags) > 0 and cmdline_varags[0] == 'rsync':
        from ibeis.scripts import rsync_ibeisdb
        rsync_ibeisdb.rsync_ibsdb_main()
        sys.exit(0)
    if ut.get_argflag('--devcmd'):
        # Hack to let devs mess around when using an installer version
        # TODO: add more hacks
        #import utool.tests.run_tests
        #utool.tests.run_tests.run_tests()
        ut.embed()
    # Run the tests of other modules (each branch exits with the test retcode)
    elif ut.get_argflag('--run-utool-tests'):
        import utool.tests.run_tests
        retcode = utool.tests.run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)
    elif ut.get_argflag('--run-vtool-tests'):
        import vtool.tests.run_tests
        retcode = vtool.tests.run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)
    elif ut.get_argflag(('--run-ibeis-tests', '--run-tests')):
        from ibeis.tests import run_tests
        retcode = run_tests.run_tests()
        print('... exiting')
        sys.exit(retcode)
    if ut.get_argflag('-e'):
        """
        ibeis -e print -a default -t default
        """
        # Run dev script if -e given
        import ibeis.dev  # NOQA
        ibeis.dev.devmain()
        print('... exiting')
        sys.exit(0)
    # Attempt to run a test using the function name alone
    # with the --tf flag
    import ibeis.tests.run_tests
    import ibeis.tests.reset_testdbs
    ignore_prefix = [
        #'ibeis.tests',
        'ibeis.control.__SQLITE3__',
        '_autogen_explicit_controller']
    ignore_suffix = ['_grave']
    func_to_module_dict = {
        'demo_bayesnet': 'ibeis.algo.hots.demobayes',
    }
    ut.main_function_tester('ibeis', ignore_prefix, ignore_suffix,
                            func_to_module_dict=func_to_module_dict)
    #if ut.get_argflag('-e'):
    #    import ibeis
    #    expt_kw = ut.get_arg_dict(ut.get_func_kwargs(ibeis.run_experiment),
    #                              prefix_list=['--', '-'])
    #    ibeis.run_experiment(**expt_kw)
    #    sys.exit(0)
    # Explicit doctest mode: run the doctests of one named module and exit.
    doctest_modname = ut.get_argval(
        ('--doctest-module', '--tmod', '-tm', '--testmod'),
        type_=str, default=None, help_='specify a module to doctest')
    if doctest_modname is not None:
        """
        Allow any doctest to be run the main ibeis script
        python -m ibeis --tmod utool.util_str --test-align:0
        python -m ibeis --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show
        python -m ibeis --tf request_ibeis_query_L0:0 --show
        ./dist/ibeis/IBEISApp --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show  # NOQA
        ./dist/ibeis/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-utool-tests
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-vtool-tests
        """
        print('[ibeis] Testing module')
        mod_alias_list = {
            'exptdraw': 'ibeis.expt.experiment_drawing'
        }
        doctest_modname = mod_alias_list.get(doctest_modname, doctest_modname)
        module = ut.import_modname(doctest_modname)
        (nPass, nTotal, failed_list, error_report_list) = ut.doctest_funcs(module=module)
        # Exit code 0 only if every doctest passed.
        retcode = 1 - (len(failed_list) == 0)
        #print(module)
        sys.exit(retcode)
    # Default path: start the GUI application and run its main loop.
    import ibeis
    main_locals = ibeis.main()
    execstr = ibeis.main_loop(main_locals)
    # <DEBUG CODE>
    # With --cmd, expose the backend objects and exec the loop's command
    # string for interactive poking.
    if 'back' in main_locals and CMD:
        #from ibeis.all_imports import * # NOQA
        back = main_locals['back']
        front = getattr(back, 'front', None)  # NOQA
        #front = back.front
        #ui = front.ui
        ibs = main_locals['ibs']  # NOQA
        exec(execstr)
    # </DEBUG CODE>
# </DEBUG CODE>
if __name__ == '__main__':
    # Required before any multiprocessing use in a frozen win32 executable.
    multiprocessing.freeze_support()  # for win32
    run_ibeis()
| smenon8/ibeis | ibeis/__main__.py | __main__.py | py | 5,256 | python | en | code | null | github-code | 6 | [
{
"api_name": "utool.get_argflag",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utool.get_cmdline_varargs",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "i... |
70779220668 | import math
import os
import re
from ast import literal_eval
from dataclasses import dataclass
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from accelerate.logging import get_logger
from accelerate.utils import is_tpu_available
from sklearn.metrics import accuracy_score, average_precision_score, roc_auc_score
logger = get_logger(__name__, "INFO")
@dataclass
class Task:
    """Prediction-task descriptor: output head size and problem type."""
    name: str
    num_classes: int
    property: str  # one of "binary", "multilabel", "multiclass"


def get_task(task_name):
    """Map a task name to its :class:`Task` spec by substring matching.

    Args:
        task_name: free-form task name; the first known family whose
            keyword occurs as a substring wins (order matters).

    Returns:
        Task: the matching task specification.

    Raises:
        ValueError: if ``task_name`` matches no known task family.
            (Previously an unknown name silently returned ``None``, which
            surfaced later as an opaque ``AttributeError``.)
    """
    # re.search is enough for a truthiness test; re.findall built a throwaway
    # list of every match.
    if re.search(r"mortality|readmission|los", task_name):
        return Task(task_name, 1, "binary")
    if re.search(r"diagnosis", task_name):
        return Task(task_name, 17, "multilabel")
    if re.search(r"creatinine|platelets", task_name):
        return Task(task_name, 5, "multiclass")
    if re.search(r"wbc|bicarbonate|sodium", task_name):
        return Task(task_name, 3, "multiclass")
    if re.search(r"hb", task_name):
        return Task(task_name, 4, "multiclass")
    raise ValueError(f"Unknown task name: {task_name!r}")
# To Load & Save n_epoch
class N_Epoch:
    """Callable epoch counter with torch-checkpoint-style (de)serialization."""

    def __init__(self):
        self.epoch = 0

    def __call__(self):
        """Return the current epoch number."""
        return self.epoch

    def increment(self):
        """Advance the counter by one epoch."""
        self.epoch = self.epoch + 1

    def state_dict(self):
        """Serialize the counter so it can be saved alongside a checkpoint."""
        return {"epoch": self.epoch}

    def load_state_dict(self, state_dict):
        """Restore the counter from a previously saved state dict."""
        self.epoch = state_dict["epoch"]
def load_model(path, model):
    """Load a checkpoint from ``path`` into ``model`` (non-strict) and return it.

    The positional-embedding weight is dropped before loading — presumably
    because its length can differ between checkpoints; verify against the
    training setup.
    """
    checkpoint = torch.load(path, map_location="cpu")
    checkpoint.pop("pred_model.model.embed_positions.weight", None)
    model.load_state_dict(checkpoint, strict=False)
    return model
def get_max_seq_len(args):
    """Compute the maximum event-sequence length over the cohort, padded up.

    Reads ``{src_data}_cohort.csv`` under ``{input_path}/{pred_time}h``,
    counts events per row (optionally skipping everything before the
    ``hi_start`` offset for ``args.time``) and rounds the maximum up to a
    multiple of 128.
    """
    df = pd.read_csv(
        os.path.join(
            args.input_path, f"{args.pred_time}h", f"{args.src_data}_cohort.csv"
        ),
        usecols=["time", "hi_start"],
    )
    if args.time >= 0:
        # "hi_start" holds a stringified list; pick the offset for this time.
        df["hi_start"] = df["hi_start"].map(literal_eval).map(lambda x: x[args.time])
    else:
        df["hi_start"] = 0
    # "time" is a comma-separated string, so commas + 1 == number of events.
    max_seq_len = df.apply(
        lambda x: x["time"].count(",") + 1 - x["hi_start"], axis=1
    ).max()
    # Pad up to the next multiple of 128.
    max_seq_len = math.ceil(max_seq_len / 128) * 128
    return max_seq_len
class EarlyStopping:
    """Early stops the training if the validation metric doesn't improve
    after a given patience.

    Caution on naming: ``compare_score(score)`` (bound to :meth:`increase`
    or :meth:`decrease`) returns True when the score did NOT improve, i.e.
    True advances the patience counter.  Its ``__name__`` is interpolated
    into the improvement log message, so the methods must keep their names.
    """
    def __init__(
        self, patience=7, verbose=True, delta=0, compare="increase", metric="avg_auroc"
    ):
        """
        Args:
            patience (int): How long to wait after last time the validation
                metric improved. Default: 7
            verbose (bool): If True, logs a message for each improvement.
                Default: True
            delta (float): Minimum change in the monitored quantity to
                qualify as an improvement. Default: 0
            compare (str): "increase" if larger metric values are better,
                anything else treats smaller values as better.
            metric (str): Name of the tracked metric (used in log messages).
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.target_metric_min = 0
        self.delta = delta
        self.compare_score = self.increase if compare == "increase" else self.decrease
        self.metric = metric
    def __call__(self, target_metric):
        """Record a new metric value; return True iff it is a new best."""
        update_token = False
        score = target_metric
        if self.best_score is None:
            self.best_score = score
        if self.compare_score(score):
            # No improvement: advance the patience counter.
            self.counter += 1
            logger.info(
                f"EarlyStopping counter: {self.counter} out of {self.patience} ({target_metric:.6f})",
                main_process_only=True,
            )
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: remember the new best and reset the counter.
            self.best_score = score
            if self.verbose:
                logger.info(
                    f"Validation {self.metric} {self.compare_score.__name__}d {self.target_metric_min:.6f} --> {target_metric:.6f})",
                    main_process_only=True,
                )
            self.target_metric_min = target_metric
            self.counter = 0
            update_token = True
        return update_token
    def increase(self, score):
        # True when `score` FAILED to increase past best + delta (no improvement).
        if score < self.best_score + self.delta:
            return True
        else:
            return False
    def decrease(self, score):
        # True when `score` FAILED to decrease below best + delta (no improvement).
        if score > self.best_score + self.delta:
            return True
        else:
            return False
    def state_dict(self):
        """Serialize the stopper so training can be resumed mid-run."""
        return {
            "best_score": self.best_score,
            "counter": self.counter,
            "early_stop": self.early_stop,
            "target_metric_min": self.target_metric_min,
        }
    def load_state_dict(self, state_dict):
        """Restore state previously produced by :meth:`state_dict`."""
        self.best_score = state_dict["best_score"]
        self.counter = state_dict["counter"]
        self.early_stop = state_dict["early_stop"]
        self.target_metric_min = state_dict["target_metric_min"]
    def reset(self):
        # Clears the stop flag/counter but keeps best_score.
        self.counter = 0
        self.early_stop = False
def log_from_dict(metric_dict, split, n_epoch):
    """Wrap per-split metrics together with the epoch number for logging."""
    return {"epoch": n_epoch, split: metric_dict}
class PredLoss:
    """Multi-task prediction loss: BCE for binary/multilabel heads, NLL for
    multiclass heads, with masking of missing labels."""
    def __init__(self, args):
        self.args = args
        # How to drop na in binary??
        self.bce = nn.BCELoss(reduction="sum")
        self.ce = nn.NLLLoss(reduction="sum", ignore_index=-1)
        self.sim = nn.CosineSimilarity(dim=-1)
    def __call__(self, output, reprs):
        """Compute the summed loss over all tasks present in ``output``.

        Args:
            output: dict with "pred" and "target" sub-dicts keyed by task name.
            reprs: unused here (kept for call-site compatibility).

        Returns:
            (total_loss, logging_outputs) where logging_outputs carries the
            per-task predictions, targets, losses and validity masks.
        """
        # NOTE: If null label is too many in binary/multilabel, it will be cause a nan loss.
        losses, preds, truths, masks = {}, {}, {}, {}
        loss_total = 0
        # To suport Rag Retriever
        tasks = [i for i in self.args.tasks if i.name in output["target"].keys()]
        for task in tasks:
            pred = output["pred"][task.name]
            target = output["target"][task.name]
            if task.property == "binary":
                # Calculate mask for -1(NaN): zero out unlabeled entries so
                # they contribute nothing to the summed BCE.
                mask = (target != -1).bool()
                pred = mask * pred
                target = mask * target
                loss = self.bce(pred, target)
            elif task.property == "multilabel":
                # A row with an all-zero label vector is treated as unlabeled.
                mask = (target.sum(axis=-1) > 0).bool().unsqueeze(-1)
                pred = mask * pred
                target = mask * target
                # Normalize by class count so tasks are comparable in scale.
                loss = self.bce(pred, target) / task.num_classes
            elif task.property == "multiclass":
                mask = (target.sum(axis=-1) > 0).bool().unsqueeze(-1)
                nl = (pred + 1e-10).log() # For numerical Stability
                pred = mask * pred
                nl = mask * nl
                target = mask * target
                # NLLLoss with ignore_index=-1; targets come one-hot, argmax
                # recovers the class index.
                loss = self.ce(nl, target.argmax(dim=1))
            else:
                raise NotImplementedError()
            losses[task.name] = loss / self.args.local_batch_size
            preds[task.name] = pred
            truths[task.name] = target
            masks[task.name] = mask
            loss_total += loss
        logging_outputs = {
            # SHould detach or not??
            "loss_total": loss_total,
            "preds": preds,
            "truths": truths,
            "losses": losses,
            "masks": masks,
        }
        return loss_total, logging_outputs
class BaseMetric:
    """Base class for metric accumulators.

    Subclasses must implement :meth:`reset`, :meth:`__call__` and
    :meth:`get_metrics`.
    """
    def __init__(self, args, target):
        self.args = args
        self._update_target = target
        self.is_tpu = is_tpu_available()
        # Subclasses allocate their accumulators in reset().
        self.reset()
    def reset(self):
        raise NotImplementedError()
    def __call__(self, out, accelerator=None):
        raise NotImplementedError()
    def get_metrics(self):
        raise NotImplementedError()
    def gather(self, accelerator, *args):
        """Gather tensors across processes (no-op when accelerator is None)."""
        if accelerator is not None:
            args = accelerator.gather_for_metrics(args)
        # Promote 0-d tensors to 1-d so they can be concatenated later.
        args = [(i if i.shape else i.unsqueeze(0)) for i in args]
        if len(args) == 1:
            return args[0]
        else:
            return args
    @property
    def compare(self):
        # Early-stopping direction: loss-like targets shrink, others grow.
        return "decrease" if "loss" in self.update_target else "increase"
    @property
    def update_target(self):
        # Name of the metric used for model selection / early stopping.
        return self._update_target
class PredMetric(BaseMetric):
    """Accumulates per-task predictions over an epoch and reports loss,
    AUROC, AUPRC and accuracy (plus their cross-task averages)."""
    def __init__(self, args, target="avg_auroc"):
        self.tasks = args.tasks
        super().__init__(args, target)
    def reset(self):
        # One growing list of per-batch arrays per task.
        self.losses = {k.name: [] for k in self.tasks}
        self.truths = {k.name: [] for k in self.tasks}
        self.preds = {k.name: [] for k in self.tasks}
        self.masks = {k.name: [] for k in self.tasks}
    def __call__(self, out, accelerator=None):
        """Accumulate one batch of logging outputs (see PredLoss)."""
        # NOTE: On train step, only compute metrics for the master process
        tasks = [i for i in self.tasks if i.name in out["preds"].keys()]
        for task in tasks:
            mask = out["masks"][task.name]
            if task.property != "binary":
                # Non-binary masks carry a trailing singleton dim; drop it.
                mask = mask.squeeze(-1)
            truth = out["truths"][task.name]
            pred = out["preds"][task.name]
            loss = out["losses"][task.name]
            truth, pred, mask, loss = self.gather(accelerator, truth, pred, mask, loss)
            self.truths[task.name].append(truth.detach().cpu().float().numpy())
            self.preds[task.name].append(pred.detach().cpu().float().numpy())
            self.losses[task.name].append(loss.detach().cpu().float().numpy())
            self.masks[task.name].append(mask.detach().cpu().numpy())
    def get_metrics(self):
        """Finalize the epoch: compute per-task and averaged metrics, then reset."""
        # For REMed
        tasks = [i for i in self.tasks if len(self.preds[i.name]) != 0]
        for task in tasks:
            self.losses[task.name] = np.concatenate(self.losses[task.name], 0)
            self.truths[task.name] = np.concatenate(self.truths[task.name], 0)
            self.preds[task.name] = np.concatenate(self.preds[task.name], 0)
            self.masks[task.name] = np.concatenate(self.masks[task.name], 0)
            # Boolean-mask out rows with missing labels before scoring.
            self.truths[task.name] = self.truths[task.name][self.masks[task.name]]
            self.preds[task.name] = self.preds[task.name][self.masks[task.name]]
        self.epoch_dict = {}
        for task in tasks:
            self.epoch_dict[task.name + "_loss"] = np.mean(self.losses[task.name])
            self.epoch_dict[task.name + "_auprc"] = self.auprc(task)
            self.epoch_dict[task.name + "_auroc"] = self.auroc(task)
            self.epoch_dict[task.name + "_acc"] = self.acc(task)
        # Averages are computed by substring match over the keys built above.
        self.epoch_dict["avg_loss"] = np.mean(
            [self.epoch_dict[k] for k in self.epoch_dict.keys() if "loss" in k]
        )
        self.epoch_dict["avg_auprc"] = np.mean(
            [self.epoch_dict[k] for k in self.epoch_dict.keys() if "auprc" in k]
        )
        self.epoch_dict["avg_auroc"] = np.mean(
            [self.epoch_dict[k] for k in self.epoch_dict.keys() if "auroc" in k]
        )
        self.epoch_dict["avg_acc"] = np.mean(
            [self.epoch_dict[k] for k in self.epoch_dict.keys() if "acc" in k]
        )
        self.reset()
        return self.epoch_dict
    def auroc(self, task):
        """Micro-averaged ROC-AUC over the masked predictions."""
        return roc_auc_score(
            self.truths[task.name],
            self.preds[task.name],
            average="micro",
            multi_class="ovr",
        )
    def auprc(self, task):
        """Micro-averaged average precision over the masked predictions."""
        return average_precision_score(
            self.truths[task.name],
            self.preds[task.name],
            average="micro",
        )
    def acc(self, task):
        """Accuracy: rounded probabilities for binary/multilabel, argmax for multiclass."""
        if task.property in ["binary", "multilabel"]:
            return accuracy_score(
                self.truths[task.name].round(), self.preds[task.name].round()
            )
        elif task.property == "multiclass":
            return accuracy_score(
                self.truths[task.name].argmax(axis=1),
                self.preds[task.name].argmax(axis=1),
            )
        else:
            raise NotImplementedError()
| starmpcc/REMed | src/utils/trainer_utils.py | trainer_utils.py | py | 11,900 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "accelerate.logging.get_logger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.fin... |
42206728289 | import torch
from torch import nn
from torch.autograd import Variable
import torch.functional as F
from torch.optim import Adam
from torchvision.models import resnet50
# self
from vis import Vis
import vars
from data_loader import get_data_loader
from test import test
def train(epoch, model, train_loader, criterion, optimizer, vis):
    """Run one training epoch, reporting progress every 30 batches.

    Args:
        epoch: zero-based epoch index (displayed as ``epoch + 1``).
        model: network to train (expected to already live on the GPU).
        train_loader: DataLoader yielding ``(data, label)`` batches.
        criterion: loss function.
        optimizer: optimizer stepping ``model``'s parameters.
        vis: visualization helper exposing ``update_train``.
    """
    model.train()
    for i, (data, label) in enumerate(train_loader):
        # torch.autograd.Variable has been a deprecated no-op wrapper since
        # torch 0.4 — move the tensors to the GPU directly instead.
        data = data.cuda()  # gpu
        label = label.cuda()  # gpu
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        if i % 30 == 0:
            status = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch + 1, i * len(data), len(train_loader.dataset),
                100. * i / len(train_loader), loss.item())
            print(status)
            vis.update_train(x=torch.Tensor([epoch + i / len(train_loader)]),
                             y=torch.Tensor([loss.item()]), status=status)
if __name__ == '__main__':
    # load data and init
    train_loader, test_loader = get_data_loader()
    vis = Vis('bba_race resnet')
    # model: ResNet-50 with its final FC layer replaced for 20 classes
    model = resnet50()
    input_size = model.fc.in_features
    model.fc = nn.Linear(input_size, 20) # output 20 category
    # load exist: set `checkpoints` to a saved state-dict path to resume
    # checkpoints = vars.checkpoint_path + 'res_net50_0.14.pt'
    checkpoints = ''
    if checkpoints:
        model.load_state_dict(torch.load(checkpoints)) # load exist model
    model.cuda() # gpu
    # criterion, optimizer
    criterion = nn.CrossEntropyLoss().cuda() # gpu
    optimizer = Adam(model.parameters(), lr=0.01)
    epoches = 1
    for epoch in range(epoches):
        train(epoch, model, train_loader, criterion, optimizer, vis)
        # save the model (one checkpoint per epoch), then evaluate
        torch.save(model.state_dict(), vars.checkpoint_path + 'res_net50_{}.pt'.format(epoch))
        test(epoch, model, test_loader, criterion, vis)
| DragonChen-TW/2018_bba_race | model/train.py | train.py | py | 1,970 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.autograd.Variable",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "vis.update_train",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torc... |
21681869620 | from datetime import date
from time import sleep
# Ask which year to analyse; 0 means "use the current year".
ano=int(input('Que ano quer analisar? Colo que 0 para analisar o ano atual:'))
print('Processando...')
sleep(2)  # cosmetic pause to simulate "processing"
###################################################################################
if(ano==0):
    ano=date.today().year
# Gregorian leap-year rule: divisible by 4 and not by 100, or divisible by 400.
if((ano % 4 == 0) and (ano % 100 != 0) or (ano % 400 == 0)):
    print('O ano {} é BISSEXTO.'.format(ano))
else:
    print('O ano {} NÂO É BISSEXTO.'.format(ano)) | VitorFidelis/Exercicios-Python | Desafio032.py | Desafio032.py | py | 448 | python | gl | code | 2 | github-code | 6 | [
{
"api_name": "time.sleep",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 8,
"usage_type": "name"
}
] |
72532883389 | """
Multi-scale rabbit cardiac electrophysiology models
Rabbit Soltis-Saucerman model with full b-AR signalling (Rabbit SS 1D cardiac)
$ cd examples
$ make install-ci
$ make .env
SEE https://sparc.science/datasets/4?type=dataset
"""
import os
import sys
import time
from pathlib import Path
from time import sleep
from typing import Optional
import osparc
from dotenv import load_dotenv
from osparc.models import File, JobStatus
# Pin the client version: the calls below follow the 0.4.3 API surface.
assert osparc.__version__ == "0.4.3"
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
data_dir = current_dir / "data_rabbit_cardiac"
load_dotenv()
cfg = osparc.Configuration(
    host=os.environ.get("OSPARC_API_URL", "http://127.0.0.1:8006"),
    username=os.environ["OSPARC_API_KEY"],
    password=os.environ["OSPARC_API_SECRET"],
)
print("Entrypoint", cfg.host)
with osparc.ApiClient(cfg) as api_client:
    # Upload init states file.
    files_api = osparc.FilesApi(api_client)
    initial_wtstates_file = files_api.upload_file(
        str(data_dir / "initial_WTstates.txt")
    )
    # Create our simulation.
    solvers_api = osparc.SolversApi(api_client)
    solver = solvers_api.get_solver_release(
        "simcore/services/comp/rabbit-ss-1d-cardiac-model", "1.0.0"
    )
    # SEE data_rabbit_cardiac/ss1d_meta.json::inputs
    job = solvers_api.create_job(
        solver.id,
        solver.version,
        osparc.JobInputs(
            {
                "Na": 0,
                "GKr": 1,
                "TotalSimulationTime": 50,
                "TargetHeartRatePhase1": 60,
                "TargetHeartRatePhase2": 150,
                "TargetHeartRatePhase3": 60,
                "cAMKII": "WT",
                "tissue_size_tw": 165,
                "tissue_size_tl": 165,
                "Homogeneity": "homogeneous",
                "num_threads": 4,
                "initialWTStates": initial_wtstates_file,
            }
        ),
    )
    print("Job created", job)
    # Start our simulation.
    status = solvers_api.start_job(solver.id, solver.version, job.id)
    start_t = time.perf_counter()
    # Poll the status of our simulation until it has completed.
    while True:
        status = solvers_api.inspect_job(solver.id, solver.version, job.id)
        print(
            f">>> Progress: {status.progress}% ",
            f"[elapsed:{time.perf_counter() - start_t:4.2f}s]...",
            flush=True,
        )
        if status.progress == 100:
            break
        sleep(1)
    # Retrieve our simulation outputs.
    print("---------------------------------------")
    last_status: JobStatus = solvers_api.inspect_job(solver.id, solver.version, job.id)
    print(">>> What is the status?", last_status)
    outputs = solvers_api.get_job_outputs(solver.id, solver.version, job.id)
    # SEE data_rabbit_cardiac/ss1d_meta.json::outputs
    for output_name, result in outputs.results.items():
        print(f">>> {output_name} = {result}")
    # Retrieve our simulation results.
    print("---------------------------------------")
    result: Optional[File]
    for output_name, result in outputs.results.items():
        if result is None:
            # BUGFIX: this message was a plain string, so "{output_name}"
            # printed literally instead of the actual output name.
            print(
                f"Can't retrieve our simulation results {output_name}...?!",
                "Failed ?",
                last_status.state,
                "Finished ?",
                last_status.progress == 100 or not last_status.stopped_at,
            )
        else:
            # Print out the id of our simulation results file.
            print("---------------------------------------")
            print(">>> ", result.id)
            # Download our simulation results file.
            download_path: str = files_api.download_file(result.id)
            print("Downloaded to", download_path)
            print("Content-Type: ", result.content_type)
            if result.content_type == "text/plain":
                print("Result:", Path(download_path).read_text()[:100])
                print("Status: ", Path(download_path).stat())
    # List all the files that are available.
    print("---------------------------------------")
    print(files_api.list_files())
| ITISFoundation/osparc-simcore | tests/public-api/examples/rabbit_cardiac_ss1d.py | rabbit_cardiac_ss1d.py | py | 4,184 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "osparc.__version__",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv... |
7239186990 | import cv2 as cv
import numpy as np
import imutils
path = "/home/pks/Downloads/Assignment/IVP/mini project/"
def orientation(image):
    '''
    Rotate the image before any operation, based on the position of the
    roll-no. box relative to the number table: the two largest external
    contours are assumed to be the table (largest) and the roll-no. box
    (second largest).  If the box is to the right of the table the page is
    warped 90 deg one way, if to the left the other way; otherwise the
    image is returned unchanged.
    '''
    row, col = image.shape[:2]
    thresh = cv.Canny(image, 40, 90)
    thresh = cv.dilate(thresh, None, iterations=1)
    '''Find max (Number table) and 2nd max (Roll no. box) contour'''
    cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=lambda x: cv.contourArea(x), reverse=True)
    c1, c2 = cnts[:2]
    # Min-area bounding rectangles of the two regions, as 4 corner points.
    rect1, rect2 = cv.minAreaRect(c1), cv.minAreaRect(c2)
    box1, box2 = cv.boxPoints(rect1), cv.boxPoints(rect2)
    # Max: left-most and right-most corners of the table box
    box1 = sorted(box1, key=lambda x: x[0])
    r_most1, l_most1 = box1[-1], box1[0]
    # 2nd Max: left-most and right-most corners of the roll-no. box
    box2 = sorted(box2, key=lambda x: x[0])
    r_most2, l_most2 = box2[-1], box2[0]
    C1, C2 = min(col, row), max(col, row)  # NOTE(review): unused — candidates for removal
    x,y = 600, 800
    # Source quad = full image corners; destination chosen to rotate 90 deg.
    pts1 = np.float32([[0,row], [0,0], [col,row], [col,0]])
    '''Roll no box is at right of number table, rotate left'''
    if l_most2[0] >= r_most1[0]:
        pts2 = np.float32([[x,y], [0,y], [x,0], [0,0]])
    elif r_most2[0] <= l_most1[0]:
        '''Opposite, rotate right'''
        pts2 = np.float32([[0,0], [x,0], [0,y], [x,y]])
    else:
        # Boxes overlap horizontally: orientation is assumed already correct.
        return image
    M = cv.getPerspectiveTransform(pts1,pts2)
    image = cv.warpPerspective(image,M,(x,y))
    return image
'''END'''
def intersection_bw_2_lines(l1, l2):
    """Return the ``[x, y]`` intersection point of two Hough lines.

    Each line is given in normal form ``(rho, theta)``, i.e. the set of
    points satisfying ``x*cos(theta) + y*sin(theta) = rho``.  The two
    equations are solved as a 2x2 linear system and the solution is
    rounded to integer pixel coordinates.

    Parameters:
        l1 : first line as (rho, theta)
        l2 : second line as (rho, theta)
    Returns:
        [x, y] integer coordinates of the intersection point
    """
    rho1, theta1 = l1
    rho2, theta2 = l2
    coeffs = np.array([[np.cos(theta1), np.sin(theta1)],
                       [np.cos(theta2), np.sin(theta2)]])
    rhos = np.array([[rho1], [rho2]])
    x, y = np.linalg.solve(coeffs, rhos)
    return [int(np.round(x)), int(np.round(y))]
def remove_mult_lines(set_of_lines, dist):
    """Collapse bunches of nearly-coincident Hough lines into single lines.

    Lines are sorted by ``(|rho|, theta)`` and greedily clustered: while the
    next line's |rho| is within ``dist`` of the cluster's last member, it
    joins the cluster; otherwise the cluster's middle element is emitted and
    a new cluster starts.

    Parameters:
        set_of_lines : iterable of (rho, theta) pairs
        dist : maximum allowed |rho| gap inside one cluster
    Returns:
        Well-separated list of (rho, theta) lines.
    """
    ordered = sorted(set_of_lines, key=lambda l: (abs(l[0]), l[1]))
    merged = []
    # The first line seeds the cluster AND is revisited by the loop below,
    # so it is intentionally counted twice inside the first cluster.
    cluster = [ordered[0]]
    last = len(ordered) - 1
    for idx, line in enumerate(ordered):
        if abs(abs(line[0]) - abs(cluster[-1][0])) <= dist:
            cluster.append(line)
            if idx == last:
                merged.append(cluster[len(cluster) // 2])
        else:
            merged.append(cluster[len(cluster) // 2])
            cluster = [line]
            if idx == last:
                merged.append(line)
    return merged
def extract_roi(image):
    '''
    Extract the marks-table from the image and divide it into cells.

    Pipeline: reorient + resize -> edge map -> largest contour gives the
    table region -> perspective-warp it upright -> Hough lines -> dedupe
    near-identical lines -> intersect grid lines into cell corners.
    Note: the cv.imshow/cv.waitKey calls make this function block for a
    keypress at each stage (debug visualization).

    Parameters:
        image : Given image (BGR)
    Returns:
        (warped table image, list of four corner points per rectangular cell)
    '''
    image = orientation(image.copy())
    image = cv.resize(image.copy(), (600, 800))
    cv.imshow("org", image)
    cv.waitKey(0)
    # Convert to gray image
    gr_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    # Edge map via Canny
    thresh = cv.Canny(gr_image, 40, 120)
    # Closing to bridge small gaps in the table borders
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2, 3))
    thresh = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
    row, col = image.shape[:2]
    cv.imshow("thresh", thresh)
    cv.waitKey(0)
    # ROI Detection <--start-->
    cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    cnt = [list(el[0]) for el in c]
    '''Removing some araeas not needed'''
    # Shift the bottom edge up by 35px, build the line through the shifted
    # corners (a*i + b*j + c = 0) and blank out every edge pixel on the far
    # side of that line relative to the origin.
    b_r = max(cnt, key=lambda x: x[0]+x[1])
    b_l = min(cnt, key=lambda x: x[0]-x[1])
    b_r[1] = b_r[1] - 35
    b_l[1] = b_l[1] - 35
    m = (b_l[1]-b_r[1]) / (b_l[0]-b_r[0])
    a, b, c = 1, (-1)*m, m*b_l[0] - b_l[1]
    org_sign = a*0 + b*0 + c
    # Signed distance of every pixel (i=row, j=col) to that line.
    thresh_r = np.array([np.array([(a*i + b*j + c) for j in range(col)]) for i in range(row)])
    if org_sign > 0:
        thresh[thresh_r < 0] = 0
    else:
        thresh[thresh_r > 0] = 0
    '''END'''
    '''Contour detection for extract the ROI'''
    cnts = cv.findContours(thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    cnt = [list(el[0]) for el in c]
    '''Four corners ofthe ROI'''
    # Extremal corners via x+y / x-y, padded outward by a couple of pixels.
    b_r = max(cnt, key=lambda x: x[0]+x[1])
    t_l = min(cnt, key=lambda x: x[0]+x[1])
    t_r = max(cnt, key=lambda x: x[0]-x[1])
    b_l = min(cnt, key=lambda x: x[0]-x[1])
    b_r[0], b_r[1] = b_r[0] + 2, b_r[1] + 0
    b_l[0], b_l[1] = b_l[0] - 2, b_l[1] + 0
    t_r[0], t_r[1] = t_r[0] + 2, t_r[1] - 2
    t_l[0], t_l[1] = t_l[0] - 2, t_l[1] - 2
    '''Extract only the ROI'''
    w,h = 800, 600
    # pts1 = np.float32(crop)
    pts1 = np.float32([t_l, t_r, b_l, b_r])
    # w,h = image.shape
    pts2 = np.float32([[0,0], [h,0], [0,w], [h,w]])
    M = cv.getPerspectiveTransform(pts1,pts2)
    image = cv.warpPerspective(image,M,(h,w))
    # ROI Detection <--end-->
    cv.imshow("org", image)
    cv.waitKey(0)
    gr_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    # TODO : Canny edge detection parameters
    edges = cv.Canny(gr_image, 45, 90)
    cv.imshow("edges", edges)
    cv.waitKey(0)
    # Hough Line Detection
    lines = cv.HoughLines(edges,1,np.pi/180,150)
    # Removing multiple ambiguous Lines <--start-->
    points = np.array([[line[0][0], line[0][1]] for line in lines])
    pi_val = np.pi
    # Split by theta: near 0 / pi / 2*pi -> vertical; near pi/2 or 3*pi/2 -> horizontal.
    v1 = list(filter(lambda x: x[1]>=0 and x[1]<pi_val/4, points))
    v2 = list(filter(lambda x: x[1]>=(3*pi_val)/4 and x[1]<(5*pi_val)/4, points))
    v3 = list(filter(lambda x: x[1]>=(7*pi_val)/4 and x[1]<=pi_val*2, points))
    vertical = v1 + v2 + v3
    h1 = list(filter(lambda x: x[1]>=pi_val/4 and x[1]<(3*pi_val)/4, points))
    h2 = list(filter(lambda x: x[1]>=(5*pi_val)/4 and x[1]<(7*pi_val)/4, points))
    horizontal = h1 + h2
    h_lines = remove_mult_lines(horizontal, 15)
    v_lines = remove_mult_lines(vertical, 15)
    lines = h_lines + v_lines
    # # Removing multiple ambiguous Lines <--end-->
    # Drawing the lines (debug overlay in red)
    line_image = image.copy()
    for rho, theta in lines:
        # rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv.line(line_image,(x1,y1),(x2,y2),(0,0,255),1)
    cv.imshow("lines", line_image)
    cv.waitKey(0)
    ret_cell = []
    # Detecting cells: each cell is bounded by two adjacent horizontal and
    # two adjacent vertical lines; its corners come from their intersections.
    counter = 1
    # Skip the first horizontal band when an extra (header) line was found.
    if len(h_lines) >= 14:
        start = 1
    else:
        start = 0
    for i in range(start,len(h_lines)-1):
        for j in range(1,len(v_lines)-1):
            hl1, hl2 = h_lines[i], h_lines[i+1]
            vl1, vl2 = v_lines[j], v_lines[j+1]
            p1 = intersection_bw_2_lines(hl1, vl1)
            p2 = intersection_bw_2_lines(hl1, vl2)
            p3 = intersection_bw_2_lines(hl2, vl1)
            p4 = intersection_bw_2_lines(hl2, vl2)
            ret_cell.append([p1, p2, p3, p4])
            # cell = image[p1[1]:p3[1]+1, p1[0]:p2[0]+1]
            # cv.imwrite(path + "img" + str(counter) + ".jpg", cell)
            # counter = counter + 1
    cv.destroyAllWindows()
    return image, ret_cell | pritamksahoo/III-IV-YEAR-Assignments | IVP/extract_ROI.py | extract_ROI.py | py | 8,042 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "cv2.Canny",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_n... |
71781169789 | # Calculate FPS (Frames per second)
import cv2
from timeit import default_timer as timer
# Open the default (index 0) webcam.
camera = cv2.VideoCapture(0)
frame_count = 0
total_time = 0
while camera.isOpened():
    start_time = timer()
    _, frame = camera.read()
    frame_count += 1
    # Only the capture (read) time is accumulated, so the printed FPS
    # reflects camera read speed; imshow/waitKey time is excluded.
    elapsed_time = timer() - start_time
    total_time += elapsed_time
    # Cumulative average over the whole run, not an instantaneous rate.
    FPS = float(frame_count / total_time)
    print(f"FPS: {FPS:.3f}")
    cv2.imshow('Webcam 0', frame)
    # Press "q" to exit program
    if cv2.waitKey(1) == ord('q'):
        break
# Release the frames
camera.release()
# Destroy all windows
cv2.destroyAllWindows()
| yptheangel/opencv-starter-pack | python/basic/calculate_FPS.py | calculate_FPS.py | py | 596 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imshow"... |
30367005261 | from traits.api import Any, Enum, Int, Property, Union
from enable.api import NativeScrollBar
from .chaco_traits import Optional
class PlotScrollBar(NativeScrollBar):
"""
A ScrollBar that can be wired up to anything with an xrange or yrange
and which can be attached to a plot container.
"""
# The axis corresponding to this scrollbar.
axis = Enum("index", "value")
# The renderer or Plot to attach this scrollbar to. By default, this
# is just self.component.
plot = Property
# The mapper for associated with the scrollbar. By default, this is the
# mapper on **plot** that corresponds to **axis**.
mapper = Property
# ------------------------------------------------------------------------
# Private traits
# ------------------------------------------------------------------------
# The value of the override plot to use, if any. If None, then uses
# self.component.
_plot = Any()
# The value of the override mapper to use, if any. If None, then uses the
# mapper on self.component.
_mapper = Any()
# Stores the index (0 or 1) corresponding to self.axis
_axis_index = Optional(Int)
# ----------------------------------------------------------------------
# Public methods
# ----------------------------------------------------------------------
    def force_data_update(self):
        """This forces the scrollbar to recompute its range bounds.  This
        should be used if datasources are changed out on the range, or if
        the data ranges on existing datasources of the range are changed.
        """
        # Recompute exactly as if the dataspace had notified us of a change.
        self._handle_dataspace_update()
    def overlay(self, component, gc, view_bounds=None, mode="default"):
        """Draw the scrollbar as an overlay on ``component``."""
        self.do_layout()
        self._draw_mainlayer(gc, view_bounds, "default")
    def _draw_plot(self, gc, view_bounds=None, mode="default"):
        """Draw the scrollbar when rendered as a regular component."""
        self._draw_mainlayer(gc, view_bounds, "default")
    def _do_layout(self):
        """Size and position the scrollbar to match the mapper's screen span."""
        # Make sure the plot's own layout is current before reading bounds.
        if getattr(self.plot, "_layout_needed", False):
            self.plot.do_layout()
        # Index (0 or 1) into bounds/position for the scroll direction.
        axis = self._determine_axis()
        low, high = self.mapper.screen_bounds
        self.bounds[axis] = high - low
        self.position[axis] = low
        self._widget_moved = True
def _get_abs_coords(self, x, y):
if self.container is not None:
return self.container.get_absolute_coords(x, y)
else:
return self.component.get_absolute_coords(x, y)
# ----------------------------------------------------------------------
# Scrollbar
# ----------------------------------------------------------------------
def _handle_dataspace_update(self):
# This method reponds to changes from the dataspace side, e.g.
# a change in the range bounds or the data bounds of the datasource.
# Get the current datasource bounds
range = self.mapper.range
bounds_list = [
source.get_bounds()
for source in range.sources
if source.get_size() > 0
]
mins, maxes = zip(*bounds_list)
dmin = min(mins)
dmax = max(maxes)
view = float(range.high - range.low)
# Take into account the range's current low/high and the data bounds
# to compute the total range
totalmin = min(range.low, dmin)
totalmax = max(range.high, dmax)
# Compute the size available for the scrollbar to scroll in
scrollrange = (totalmax - totalmin) - view
if round(scrollrange / 20.0) > 0.0:
ticksize = scrollrange / round(scrollrange / 20.0)
else:
ticksize = 1
foo = (totalmin, totalmax, view, ticksize)
self.trait_setq(
range=foo,
scroll_position=max(
min(self.scroll_position, totalmax - view), totalmin
),
)
self._scroll_updated = True
self.request_redraw()
def _scroll_position_changed(self):
super()._scroll_position_changed()
# Notify our range that we've changed
range = self.mapper.range
view_width = range.high - range.low
new_scroll_pos = self.scroll_position
range.set_bounds(new_scroll_pos, new_scroll_pos + view_width)
# ----------------------------------------------------------------------
# Event listeners
# ----------------------------------------------------------------------
def _component_changed(self, old, new):
# Check to see if we're currently overriding the value of self.component
# in self.plot. If so, then don't change the event listeners.
if self._plot is not None:
return
if old is not None:
self._modify_plot_listeners(old, "detach")
if new is not None:
self._modify_plot_listeners(new, "attach")
self._update_mapper_listeners()
def __plot_changed(self, old, new):
if old is not None:
self._modify_plot_listeners(old, "detach")
elif self.component is not None:
# Remove listeners from self.component, if it exists
self._modify_plot_listeners(self.component, "detach")
if new is not None:
self._modify_plot_listeners(new, "attach")
self._update_mapper_listeners()
elif self.component is not None:
self._modify_plot_listeners(self.component, "attach")
self._update_mapper_listeners()
def _modify_plot_listeners(self, plot, action="attach"):
if action == "attach":
remove = False
else:
remove = True
plot.observe(
self._component_bounds_handler, "bounds.items", remove=remove
)
plot.observe(
self._component_pos_handler, "position.items", remove=remove
)
def _component_bounds_handler(self, event):
self._handle_dataspace_update()
self._widget_moved = True
def _component_pos_handler(self, event):
self._handle_dataspace_update()
self._widget_moved = True
def _update_mapper_listeners(self):
# if self._mapper
pass
def _handle_mapper_updated(self):
self._handle_dataspace_update()
# ------------------------------------------------------------------------
# Property getter/setters
# ------------------------------------------------------------------------
def _get_plot(self):
if self._plot is not None:
return self._plot
else:
return self.component
def _set_plot(self, val):
self._plot = val
def _get_mapper(self):
if self._mapper is not None:
return self._mapper
else:
return getattr(self.plot, self.axis + "_mapper")
def _set_mapper(self, new_mapper):
self._mapper = new_mapper
def _get_axis_index(self):
if self._axis_index is None:
return self._determine_axis()
else:
return self._axis_index
def _set_axis_index(self, val):
self._axis_index = val
# ------------------------------------------------------------------------
# Private methods
# ------------------------------------------------------------------------
def _get_axis_coord(self, event, axis="index"):
"""Returns the coordinate of the event along the axis of interest
to this tool (or along the orthogonal axis, if axis="value").
"""
event_pos = (event.x, event.y)
if axis == "index":
return event_pos[self.axis_index]
else:
return event_pos[1 - self.axis_index]
def _determine_axis(self):
"""Determines whether the index of the coordinate along this tool's
axis of interest is the first or second element of an (x,y) coordinate
tuple.
This method is only called if self._axis_index hasn't been set (or is
None).
"""
if self.axis == "index":
if self.plot.orientation == "h":
return 0
else:
return 1
else: # self.axis == "value"
if self.plot.orientation == "h":
return 1
else:
return 0
| enthought/chaco | chaco/plotscrollbar.py | plotscrollbar.py | py | 8,287 | python | en | code | 286 | github-code | 6 | [
{
"api_name": "enable.api.NativeScrollBar",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "traits.api.Enum",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "traits.api.Property",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "traits.... |
9920081347 | from django.urls import path
from . import views
# URL routes for the auctions app.
urlpatterns = [
    # Index and account management.
    path("", views.index, name="index"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    # Listing creation and per-user pages.
    path("new_listing", views.new_listing, name="new_listing"),
    path("view_watchlist", views.view_watchlist, name="view_watchlist"),
    path("categories", views.categories, name="categories"),
    # Per-listing pages keyed by the listing's integer primary key.
    path("<int:listing_id>", views.listing, name="listing"),
    path("<int:listing_id>/bid", views.bid, name="bid"),
    path("<int:listing_id>/watchlist", views.watchlist, name="watchlist"),
    path("<int:listing_id>/close", views.close, name="close"),
    path("<int:listing_id>/comment", views.comment, name="comment"),
    # Category detail keyed by the category name string.
    path("auctions/<str:category>", views.category, name="category")
]
| SaraRayne/Commerce | commerce/auctions/urls.py | urls.py | py | 862 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
25090333654 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    # Adversarial autoencoder: convolutional encoder/decoder pair plus an
    # MLP adversary that scores vectors in the latent space.
    def __init__(self, zSize=10):
        super(Model, self).__init__()
        self.zSize = zSize
    def create(self, opts):
        # Build all sub-networks from an options object.
        # NOTE(review): assumes opts provides imsize, nLatentDims, nChIn,
        # nChOut, nOther and dropoutRate -- confirm against callers.
        # NOTE(review): in Python 3 this division yields a float, yet it is
        # used below as a size for nn.Linear/nn.Unflatten -- verify it is
        # meant to be an integer (imsize == 512 gives 8.0).
        self.scale_factor = 8 / (512 / opts.imsize)
        self.nLatentDims = opts.nLatentDims
        self.nChIn = opts.nChIn
        self.nChOut = opts.nChOut
        self.nOther = opts.nOther
        self.dropoutRate = opts.dropoutRate
        self.opts = opts
        self.create_autoencoder()
        self.create_adversary()
        self.assemble()
    def create_autoencoder(self):
        # Encoder: six stride-2 convolutions (spatial /64) followed by a
        # linear projection to the latent space; the decoder mirrors it with
        # transposed convolutions ending in a single-channel sigmoid image.
        scale = self.scale_factor
        # Create encoder (generator)
        self.encoder = nn.Sequential(
            nn.Conv2d(self.nChIn, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.PReLU(),
            nn.Conv2d(64, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.PReLU(),
            nn.Conv2d(128, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.PReLU(),
            nn.Conv2d(256, 512, 4, 2, 1),
            nn.BatchNorm2d(512),
            nn.PReLU(),
            nn.Conv2d(512, 1024, 4, 2, 1),
            nn.BatchNorm2d(1024),
            nn.PReLU(),
            nn.Conv2d(1024, 1024, 4, 2, 1),
            nn.BatchNorm2d(1024),
            nn.PReLU(),
            nn.Flatten(),
            nn.PReLU(),
            nn.Linear(1024 * scale * scale, self.nLatentDims),
            nn.BatchNorm1d(self.nLatentDims)
        )
        # Create decoder
        self.decoder = nn.Sequential(
            nn.Linear(self.nLatentDims, 1024 * scale * scale),
            nn.Unflatten(1, (1024, scale, scale)),
            nn.PReLU(),
            nn.ConvTranspose2d(1024, 1024, 4, 2, 1),
            nn.BatchNorm2d(1024),
            nn.PReLU(),
            nn.ConvTranspose2d(1024, 512, 4, 2, 1),
            nn.BatchNorm2d(512),
            nn.PReLU(),
            nn.ConvTranspose2d(512, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.PReLU(),
            nn.ConvTranspose2d(256, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.PReLU(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.PReLU(),
            nn.ConvTranspose2d(64, 1, 4, 2, 1),
            nn.Sigmoid()
        )
        # NOTE(review): weights_init is not defined in this class -- it is
        # expected to exist at module scope; confirm it is defined/imported.
        self.encoder.apply(weights_init)
        self.decoder.apply(weights_init)
    def create_adversary(self):
        # Create adversary (discriminator): MLP mapping a latent vector to a
        # single realness probability.
        noise = 0.1  # not referenced in this method
        self.adversary = nn.Sequential(
            nn.Linear(self.nLatentDims, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1),
            nn.Sigmoid()
        )
        self.adversary.apply(weights_init)
    def assemble(self):
        # Chain encoder and decoder into the full autoencoder.
        self.autoencoder = nn.Sequential(self.encoder, self.decoder)
def forward(self, x):
return self.autoencoder(x), self.adversary(x) | TylerJost/learnPytorch | autoencoders/aaeGaudenz.py | aaeGaudenz.py | py | 3,140 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
71366123387 | #!/usr/bin/env python
#-*-coding: utf-8 -*-
import numpy as np
import numpy.linalg as LA
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
def plot_regret(rewards, bestRewards, label, filename):
    """Plot the cumulative regret curve and write it to ./img/<filename>.png."""
    cumulative = np.cumsum(rewards)
    cumulative_best = np.cumsum(bestRewards)
    regret_curve = cumulative_best - cumulative
    figure = plt.figure(figsize=(7, 6))
    steps = np.arange(1, len(regret_curve) + 1)
    plt.plot(steps, regret_curve, label=label)
    plt.legend()
    plt.savefig('./img/' + filename + '.png')
    plt.close(figure)
    return
def plot_beta_estimation(betaEstimations, filename):
    """Plot the per-step beta estimates on a [0, 1] y-axis and save the figure."""
    figure = plt.figure(figsize=(7, 6))
    plt.axis([0, len(betaEstimations), 0, 1.])
    steps = np.arange(1, len(betaEstimations) + 1)
    plt.plot(steps, betaEstimations, label='loss distance distance')
    plt.legend()
    plt.savefig('./img/' + filename + '.png')
    plt.close(figure)
    return
def plot_contexts_and_beta(AllContexts, theta, beta_estimation, filename):
    """Scatter the context vectors, the hidden theta vector and the current
    (normalised) beta estimate around the unit circle; save to img/<filename>.png.

    Fix: ``np.array(map(...))`` produced a useless 0-d object array under
    Python 3 (``map`` returns an iterator, not a list); the coordinates are
    now extracted with list comprehensions.
    """
    fig = plt.gcf()
    plt.gca().set_xlim((0., 1.2))
    plt.gca().set_ylim((0., 1.2))
    # Plot context vectors.
    xs = np.array([c[0] for c in AllContexts])
    ys = np.array([c[1] for c in AllContexts])
    plt.gca().plot(xs, ys, 'o', color='black')
    # Plot theta vector (hidden vector).
    plt.gca().plot(theta[0], theta[1], 'o', color='blue')
    # Plot beta estimation, projected onto the unit circle.
    normalisation = LA.norm(np.array([beta_estimation[0], beta_estimation[1]]))
    plt.gca().plot(beta_estimation[0] / normalisation,
                   beta_estimation[1] / normalisation,
                   'o', color='red')
    fig.gca().add_artist(plt.Circle((0, 0), 1., color='b', fill=False))
    # Annotate each context point with its index.
    for i, x in enumerate(AllContexts):
        fig.gca().annotate('%d' % i, xy=(x[0], x[1]), xytext=(x[0], x[1]),
                           arrowprops=dict(facecolor='black', shrink=0.05),
                           )
    fig.savefig('img/'+filename+'.png')
| jeppe/Adaptive-Social-Search | linucb/plot_utils.py | plot_utils.py | py | 1,894 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
2061507228 | from sklearn.preprocessing import OneHotEncoder
import numpy as np
class CategoricalEncoder:
    """One-hot encoder for 2D arrays of string categories.

    Strings are first mapped to ordinal integers (1..n, in order of first
    appearance during fit) and then one-hot encoded. Categories unseen at
    fit time map to 0 and are encoded as an all-zero row.

    if scikit >= 0.20, better use scikit's version instead of this class
    """
    def __init__(self, dense=True):
        # Only dense (ndarray) output is implemented.
        assert dense, "only dense output is supported"
    def fit(self, X):
        """Learn the string->int mapping and fit the one-hot step.

        Args:
            X: 2D array of strings (one column per categorical feature).
        Returns:
            self, so that ``fit(...).transform(...)`` chains.
        """
        self._str_to_int = {}
        X_int = np.empty(X.shape, dtype=np.int32)
        for i, row in enumerate(X):
            for j, v in enumerate(row):
                int_v = self._str_to_int.get(v)
                if int_v is None:
                    int_v = len(self._str_to_int) + 1
                    self._str_to_int[v] = int_v
                X_int[i, j] = int_v
        # handle_unknown="ignore" is required: transform() encodes unseen
        # categories as 0, a value fit() never produces, and the default
        # handle_unknown="error" would raise on it.
        self._one_hot = OneHotEncoder(
            sparse=False, handle_unknown="ignore"
        ).fit(X_int)
        return self
    def transform(self, X):
        """One-hot encode X; unseen categories become all-zero rows."""
        X_int = np.empty(X.shape, dtype=np.int32)
        for i, row in enumerate(X):
            for j, v in enumerate(row):
                # 0 marks a category never seen during fit.
                X_int[i, j] = self._str_to_int.get(v, 0)
        return self._one_hot.transform(X_int)
| rom1mouret/cheatmeal | benchmarks/preproc/categorical_encoder.py | categorical_encoder.py | py | 1,011 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "numpy.empty",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nump... |
11812216467 | from typing import Optional
import torch
import torch.nn as nn
class JaccardLoss(nn.Module):
    """Computes the mean Intersection-over-Union (mIoU) across classes.

    Args:
        num_classes (int): Number of distinct class labels.
        ignore_index (Optional[int]): Label excluded from the score.
        eps (float): Small constant guarding against division by zero.
    """
    def __init__(
        self, num_classes: int, ignore_index: Optional[int] = None,
        eps: float = 1e-16
    ):
        super().__init__()
        self.num_classes: int = num_classes
        self.ignore_index = ignore_index
        self.eps = eps
    def forward(  # type: ignore
        self,
        inputs: torch.Tensor,
        targets: torch.Tensor
    ) -> torch.Tensor:
        # Hard class assignment per position, then flatten both tensors.
        preds = torch.argmax(inputs, dim=1).byte().flatten()
        labels = targets.byte().flatten()
        # Drop positions carrying the ignored label, if one is configured.
        if self.ignore_index is not None:
            keep = labels != self.ignore_index
            preds = preds[keep]
            labels = labels[keep]
        # Per-class counts for the overlap, predictions and ground truth.
        matched = preds[preds == labels]
        overlap = matched.bincount(minlength=self.num_classes)
        pred_counts = preds.bincount(minlength=self.num_classes)
        true_counts = labels.bincount(minlength=self.num_classes)
        union = pred_counts + true_counts - overlap
        # IoU per class, averaged over all classes.
        return torch.mean(overlap / (union + self.eps))
| yutayamazaki/semantic-segmentation-pytorch | src/losses/jaccard.py | jaccard.py | py | 1,507 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_... |
20175364599 | from decimal import Decimal
from django.conf import settings
from django.urls import reverse
from django.shortcuts import render, get_object_or_404
from core.models import Player, Developer, Payment, Order
from django.views.decorators.csrf import csrf_exempt
from hashlib import md5
from payments.forms import PaymentForm
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
import secrets
import datetime
# Secret key identifying the webstore.
secret_key = "4c5e699656586b17e3775a51281cb3d0"
"""
Renders the view for succesful payment and adds the game to the players inventory.
Also sends a confirmation email to the user of the completed purchase.
"""
@csrf_exempt
def payment_done(request):
    """Callback hit by the payment provider after a payment attempt.

    Verifies the md5 checksum, marks the order paid, adds the purchased
    games to the player's inventory, emails a receipt and removes the
    pending Payment record. Renders the error page on any mismatch.
    """
    if request.GET['result'] == 'success':
        pid = request.GET['pid']
        payment = get_object_or_404(Payment, payment_id=pid)
        order = payment.order
        ref = request.GET['ref']
        result = request.GET['result']
        # NOTE(review): amount is computed but not included in checksumstr --
        # confirm the provider's checksum spec really excludes the amount.
        amount = '%.2f' % order.get_total_cost().quantize(Decimal('.01'))
        checksumstr = "pid={}&ref={}&result={}&token={}".format(pid, ref, result, secret_key)
        m = md5(checksumstr.encode("ascii"))
        checksum = m.hexdigest()
        if checksum == request.GET["checksum"]:
            # Checksum matches: finalize the order.
            order.paid = True
            order.updated = datetime.datetime.now()
            order.save()
            items = order.items.all()
            # NOTE(review): assumes the callback arrives on an authenticated
            # session (request.user is the buyer) -- verify.
            uid = request.user.id
            player = get_object_or_404(Player, user_id=uid)
            games = []
            # Grant each purchased game and bump its sales counter.
            for item in items:
                player.games.add(item.game)
                item.game.times_bought += 1
                games.append(item.game)
                item.game.save()
            player.save()
            payment.delete(keep_parents=True)
            # The confirmation email.
            mail_subject = 'Thank you for your purchase!'
            message = render_to_string('payments/done_email.html', {
                'user': request.user,
                'first_name': order.first_name,
                'last_name': order.last_name,
                'email': order.email,
                'address': order.address,
                'postal_code': order.postal_code,
                'city': order.city,
                'games': games,
                'price': order.get_total_cost()})
            to_email = order.email
            email = EmailMessage(mail_subject, message, to=[to_email])
            email.send()
            return render(request, 'payments/done.html')
        else:
            return render(request, 'payments/error.html')
    else:
        return render(request, 'payments/error.html')
"""
Renders the canceled payment page.
"""
@csrf_exempt
def payment_canceled(request):
    """Provider callback for a cancelled payment: discard the pending
    Payment record and render the cancellation page."""
    pid = request.GET['pid']
    payment = get_object_or_404(Payment, payment_id=pid)
    payment.delete(keep_parents=True)
    return render(request, 'payments/canceled.html')
"""
Renders the error -page when there is an error with the payment
"""
@csrf_exempt
def payment_error(request):
    """Provider callback for a failed payment: discard the pending
    Payment record and render the error page."""
    pid = request.GET['pid']
    payment = get_object_or_404(Payment, payment_id=pid)
    payment.delete(keep_parents=True)
    return render(request, 'payments/error.html')
"""
Processes the payment of the order. Creates the values for the post message needed
for the mockup payment size.
"""
def payment_process(request):
    """Build the POST payload (including md5 checksum) for the mockup
    payment service and render the payment confirmation form."""
    order_id = request.session.get('order_id')
    order = get_object_or_404(Order, id=order_id)
    host = request.get_host()
    # Random payment id, tracked in the DB until the provider calls back.
    pid = secrets.randbelow(1000000000)
    Payment.objects.create(payment_id=pid, order=order)
    sid = 'thebestgamestore'
    amount = '%.2f' % order.get_total_cost().quantize(Decimal('.01'))
    # NOTE(review): md5 is cryptographically weak as a MAC; presumably
    # mandated by the mockup provider's API -- confirm before reuse.
    checksumstr = "pid={}&sid={}&amount={}&token={}".format(pid, sid, amount, secret_key)
    m = md5(checksumstr.encode("ascii"))
    checksum = m.hexdigest()
    # Inputs for the POST -message.
    payment_details = {
        'pid': pid,
        'sid': sid,
        'amount': amount,
        'success_url': 'http://{}{}'.format(host, reverse('payments:done')),
        'cancel_url': 'http://{}{}'.format(host, reverse('payments:canceled')),
        'error_url': 'http://{}{}'.format(host, reverse('payments:error')),
        'checksum': checksum
    }
    form = PaymentForm(payment_details)
    return render(request, 'payments/process.html', {'order': order,
                                                    'form':form})
| vaarnaa/TheBestGameStore | payments/views.py | views.py | py | 4,464 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "core.models.Payment",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 29,
"usage_type": "call"
},
{
"api_na... |
40466717976 | import sys
from collections import deque
# Rebind input to the faster sys.stdin.readline for bulk reading.
input = sys.stdin.readline
# Read the 8x8 board, one row of characters per input line.
graph = []
for i in range(8):
    graph.append(list(input().rstrip()))
answer = 0
def bfs():
    """Breadth-first search over the 8x8 maze (implementation incomplete
    in this source).

    Seeds the queue with the start cell (row 7, column 0) plus a step
    counter of 0.
    """
    # 9 moves: stay put, the 4 orthogonal and the 4 diagonal directions.
    direction = [[0,0],[0,-1],[0,1],[-1,0],[1,0],[-1,-1],[1,-1],[1,1],[-1,1]]
    visited = [[0] * 8 for _ in range(8)]
    # Fix: the queue must hold one [row, col, steps] entry; the original
    # deque([7, 0, 0]) enqueued three separate ints instead of one triple.
    dq = deque([[7, 0, 0]])
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
71889266427 | import json
import sys
# Track the highest buy price and the lowest sell price seen on the feed.
max_buy = float('-inf')
min_sell = float('inf')
# Each stdin line is one JSON record from the exchange feed.
for line in sys.stdin:
    rec = json.loads(line.strip())
    if 'price' not in rec:
        continue
    # NOTE(review): assumes every priced record also carries a 'side' key --
    # confirm against the feed schema.
    if rec['side'] == 'sell':
        min_sell = min(min_sell, float(rec['price']))
    else:
        max_buy = max(max_buy, float(rec['price']))
print('max_buy: %s, min_sell: %s' % (max_buy, min_sell))
| fivetentaylor/intro_to_programming | coinbase/format_wss_feed.py | format_wss_feed.py | py | 383 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
}
] |
17772360699 | import streamlit as st
import pandas as pd
import numpy as np
import psycopg2
from streamlit_option_menu import option_menu
#------- PAGE SETTINGS------------
# Title, icon and layout used for the browser tab and page chrome.
page_title = "GHG Emission Calculator"
Page_icon = "🌳"
layout = "centered"
#-----------------------------------
st.set_page_config(page_title=page_title,page_icon=Page_icon,layout=layout)
st.title(page_title + " " + Page_icon)
# --- HIDE STREAMLIT STYLE ---
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
# Inject the CSS defined above to hide Streamlit's default menu/footer/header.
st.markdown(hide_st_style, unsafe_allow_html=True)
# --- NAVIGATION MENU ---
# Horizontal two-tab navigation; `selected` holds the active tab's label.
selected = option_menu(
    menu_title=None,
    options=["Data Entry", "Data Visualization"],
    icons=["pencil-fill", "bar-chart-fill"], # https://icons.getbootstrap.com/
    orientation="horizontal",
)
@st.cache_resource
def get_data ():
    """Load the emission-factor lookup table from the bundled workbook
    (cached by Streamlit across reruns)."""
    path = "Emissions.xlsx"
    return pd.read_excel(path,sheet_name="Sheet2",usecols="A:I")
#----remember to remove duplicates
data = get_data()
# Drop rows with missing values before building the cascading selectors.
data_na = data.dropna()
# Data Entry tab: cascading selectboxes narrow the emission-factor table
# column by column (scope -> category -> sub category -> material -> UOM
# -> unit) until a single conversion factor remains.
if selected == "Data Entry":
    options1 = data_na.iloc[:,0].unique()
    selected_option1 = st.selectbox("Select Scope:",options1)
    #----filtering scope-------
    filtered_data = data_na[data_na.iloc[:,0]==selected_option1]
    #----get unique values for option 2-----
    option2 = filtered_data.iloc[:,1].unique()
    selected_option2 = st.selectbox("Select Category:",option2)
    #-----filter based on option 2-----
    filter_2 = filtered_data[filtered_data.iloc[:,1]==selected_option2]
    option3 = filter_2.iloc[:,2].unique()
    selected_option3 = st.selectbox("Select Sub Category:",option3)
    #----filter based on option 3----
    filter_3 = filter_2[filter_2.iloc[:,2]== selected_option3]
    option4 = filter_3.iloc[:,3].unique()
    selected_option4 = st.selectbox("Select Material:",option4)
    #-----filter based on option 4----
    filter_4 = filter_3[filter_3.iloc[:,3]==selected_option4]
    option5 = filter_4["UOM"].unique()
    selected_option5 = st.selectbox("Select Unit of Measure:",option5)
    #----filter based on option 5-------
    filter_5 = filter_4[filter_4["UOM"]== selected_option5]
    option6 = filter_5["GHG/Unit"].unique()
    selected_option6 = st.selectbox("Select Unit:",option6)
    #-----filter based on last option-----
    filter_6 = filter_5[filter_5["GHG/Unit"]== selected_option6]
    option_7 = filter_6["GHG Conversion Factor 2022"].unique()
    selected_option7 = st.selectbox("Emission Factor:",option_7)
    #option7_int = int(selected_option7)
    #----create an input field-------
    with st.form("my_form", clear_on_submit=True):
        values = st.number_input("Enter Amount",format="%i",min_value=0)
        values_int = int(values)
        #----multiplying the amount by the emission factor to find total emission----
        emission = int(selected_option7 * values_int)
        total = st.number_input("Total Emissions:",emission)
        #---Creating the submit button-------------
        submitted = st.form_submit_button("Save Data")
        if submitted:
            # NOTE(review): these self-assignments are no-ops -- nothing is
            # actually persisted on submit; confirm whether a DB write
            # (psycopg2 is imported above) was intended here.
            selected_option1 = selected_option1
            selected_option2 = selected_option2
            selected_option3 = selected_option3
            selected_option4 = selected_option4
            values = values
            total = total
            st.success("Data Saved Successfully!")
| sforson14/Data | myfile.py | myfile.py | py | 3,457 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "streamli... |
8413183584 | # %% markdown
## Experiment 3 Trials
# %%
import numpy as np
import fire
import random
import pandas as pd
import json
from itertools import product
from markdown import markdown
import textwrap
from copy import deepcopy
import os, sys, json, pprint
from vgc_project.gridutils import transformations, getFeatureXYs
# %% codecell
def main(BASEGRIDS_FILENAME, EXP_NAME, EXP_CONFIG_FILE):
# %% codecell
basegrids = json.load(open(BASEGRIDS_FILENAME, 'r'))
sharedparams = {
"feature_colors": {
"#": "black",
"G": "yellow",
"S": "white",
".": "white",
**{i: "mediumblue" for i in "0123456"}
},
"wall_features": ["#", ] + list("0123456"),
"show_rewards": False,
}
nRounds = 4*2
roundBonusCents = 15
INITIALGOAL_COUNTDOWN_SEC = 60000
EXPERIMENTVERSION = "1.7c"
emptygrid = [
"............G",
".............",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
".............",
"S............"
]
instructionstraining = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# Instructions
Thank you for participating in our experiment!
You will play a game where you control a blue circle on a grid. You can move up, down, left, or right by pressing the __arrow keys__⬆️⬇️⬅️➡️.
<img src="static/images/bluedotgrid.png" width="150px">
The <span style='background-color: yellow;'><b>Yellow</b></span> tile with the <span style="color: green"><b>green</b></span> square is the goal 👀.
<img src="static/images/goalsquare.png" width="150px">
The green square will shrink when you stand still. It will initially shrink slowly, and then shrink quickly once you start moving. Keep moving!
<br>
__Black__ tiles are walls that you cannot pass through ⛔️.
<br>
<span style="background-color: cornflowerblue;color:white"><b>Blue</b></span> tiles are obstacles that might change
between different rounds. You cannot pass through these either 🚫.
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": "GridNavigation",
"round": 0,
"roundtype": "practice",
"bonus": False,
"message": """Get to the <span style='background-color: yellow;'>Yellow</span> goal. You cannot go through <br><span style='background-color: black;color: white'>Black</span> or <br><span style='background-color: cornflowerblue; color:white'>Blue</span> tiles.""",
"taskparams": {
"feature_array": [
"G.........000",
"............0",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
"0............",
"000.........."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingA",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "GridNavigation",
"round": 1,
"roundtype": "practice",
"bonus": False,
"message": """Get to the <span style='background-color: yellow;'>Yellow</span> goal. You cannot go through <br><span style='background-color: black;color: white'>Black</span> or <br><span style='background-color: cornflowerblue; color:white'>Blue</span> tiles.""",
"taskparams": {
"feature_array": [
"G............",
".............",
".............",
"......#......",
"......#......",
"......#...0..",
".00#######000",
".0....#......",
".0....#......",
"......#......",
".............",
".............",
"............."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingB",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# Instructions
Great! Now, you will be given similar grids, however, after your first move the
<span style="background-color: cornflowerblue;color:white"><b>Blue</b></span> tiles
will become invisible.
""")),
"timing_post_trial": 1000,
"continue_wait_time": 2000,
},
{
"type": "GridNavigation",
"round": 2,
"roundtype": "practice",
"bonus": False,
"message": """The <span style='background-color: cornflowerblue; color:white'>Blue</span> tiles turn invisible.""",
"taskparams": {
"feature_array": [
"G.........000",
"............0",
".............",
"......#......",
"......#......",
"......#......",
"...#######...",
"......#......",
"......#......",
"......#......",
".............",
"0............",
"000.........."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingA",
**sharedparams,
"TILE_SIZE": 40
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"hideObstaclesOnMove": True,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "GridNavigation",
"round": 3,
"roundtype": "practice",
"bonus": False,
"message": """The <span style='background-color: cornflowerblue; color:white'>Blue</span> tiles turn invisible.""",
"taskparams": {
"feature_array": [
"G............",
".............",
".............",
"......#......",
"......#......",
"......#...0..",
".00#######000",
".0....#......",
".0....#......",
"......#......",
".............",
".............",
"............."
],
"init_state":[12, 0],
"absorbing_states":[[0, 12]],
"name": "trainingB",
**sharedparams
},
"emptygrid": emptygrid,
"goalCountdown": True,
"participantStarts": False,
"hideObstaclesOnMove": True,
"INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
"TILE_SIZE": 40
},
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent(f"""
# Instructions
Great!
<br>
Next, we will give you a series of {nRounds} rounds. For each round, you will receive a
bonus of {roundBonusCents} cents but <b>only if you reach the goal
without the green square disappearing</b>.
You can win a total bonus of up to ${nRounds*roundBonusCents/100:.2f}.
<br>
At the start of each round, we will show you a grid showing only the walls (black).
When you are ready to begin the round, press the __spacebar__.
The obstacles (<span style="background-color: cornflowerblue; color:white">blue</span>),
your start location, and goal will appear.
Remember, once you move, the blue obstacles will turn invisible!
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
}
]
location_instructions = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# ☝️ Please note the following
To transition between the rounds as smoothly as possible, we recommend using
one hand to press the spacebar and the other use the arrow keys 🙌.
<br>
Try to go as <u>quickly</u> and <u>carefully</u> as possible 💫.
<br>
In addition, we are interested in your thought process while navigating each maze 🤔.
Following each trial, we will ask you about where one of the obstacles was originally placed.
You will be shown two possible locations, and asked where it was in the maze you just did.
**Your answers to these questions will not affect your bonus, but please try to respond accurately.**.
<br>
Thanks again for participating in our experiment!
<br>
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": 'survey-multi-choice',
"questions": [
{
"prompt": "If the green square disappears, you will not receive a bonus on that round:",
"options": ["True", "False"],
"required": True,
"name": "navCheck"
},
{
"prompt": "How much of a bonus will you receive for completing each maze before the green square disappears?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "navBonusCheck"
},
{
"prompt": "How much of a bonus will you receive for answering the questions about what you paid attention to?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "memoryBonusCheck"
}
]
}
]
awareness_instructions = [
{
"type": "CustomInstructions",
"instructions": markdown(textwrap.dedent("""
# ☝️ Please note the following
To transition between the rounds as smoothly as possible, we recommend using
one hand to press the spacebar and the other use the arrow keys 🙌.
<br>
Try to go as <u>quickly</u> and <u>carefully</u> as possible 💫.
<br>
In addition, we are interested in your thought process while navigating each maze 🤔.
Following each trial, we will ask you <u>how aware of each obstacle you were at any point</u>.
Your answer should reflect the amount you paid attention to an obstacle, whether it was
at the beginning or end of navigating the maze.
**Your answers to these questions will not affect your bonus**.
<br>
Finally, <b>the maze rounds will sometimes end randomly <u>before</u> you reach the goal</b>.
As long as the green square has not disappeared, you will receive your bonus on that round,
but we will still ask you questions about your thought process.
<br>
Thanks again for participating in our experiment!
<br>
""")),
"timing_post_trial": 1000,
"continue_wait_time": 5000,
},
{
"type": 'survey-multi-choice',
"questions": [
{
"prompt": "If the green square disappears, you will not receive a bonus on that round:",
"options": ["True", "False"],
"required": True,
"name": "navCheck"
},
{
"prompt": "How much of a bonus will you receive for completing each maze before the green square disappears?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "navBonusCheck"
},
{
"prompt": "How much of a bonus will you receive for answering the questions about what you paid attention to?",
"options": ["25 cents", "15 cents", "12 cents", "None"],
"required": True,
"name": "memoryBonusCheck"
}
]
}
]
# %% codecell
posttask = [
{
"type": 'survey-text',
"questions": [
{
"prompt": "Please describe your process for answering the questions.",
"rows": 5,
"columns":50,
"required": True,
"name": "howAttention"
},
{
"prompt": "Any general comments?",
"rows": 5,
"columns":50,
"required": True,
"name": "generalComments"
},
],
},
{
"type": 'survey-likert',
"questions": [
{
"prompt": "How often do you play video games?",
"labels": ["Never", "Every few months", "Monthly", "Weekly", "Daily"],
"required": True,
"name": "videogames"
}
]
},
{
"type": 'survey-text',
"questions": [
{"prompt": "Age", "required": True, "name": "age"},
{"prompt": "Gender", "required": True, "name": "gender"},
],
}
]
# %% codecell
# ## Generate main trials
# Build one task spec per (base grid, transformation, termination variant),
# keyed by "<grid>-<transform>-<full|earlyterm>".
expgrids = {}
# Offsets exactly 2 moves from the start; used as extra absorbing states so
# 'earlyterm' variants terminate after 2 steps.
earlyTermDist = [[-2, 0], [2, 0], [0, -2], [0, 2], [1, 1], [-1, 1], [1, -1], [-1, -1]]
for gridname, trialArray in basegrids.items():
    for tname, trans in transformations.items():
        for earlyTerm in ['full', 'earlyterm']:
            tgridname = f"{gridname}-{tname}-{earlyTerm}"
            transformed = trans(trialArray)
            if earlyTerm == 'full':
                # 'full': only the goal cell(s) "G" are absorbing.
                # (getFeatureXYs presumably returns the [x, y] coordinates of
                # every cell holding the given feature char — confirm.)
                expgrids[tgridname] = {
                    "feature_array": transformed,
                    "init_state": getFeatureXYs(transformed, "S")[0],
                    "absorbing_states": getFeatureXYs(transformed, "G"),
                    "name": tgridname,
                    **sharedparams
                }
            elif earlyTerm == 'earlyterm':
                s0 = getFeatureXYs(transformed, "S")[0]
                adjToS0 = [[s0[0] + dx, s0[1] + dy] for dx, dy in earlyTermDist]
                # Drop offsets that fall below the grid origin.
                # NOTE(review): only negative coordinates are filtered; offsets
                # past the upper grid bounds are kept — confirm that is safe.
                adjToS0 = [s for s in adjToS0 if (s[0] >= 0) and (s[1] >= 0)]
                expgrids[tgridname] = {
                    "feature_array": transformed,
                    "init_state": s0,
                    "absorbing_states": getFeatureXYs(transformed, "G") + adjToS0,
                    "name": tgridname,
                    **sharedparams
                }
# %% codecell
# ### Trial orders
from functools import reduce
YELLOW = "#DCCB5D"
GREEN = "#44A9A0"
def generateTrialParameters(basegrids, seed, reverse=False, flipEarlyTerm=False, probetype="awareness"):
    """Build the full, seeded list of trial dicts for one experimental condition.

    Produces navigation trials (2 blocks x 4 grids, translations shuffled per
    block) interleaved with per-obstacle probe trials of the requested type:
    "awareness" (attention-rating queries) or "location" (2AFC + confidence).

    NOTE(review): `flipEarlyTerm` and `basegrids` are accepted but never read
    in this body — confirm whether they are vestigial.

    The order of random.* calls determines the output for a given seed, so the
    call sequence below must not be reordered.
    """
    random.seed(seed)
    translations = ['base', 'vflip', 'hflip', 'trans']
    # Each entry is a (true_color, modified_color) assignment.
    truemod_colors = [(GREEN, YELLOW), (YELLOW, GREEN)]
    grididx = [12, 13, 14, 15]
    # at the nav-trial level, randomly assign translations to the grids in 2 blocks
    nblocks = 2
    navtrialparams = []
    for blocki in range(nblocks):
        btrans = deepcopy(translations)
        random.shuffle(btrans)
        bgrids = deepcopy(grididx)
        random.shuffle(bgrids)
        navtrialparams.append([(blocki, gidx, trans) for gidx, trans in zip(bgrids, btrans)])
    # at the probe-trial level, randomly but evenly assign true/mod to each obstacle in
    #the first nav-trial block, then do the opposite in the second nav-trial block.
    # shuffle the probe level trials within each grid block
    probetrials = {}
    firstblock = navtrialparams[0]
    for blocki, gidx, trans in firstblock:
        assert blocki == 0
        probes = ['0', '1', '2', '3', '4']
        probe_colororder = [[(p, corder) for corder in truemod_colors] for p in probes]
        for pcolors in probe_colororder:
            random.shuffle(pcolors)
        # Transpose so block 0 and block 1 get opposite color assignments per obstacle.
        probecolors0, probecolors1 = [list(pm) for pm in zip(*probe_colororder)]
        random.shuffle(probecolors0)
        random.shuffle(probecolors1)
        probetrials[(0, gidx)] = probecolors0
        probetrials[(1, gidx)] = probecolors1
    # flatten the blocks
    navtrialparams = sum(navtrialparams, [])
    if reverse:
        navtrialparams = navtrialparams[::-1]
    emptygrid = [
        "............G",
        ".............",
        ".............",
        "......#......",
        "......#......",
        "......#......",
        "...#######...",
        "......#......",
        "......#......",
        "......#......",
        ".............",
        ".............",
        "S............"
    ]
    trialparams = []
    pi = 0  # running page counter across all trial types
    for ri, (bi, gidx, trans) in enumerate(navtrialparams):
        navgridname = f"gridB-{gidx}-0-{trans}-full"
        #create navigation trial
        trialparams.append({
            "type": "GridNavigation",
            "page": pi,
            "round": ri,
            "roundtype": "navigation",
            "bonus": True,
            "goalCountdown": True,
            "hideObstaclesOnMove": True,
            "message": "",
            "taskparams": expgrids[navgridname],
            "emptygrid": emptygrid,
            "navgridname": navgridname,
            "INITIALGOAL_COUNTDOWN_SEC": INITIALGOAL_COUNTDOWN_SEC,
            "TILE_SIZE": 40
        })
        pi += 1
        if probetype == "location":
            #create maze-obstacle memory trials
            probeparams = probetrials[(bi, gidx)]
            for probeidx, (probeobs, colororder) in enumerate(probeparams):
                # Map digit obstacle ids to their letter counterparts ("0"->"A", ...).
                num2alpha = dict(zip("01234", "ABCDE"))
                probegridname = f"gridB-{gidx}-M-{trans}-full"
                probegrid = deepcopy(expgrids[probegridname])
                probeobs = str(probeobs)
                obs_colors = { #color order is (true, mod)
                    probeobs: colororder[0],
                    num2alpha[probeobs]: colororder[1]
                }
                fc = {"#": 'black', **obs_colors}
                probegrid['feature_colors'] = fc
                #2afc
                trialparams.append({
                    #plugin parameters
                    "type": "CustomItem",
                    "questiontext": "An obstacle was <b>either</b> in the yellow <b>or</b> green location (not both), which one was it?",
                    "responseLabels": ["Yellow", "?", "Green"],
                    "validResponses": ["Yellow", "Green"],
                    "initResponse": "?",
                    "responseEndLabels": ["", ""],
                    "stimuli": [{
                        "type": "gridworld",
                        "gridworldparams": probegrid,
                        "TILE_SIZE": 25
                    }],
                    "dontSave": ["stimuli", ],
                    #other information to save
                    "roundtype": "probe_2afc",
                    "page": pi,
                    "round": ri,
                    "queryround": probeidx, #the n-th asked about this round
                    "navgridname": navgridname,
                    "probegridname": probegridname,
                    "true_color": colororder[0],
                    "mod_color": colororder[1],
                    "probeobs": probeobs,
                })
                pi += 1
                #confidence
                trialparams.append({
                    #plugin parameters
                    "type": "CustomItem",
                    "questiontext": "How confident are you?",
                    "responseLabels": [1, 2, 3, 4, "?", 5, 6, 7, 8],
                    "validResponses": [1, 2, 3, 4, 5, 6, 7, 8],
                    "initResponse": "?",
                    "responseEndLabels": ["I guessed", "I'm certain"],
                    "stimuli": [{
                        "type": "gridworld",
                        "gridworldparams": probegrid,
                        "TILE_SIZE": 25
                    }],
                    "dontSave": ["stimuli", ],
                    #other information to save
                    "roundtype": "probe_conf",
                    "page": pi,
                    "round": ri,
                    "queryround": probeidx, #the n-th asked about this round
                    "navgridname": navgridname,
                    "probegridname": probegridname,
                    "true_color": colororder[0],
                    "mod_color": colororder[1],
                    "probeobs": probeobs,
                })
                pi += 1
        elif probetype == "awareness":
            probeparams = probetrials[(bi, gidx)]
            probeorder = [probeobs for probeobs, _ in probeparams]
            #create maze-obstacle attention trials
            for probeidx, probeobs in enumerate(probeorder):
                probegrid = deepcopy(expgrids[navgridname])
                probegrid['feature_colors'][probeobs] = '#48D1CC' #MediumTurquoise
                trialparams.append({
                    "type": "GridBlockAttentionQuery",
                    "page": pi,
                    "round": ri,
                    "roundtype": "attentionquery",
                    "queryround": probeidx, #the n-th asked about this round
                    "probegridparams": probegrid,
                    "navgridname": navgridname,
                    "probeobs": probeobs,
                    "questiontext": "How aware of the highlighted obstacle were you at any point?"
                })
                pi += 1
        else:
            assert False, "unknown probetype"
    return trialparams
# %% codecell
#note, there are 8 seeds, so 8 * 2 * 2 = 32 conditions
seeds = [23199, 27190, 31210, 31290, 31993, 61993, 63993, 67993]
timelines = []
for seed, reverse, probetype in product(seeds, [True, False], ['awareness', 'location']):
maintrials = generateTrialParameters(basegrids, seed=seed, reverse=reverse, probetype=probetype)
if probetype == "awareness":
pretask = instructionstraining + awareness_instructions
elif probetype == 'location':
pretask = instructionstraining + location_instructions
timelines.append(pretask+maintrials+posttask)
# %% codecell
# Bundle the experiment-level parameters with all generated timelines and
# write the configuration to EXP_CONFIG_FILE as pretty-printed, key-sorted JSON.
params = {
    "nRounds": nRounds,
    "roundBonusCents": roundBonusCents,
    "EXP_NAME": EXP_NAME
}
experiment = {"timelines": timelines, "params": params}
# Use a context manager so the output file is flushed and closed
# deterministically (the original passed a bare open() to json.dump,
# leaking the handle).
with open(EXP_CONFIG_FILE, "w") as config_file:
    json.dump(
        experiment,
        config_file,
        sort_keys=True, indent=4
    )
# %%
if __name__ == "__main__":
fire.Fire(main)
| markkho/value-guided-construal | experiments/exp3/generate_trials.py | generate_trials.py | py | 25,424 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "markdown.markdown",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "markdown.markdown",
"... |
38961213741 | import functools
class Person:
    """One employee record parsed from a line of the EmpDetails file."""

    def __init__(self, Eid, Ename, Desig, sal):
        """Keep id/name/designation as given; coerce salary to int."""
        self.Eid = Eid
        self.Ename = Ename
        self.Desig = Desig
        self.sal = int(sal)

    def PrintValues(self):
        """Print one labelled line per field (labels preserved verbatim)."""
        rows = (
            ("Emp Id", self.Eid),
            ("Emp name", self.Ename),
            ("Emp Degnation", self.Desig),
            ("Emp salary ", self.sal),
        )
        for label, value in rows:
            print(label, value)

    def __str__(self):
        """Display form is simply the employee's name."""
        return self.Ename
# Load every line of EmpDetails as a Person (CSV: id,name,designation,salary).
# NOTE(review): the file handle is never closed — consider a `with` block.
f=open("EmpDetails","r")
empl=[]
for data in f:
    data=data.rstrip("\n").split(",")
    Eid=data[0]
    Ename=data[1]
    Desig=data[2]
    sal=data[3]
    obj=Person(Eid,Ename,Desig,sal)
    empl.append(obj)
lst=[]
# for emp
# maxval=functools.reduce(lambda emp:max(sal),lst)
# print(maxval)
#reduce can be use in integer value
# Extract the salaries, then reduce them to the maximum.
# NOTE(review): builtin max(maxval) would do the same as the reduce below.
maxval=list(map(lambda obj:obj.sal,empl))
print(maxval)
m=functools.reduce(lambda sal1,sal2:sal1 if sal1>sal2 else sal2,maxval)
print(m)
# Print every employee earning the maximum salary (uses Person.__str__).
maxsal=list(filter(lambda emp:emp.sal==m,empl))
for i in maxsal:
    print(i)
{
"api_name": "functools.reduce",
"line_number": 34,
"usage_type": "call"
}
] |
17335569332 | import pygame
import math
import random
class Bullet():
    """A projectile with a fixed 20x20 sprite, a travel direction and a max range."""
    def __init__(self, health, direction, start, colour, rRange):
        # Fixed sprite size in pixels.
        self.dims: tuple((int, int)) = (20, 20)
        self.sprite = pygame.Surface((20, 20))
        self.sprite.fill(colour)
        # NOTE(review): the colorkey equals the fill colour, which makes the whole
        # sprite transparent when blitted — confirm this is intentional (the rect
        # drawn in render() is what is actually visible).
        self.sprite.set_colorkey(colour)
        self.x, self.y = start[0], start[1]
        # Collision rectangle, kept in sync with (x, y) by update().
        self.body = pygame.Rect(self.pos, self.dims)
        # Maximum travel distance; compared against self.distance by the caller,
        # presumably — not read inside this class. TODO confirm.
        self.rRange = rRange
        self.distance = 0
        #health
        self.hp = health
        #mobility
        self.speed = 10
        # Dict with "chx"/"chy" unit components of the travel direction.
        self.direction = direction
    @property
    def w(self):
        # Sprite width in pixels.
        return self.dims[0]
    @property
    def h(self):
        # Sprite height in pixels.
        return self.dims[1]
    @property
    def pos(self):
        # Current top-left position as a tuple.
        return (self.x, self.y)
    def update(self):
        """Advance one frame along direction and accumulate distance travelled."""
        moveVecx = self.direction["chx"] * self.speed
        moveVecy = self.direction["chy"] * self.speed
        self.distance += math.sqrt(moveVecx**2 + moveVecy**2)
        self.x += moveVecx
        self.y += moveVecy
        self.body.x = self.x
        self.body.y = self.y
    def render(self, screen, dims):
        """Blit the sprite and draw the (visible) collision rect. `dims` is unused."""
        screen.blit(self.sprite, (self.x, self.y))
        pygame.draw.rect(screen, (240, 2, 100), self.body)
| andrewchu16/untitledproject | src/bullet.py | bullet.py | py | 1,225 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pygame.Surface",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_num... |
39784628011 | from unittest.mock import Mock
import pytest
from juju.action import Action
from juju_verify.utils.action import data_from_action
@pytest.mark.parametrize(
    "data, key, exp_value",
    [
        # key present under "results" -> its value
        ({"results": {"host": "compute.0", "test": "test"}}, "host", "compute.0"),
        # key missing under "results" -> fallback default
        ({"results": {"test": "test"}}, "host", "default"),
        # values are returned verbatim (stringified list stays a string)
        ({"results": {"ids": "[1, 2, 3]", "test": "test"}}, "ids", "[1, 2, 3]"),
        # no "results" section at all -> fallback default
        ({"test": "test"}, "host", "default"),
    ],
)
def test_data_from_action(data, key, exp_value):
    """Test helper function that parses data from Action.data.results dict."""
    action = Mock(spec_set=Action)
    action.data = data
    output = data_from_action(action, key, "default")
    assert output == exp_value
| canonical/juju-verify | tests/unit/utils/test_action.py | test_action.py | py | 746 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "unittest.mock.Mock",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "juju.action.Action",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "juju_verify.utils.action.data_from_action",
"line_number": 23,
"usage_type": "call"
},
{
"a... |
5061101951 | from ctypes import c_int, create_string_buffer
import json
import platform
if platform.system() == "Linux" :
from ctypes import cdll
else :
from ctypes import windll
ID_TIPO_COMPROBANTE_TIQUET = c_int( 1 ).value # "83" Tique
ID_TIPO_COMPROBANTE_TIQUE_FACTURA = c_int( 2 ).value # "81" Tique Factura A, "82" Tique Factura B, "111" Tique Factura C, "118" Tique Factura M
ID_TIPO_COMPROBANTE_TIQUE_NOTA_DE_CREDITO = c_int( 3 ).value # "110" Tique Nota de Credito, "112" Tique Nota de Credito A, "113" Tique Nota de Credito B, "114" Tique Nota de Credito C, "119" Tique Nota de Credito M
ID_TIPO_COMPROBANTE_TIQUE_NOTA_DE_DEBITO = c_int( 4 ).value # "115" Tique Nota de Debito A, "116" Tique Nota de Debito B, "117" Tique Nota de Debito C, "120" Tique Nota de Debito M
ID_TIPO_COMPROBANTE_NO_FISCAL = c_int( 21 ).value
ID_TIPO_DOCUMENTO_NINGUNO = c_int( 0 ).value
ID_TIPO_DOCUMENTO_DNI = c_int( 1 ).value
ID_TIPO_DOCUMENTO_CUIL = c_int( 2 ).value
ID_TIPO_DOCUMENTO_CUIT = c_int( 3 ).value
ID_TIPO_DOCUMENTO_CEDULA_IDENTIDAD = c_int( 4 ).value
ID_TIPO_DOCUMENTO_PASAPORTE = c_int( 5 ).value
ID_TIPO_DOCUMENTO_LIB_CIVICA = c_int( 6 ).value
ID_TIPO_DOCUMENTO_LIB_ENROLAMIENTO = c_int( 7 ).value
ID_RESPONSABILIDAD_IVA_NINGUNO = c_int( 0 ).value
ID_RESPONSABILIDAD_IVA_RESPONSABLE_INSCRIPTO = c_int( 1 ).value
ID_RESPONSABILIDAD_IVA_NO_RESPONSABLE = c_int( 3 ).value
ID_RESPONSABILIDAD_IVA_MONOTRIBUTISTA = c_int( 4 ).value
ID_RESPONSABILIDAD_IVA_CONSUMIDOR_FINAL = c_int( 5 ).value
ID_RESPONSABILIDAD_IVA_EXENTO = c_int( 6 ).value
ID_RESPONSABILIDAD_IVA_NO_CATEGORIZADO = c_int( 7 ).value
ID_RESPONSABILIDAD_IVA_MONOTRIBUTISTA_SOCIAL = c_int( 8 ).value
ID_RESPONSABILIDAD_IVA_CONTRIBUYENTE_EVENTUAL = c_int( 9 ).value
ID_RESPONSABILIDAD_IVA_CONTRIBUYENTE_EVENTUAL_SOCIAL = c_int( 10 ).value
ID_RESPONSABILIDAD_IVA_MONOTRIBUTO_INDEPENDIENTE_PROMOVIDO = c_int( 11 ).value
ID_MODIFICADOR_AGREGAR_ITEM = c_int( 200 ).value
ID_MODIFICADOR_ANULAR_ITEM = c_int( 201 ).value
ID_MODIFICADOR_AGREGAR_ITEM_RETORNO_ENVASES = c_int( 202 ).value
ID_MODIFICADOR_ANULAR_ITEM_RETORNO_ENVASES = c_int( 203 ).value
ID_MODIFICADOR_AGREGAR_ITEM_BONIFICACION = c_int( 204 ).value
ID_MODIFICADOR_ANULAR_ITEM_BONIFICACION = c_int( 205 ).value
ID_MODIFICADOR_AGREGAR_ITEM_DESCUENTO = c_int( 206 ).value
ID_MODIFICADOR_ANULAR_ITEM_DESCUENTO = c_int( 207 ).value
ID_MODIFICADOR_AGREGAR_ITEM_ANTICIPO = c_int( 208 ).value
ID_MODIFICADOR_ANULAR_ITEM_ANTICIPO = c_int( 209 ).value
ID_MODIFICADOR_AGREGAR_ITEM_DESCUENTO_ANTICIPO = c_int( 210 ).value
ID_MODIFICADOR_ANULAR_ITEM_DESCUENTO_ANTICIPO = c_int( 211 ).value
ID_MODIFICADOR_DESCUENTO = c_int( 400 ).value
ID_MODIFICADOR_AJUSTE = c_int( 401 ).value
ID_MODIFICADOR_AJUSTE_NEGATIVO = c_int( 402 ).value
ID_MODIFICADOR_AUDITORIA_DETALLADA = c_int( 500 ).value
ID_MODIFICADOR_AUDITORIA_RESUMIDA = c_int( 501 ).value
ID_MODIFICADOR_AGREGAR = ID_MODIFICADOR_AGREGAR_ITEM
ID_MODIFICADOR_ANULAR = ID_MODIFICADOR_ANULAR_ITEM
ID_TASA_IVA_NINGUNO = c_int( 0 ).value
ID_TASA_IVA_EXENTO = c_int( 1 ).value
ID_TASA_IVA_10_50 = c_int( 4 ).value
ID_TASA_IVA_21_00 = c_int( 5 ).value
ID_TASA_IVA_27_00 = c_int( 6 ).value
ID_IMPUESTO_NINGUNO = c_int( 0 ).value
ID_IMPUESTO_INTERNO_FIJO = c_int( 1 ).value
ID_IMPUESTO_INTERNO_PORCENTUAL = c_int( 2 ).value
ID_CODIGO_INTERNO = c_int( 1 ).value
ID_CODIGO_MATRIX = c_int( 2 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_SIN_DESCRIPCION = c_int( 0 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOGRAMO = c_int( 1 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_METROS = c_int( 2 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_METRO_CUADRADO = c_int( 3 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_METRO_CUBICO = c_int( 4 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_LITROS = c_int( 5 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_UNIDAD = c_int( 7 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_PAR = c_int( 8 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_DOCENA = c_int( 9 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_QUILATE = c_int( 10 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILLAR = c_int( 11 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MEGA_U_INTER_ACT_ANTIB = c_int( 12 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_UNIDAD_INT_ACT_INMUNG = c_int( 13 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_GRAMO = c_int( 14 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILIMETRO = c_int( 15 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILIMETRO_CUBICO = c_int( 16 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOMETRO = c_int( 17 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_HECTOLITRO = c_int( 18 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MEGA_UNIDAD_INT_ACT_INMUNG = c_int( 19 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_CENTIMETRO = c_int( 20 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOGRAMO_ACTIVO = c_int( 21 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_GRAMO_ACTIVO = c_int( 22 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_GRAMO_BASE = c_int( 23 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_UIACTHOR = c_int( 24 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_JGO_PQT_MAZO_NAIPES = c_int( 25 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MUIACTHOR = c_int( 26 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_CENTIMETRO_CUBICO = c_int( 27 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_UIACTANT = c_int( 28 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_TONELADA = c_int( 29 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_DECAMETRO_CUBICO = c_int( 30 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_HECTOMETRO_CUBICO = c_int( 31 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOMETRO_CUBICO = c_int( 32 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MICROGRAMO = c_int( 33 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_NANOGRAMO = c_int( 34 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_PICOGRAMO = c_int( 35 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MUIACTANT = c_int( 36 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_UIACTIG = c_int( 37 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILIGRAMO = c_int( 41 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILILITRO = c_int( 47 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_CURIE = c_int( 48 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MILICURIE = c_int( 49 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MICROCURIE = c_int( 50 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_U_INTER_ACT_HORMONAL = c_int( 51 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MEGA_U_INTER_ACT_HORMONAL = c_int( 52 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOGRAMO_BASE = c_int( 53 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_GRUESA = c_int( 54 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_MUIACTIG = c_int( 55 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_KILOGRAMO_BRUTO = c_int( 61 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_PACK = c_int( 62 ).value
AFIP_CODIGO_UNIDAD_MEDIDA_HORMA = c_int( 63 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_IMPUESTOS_NACIONALES = c_int( 1 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_IMPUESTOS_PROVINCIAL = c_int( 2 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_IMPUESTO_MUNICIPAL = c_int( 3 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_IMPUESTO_INTERNOS = c_int( 4 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_INGRESOS_BRUTOS = c_int( 5 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_PERCEPCION_DE_IVA = c_int( 6 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_PERCEPCION_DE_INGRESOS_BRUTOS = c_int( 7 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_PERCEPCION_POR_IMPUESTOS_MUNICIPALES = c_int( 8 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_OTRAS_PERCEPCIONES = c_int( 9 ).value
AFIP_CODIGO_OTROS_TRIBUTOS_OTROS = c_int( 99 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CARTA_DE_CREDITO_DOCUMENTARIO = c_int( 1 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CARTAS_DE_CREDITO_SIMPLE = c_int( 2 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CHEQUE = c_int( 3 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CHEQUES_CANCELATORIOS = c_int( 4 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CREDITO_DOCUMENTARIO = c_int( 5 ).value
AFIP_CODIGO_FORMA_DE_PAGO_CUENTA_CORRIENTE = c_int( 6 ).value
AFIP_CODIGO_FORMA_DE_PAGO_DEPOSITO = c_int( 7 ).value
AFIP_CODIGO_FORMA_DE_PAGO_EFECTIVO = c_int( 8 ).value
AFIP_CODIGO_FORMA_DE_PAGO_ENDOSO_DE_CHEQUE = c_int( 9 ).value
AFIP_CODIGO_FORMA_DE_PAGO_FACTURA_DE_CREDITO = c_int( 10 ).value
AFIP_CODIGO_FORMA_DE_PAGO_GARANTIAS_BANCARIAS = c_int( 11 ).value
AFIP_CODIGO_FORMA_DE_PAGO_GIROS = c_int( 12 ).value
AFIP_CODIGO_FORMA_DE_PAGO_LETRAS_DE_CAMBIO = c_int( 13 ).value
AFIP_CODIGO_FORMA_DE_PAGO_MEDIOS_DE_PAGO_DE_COMERCIO_EXTERIOR = c_int( 14 ).value
AFIP_CODIGO_FORMA_DE_PAGO_ORDEN_DE_PAGO_DOCUMENTARIA = c_int( 15 ).value
AFIP_CODIGO_FORMA_DE_PAGO_ORDEN_DE_PAGO_SIMPLE = c_int( 16 ).value
AFIP_CODIGO_FORMA_DE_PAGO_PAGO_CONTRA_REEMBOLSO = c_int( 17 ).value
AFIP_CODIGO_FORMA_DE_PAGO_REMESA_DOCUMENTARIA = c_int( 18 ).value
AFIP_CODIGO_FORMA_DE_PAGO_REMESA_SIMPLE = c_int( 19 ).value
AFIP_CODIGO_FORMA_DE_PAGO_TARJETA_DE_CREDITO = c_int( 20 ).value
AFIP_CODIGO_FORMA_DE_PAGO_TARJETA_DE_DEBITO = c_int( 21 ).value
AFIP_CODIGO_FORMA_DE_PAGO_TICKET = c_int( 22 ).value
AFIP_CODIGO_FORMA_DE_PAGO_TRANSFERENCIA_BANCARIA = c_int( 23 ).value
AFIP_CODIGO_FORMA_DE_PAGO_TRANSFERENCIA_NO_BANCARIA = c_int( 24 ).value
AFIP_CODIGO_FORMA_DE_PAGO_OTROS_MEDIOS_DE_PAGO = c_int( 99 ).value
def cargarLibreria() :
    """Load the Epson fiscal-printer native library for the current OS.

    Returns the ctypes library handle.
    NOTE(review): on any OS other than Linux/Windows this falls through and
    implicitly returns None — callers would then fail with an AttributeError.
    """
    sistema = platform.system()
    if sistema == "Linux" :
        return cdll.LoadLibrary("./EpsonFiscalInterface.so")
    else :
        if sistema == "Windows" :
            return windll.LoadLibrary("./EpsonFiscalInterface.dll")
# -----------------------------------------------------------------------------
# Function: ticket
# -----------------------------------------------------------------------------
def ticket(datos_ticket):
    """Print a fiscal tique (receipt) on the Epson printer.

    Expects datos_ticket with 'itemsComprobante' (each item: cantidad,
    descripcion, importeOriginal, codigo) and 'total'. Returns a JSON string
    {"con_errores": 0|1, "descripcion": ..., ["numero": ...]}.

    NOTE(review): if cargarLibreria() itself raises, Handle_HL is unbound and
    the `finally` block raises NameError instead of returning the error JSON —
    confirm whether that path can occur in production.
    """
    try :
        # get handle from DLL
        Handle_HL = cargarLibreria()
        # # connect
        ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
        Handle_HL.ConfigurarPuerto( "0" )
        ejecutarComando(Handle_HL, Handle_HL.Conectar())
        # # try cancel all
        ejecutarComando(Handle_HL, Handle_HL.Cancelar())
        # # open
        ejecutarComando(Handle_HL, Handle_HL.AbrirComprobante( ID_TIPO_COMPROBANTE_TIQUET ))
        ## Here this function needs to be able to evaluate the errors the call can throw
        # # get document number
        str_doc_number_max_len = 20
        str_doc_number = create_string_buffer( b'\000' * str_doc_number_max_len )
        error = Handle_HL.ConsultarNumeroComprobanteActual( str_doc_number, c_int(str_doc_number_max_len).value )
        print("Get Doc. Number Error : "),
        print(error)
        print("Doc Number : "),
        print(str_doc_number.value)
        # # get document type
        str_doc_type_max_len = 20
        str_doc_type = create_string_buffer( b'\000' * str_doc_type_max_len )
        print(str_doc_type)
        error = Handle_HL.ConsultarTipoComprobanteActual( str_doc_type, c_int(str_doc_type_max_len).value )
        print("Get Type Doc. Error : "),
        print(error)
        print("Doc Type : "),
        print(str_doc_type.value)
        # item
        # imprimirItems(datos_ticket['items'], Handle_HL)
        for item in datos_ticket['itemsComprobante'] :
            # error = Handle_HL.ImprimirItem( ID_MODIFICADOR_AGREGAR, "Sardinas", "1", "100.1234", ID_TASA_IVA_EXENTO, ID_IMPUESTO_NINGUNO, "0", ID_CODIGO_INTERNO, "CodigoInterno4567890123456789012345678901234567890", "", AFIP_CODIGO_UNIDAD_MEDIDA_KILOGRAMO )
            error = ejecutarComando(Handle_HL, Handle_HL.ImprimirItem( ID_MODIFICADOR_AGREGAR, enviar_texto(item["descripcion"]), enviar_texto(item['cantidad']), enviar_texto(item["importeOriginal"]), ID_TASA_IVA_EXENTO, ID_IMPUESTO_NINGUNO, "0", ID_CODIGO_INTERNO, enviar_texto(item["codigo"]), "", AFIP_CODIGO_UNIDAD_MEDIDA_UNIDAD))
            print(str(item['cantidad'] + ' ' + item['descripcion'].ljust(40) + item['importeOriginal']))
        # subtotal
        ejecutarComando(Handle_HL, Handle_HL.ImprimirSubtotal())
        # print(datos_ticket["total"])
        print(str("IMPORTE" + " ").ljust(42) + str(datos_ticket["total"]))
        # get subtotal gross amount
        str_subtotal_max_len = 20
        str_subtotal = create_string_buffer( b'\000' * str_subtotal_max_len )
        error = Handle_HL.ConsultarSubTotalBrutoComprobanteActual( str_subtotal, c_int(str_subtotal_max_len).value )
        print("Get Subtotal Gross : "),
        print(error)
        print("Subtotal Gross Amount : "),
        print(str_subtotal.value)
        # get subtotal net amount
        str_subtotal_max_len = 20
        str_subtotal = create_string_buffer( b'\000' * str_subtotal_max_len )
        print("como imprime:" + str(str_subtotal))
        error = Handle_HL.ConsultarSubTotalNetoComprobanteActual( str_subtotal, c_int(str_subtotal_max_len).value )
        print("Get Subtotal Net : "),
        print(error)
        print("Subtotal Net Amount : "),
        print(str_subtotal.value)
        # close
        ejecutarComando(Handle_HL, Handle_HL.CerrarComprobante())
        # [2:-1] strips the "b'" prefix and trailing "'" of the bytes repr.
        res = {"con_errores": 0, "descripcion": "OK", "numero": str(str_doc_number.value)[2:-1]}
    except Exception as err :
        res = {"con_errores": 1, "descripcion": str(err)}
    finally:
        ejecutarComando(Handle_HL, Handle_HL.Desconectar())
    return json.dumps(res)
## Formato de datos de ticket
# ticket_str = "{'cliente': 'Martin Ramos'}"
# # , "Importe": "100.00"
# # , "Items":
# # [{ "descripcion": "Coca Cola"
# # , "importe": "120.00"}
# # ]}
def ticket_no_fiscal(datos_ticket):
    """Print a non-fiscal ticket: number, client, items and total as free text.

    Expects datos_ticket with 'numero', 'cliente', 'itemsComprobante' and
    'total'. Returns a JSON string {"con_errores": 0|1, "descripcion": ...}.
    NOTE(review): as in ticket(), a failure in cargarLibreria() leaves
    Handle_HL unbound and the finally block raises NameError.
    """
    try :
        # get handle from DLL
        Handle_HL = cargarLibreria()
        # connect
        ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
        Handle_HL.ConfigurarPuerto( "0" )
        ejecutarComando(Handle_HL, Handle_HL.Conectar())
        # try cancel all
        ejecutarComando(Handle_HL, Handle_HL.Cancelar())
        # open
        ejecutarComando(Handle_HL, Handle_HL.AbrirComprobante( ID_TIPO_COMPROBANTE_NO_FISCAL ))
        ejecutarComando(Handle_HL, Handle_HL.ImprimirTextoLibre(enviar_texto("Numero: " + str(datos_ticket['numero']))))
        ejecutarComando(Handle_HL, Handle_HL.ImprimirTextoLibre(enviar_texto(datos_ticket['cliente'])))
        imprimirItems(datos_ticket['itemsComprobante'], Handle_HL)
        # subtotal
        ejecutarComando(Handle_HL, Handle_HL.ImprimirTextoLibre(enviar_texto(str("IMPORTE" + " ").ljust(40) + str(datos_ticket['total']))))
        # close
        ejecutarComando(Handle_HL, Handle_HL.CerrarComprobante())
        res = {"con_errores": 0, "descripcion": 'OK'}
    except Exception as err :
        res = {"con_errores": 1, "descripcion": str(err)}
    finally:
        ejecutarComando(Handle_HL, Handle_HL.Desconectar())
    return json.dumps(res)
def enviar_texto(string):
    """Encode *string* as an ASCII byte string for the printer interface.

    Raises UnicodeEncodeError for non-ASCII characters, as before.
    """
    encoded = bytes(string, "ascii")
    return encoded
def imprimirItems(datos_items, Handle_HL) :
    """Print each item as one free-text line: quantity, padded description, amount."""
    for item in datos_items :
        ejecutarComando(Handle_HL, Handle_HL.ImprimirTextoLibre(enviar_texto(str(item['cantidad'] + ' ' + item['descripcion'].ljust(40) + item['importeOriginal']))))
def encabezado() :
    """Set receipt header line 1 to a fixed string, printing each status code.

    NOTE(review): header text is hard-coded; the second "Cancel :" label
    actually reports the EstablecerEncabezado result.
    """
    #title
    print("*** Seteando Encabezado ***")
    # get handle from DLL
    Handle_HL = cargarLibreria()
    # connect
    ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
    Handle_HL.ConfigurarPuerto( "0")
    error = Handle_HL.Conectar()
    print("Connect : "),
    print(hex(error))
    # try cancel all
    error = Handle_HL.Cancelar()
    print("Cancel : "),
    print(hex(error))
    error = Handle_HL.EstablecerEncabezado(1, "Universidad Nacional de Quilmes")
    print("Cancel : "),
    print(hex(error))
    # disconect
    error = Handle_HL.Desconectar()
    print("Disconect : "),
    print(error)
def descargar_reportes() :
    """Download the printer's audit reports into ./downloads.

    NOTE(review): the date range "201021".."211021" is hard-coded — should
    probably be parameterized before reuse.
    """
    #title
    print("*** Seteando Encabezado ***")
    # get handle from DLL
    Handle_HL = cargarLibreria()
    # connect
    ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
    Handle_HL.ConfigurarPuerto( "0" )
    error = Handle_HL.Conectar()
    print("Connect : "),
    print(hex(error))
    # try cancel all
    error = Handle_HL.Cancelar()
    print("Cancel : "),
    print(hex(error))
    error = Handle_HL.Descargar(enviar_texto("201021"), enviar_texto("211021"), "downloads")
    print("Descargando Auditoria : "),
    print(hex(error))
    # disconect
    error = Handle_HL.Desconectar()
    print("Disconect : "),
    print(error)
def cierreZ():
    """Run the fiscal Z close and return a JSON result string.

    NOTE(review): if cargarLibreria() raises, Handle_HL is unbound and the
    finally block raises NameError instead of returning the error JSON.
    """
    #title
    print("*** Haciendo Cierre Z ***")
    try :
        # get handle from.so
        Handle_HL = cargarLibreria()
        # connect
        ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
        error = Handle_HL.ConfigurarPuerto( "0" )
        ejecutarComando(Handle_HL, Handle_HL.Conectar())
        # try cancel all
        ejecutarComando(Handle_HL, Handle_HL.Cancelar())
        ejecutarComando(Handle_HL, Handle_HL.ImprimirCierreZ())
        res = {"con_errores": 0, "descripcion": 'OK'}
    except Exception as err :
        res = {"con_errores": 1, "descripcion": str(err)}
    finally:
        ejecutarComando(Handle_HL, Handle_HL.Desconectar())
    return json.dumps(res)
def cierreX():
    """Run the fiscal X close and return a JSON result string (see cierreZ)."""
    print("*** Haciendo Cierre X ***")
    try :
        # get handle from.so
        Handle_HL = cargarLibreria()
        # connect
        error = Handle_HL.ConfigurarPuerto( "0" )
        ejecutarComando(Handle_HL, Handle_HL.Conectar())
        # try cancel all
        ejecutarComando(Handle_HL, Handle_HL.Cancelar())
        ejecutarComando(Handle_HL, Handle_HL.ImprimirCierreX())
        res = {"con_errores": 0, "descripcion": 'OK'}
    except Exception as err :
        res = {"con_errores": 1, "descripcion": str(err)}
    finally:
        ejecutarComando(Handle_HL, Handle_HL.Desconectar())
    return json.dumps(res)
def ejecutarComando(Handle_HL, comando):
    """Raise ValueError with the printer's error text unless *comando* succeeded.

    Success codes: 0x0, 0x05000024 and 0x0500002F.
    """
    codigos_ok = (0, 83886116, 83886127)
    if comando in codigos_ok:
        return
    raise ValueError(verificarError(Handle_HL, comando))
def verificarError(Handle_HL, error) :
    """Return the printer's textual description for *error*.

    NOTE(review): str(buffer.value) yields "b'text'" and the [1:] slice only
    drops the leading "b", leaving the quotes in the returned string — confirm
    callers expect that.
    """
    descripcion_error = create_string_buffer(b'\000' * 500)
    error = Handle_HL.ConsultarDescripcionDeError(error, descripcion_error, c_int(500).value)
    return str(descripcion_error.value)[1:]
def reportes() :
    """Send the raw report command "0970|0000|1|3" and print status codes.

    NOTE(review): the Cancelar() call is commented out, so the "Cancel :" line
    re-prints the Conectar() status; the printer is never disconnected here.
    """
    print("*** Reportes ***")
    # get handle from DLL
    Handle_HL = cargarLibreria()
    # connect
    ###Handle_HL.ConfigurarVelocidad( c_int(9600).value )
    Handle_HL.ConfigurarPuerto( "0" )
    error = Handle_HL.Conectar()
    print("Connect : "),
    print(hex(error))
    # try cancel all
    ##error = Handle_HL.Cancelar()
    print("Cancel : "),
    print(hex(error))
    error = Handle_HL.EnviarComando( "0970|0000|1|3")
    print("Reporte : "),
    print(hex(error))
def pruebaTicket(datos_ticket):
    """Dry-run of ticket(): prints the lines, then deliberately raises.

    Always returns the error JSON because of the intentional ValueError.
    NOTE(review): `return` inside `finally` would also swallow any exception
    raised by the except handler itself — acceptable for a test stub.
    """
    try :
        Handle_HL = cargarLibreria()
        for item in datos_ticket['itemsComprobante'] :
            print(str(item['cantidad'] + ' ' + item['descripcion'].ljust(40) + item['importeOriginal']))
        print(str("IMPORTE" + " ").ljust(42) + str(datos_ticket["total"]))
        raise ValueError("Esto es un error de prueba")
        res = {"con_errores": 0, "descripcion": "OK"}
    except Exception as err :
        res = {"con_errores": 1, "descripcion": str(err)}
    finally:
        return json.dumps(res)
# # -----------------------------------------------------------------------------
# # main
# # -----------------------------------------------------------------------------
# print(" ")
# print(" ")
# print("----Basic Test")
# # dll_version()
# # dll_ll_test_comm()
# # equipment_machine_version()
# # print_X_and_Z()
# # set_and_get_header_trailer()
# # set_and_get_datetime()
# # cancel_all()
# print(" ")
# print(" ")
# print("----Testing Sales")
# ##encabezado()
# # ticket_str = '{"cliente": "Martin Ramos", "items": [{"cantidad":"2", "codigo":"123456789", "descripcion": "coca cola", "importe": "120.00"}], "total": "240"}'
# # ticket(ticket_str)
# # ticket_no_fiscal(ticket_str)
# # cierreZ()
# # cierreX()
# # descargar_reportes()
# #prueba_json(ticket_str)
# cierreX()
# # ticket_from_ticket_invoice()
# # ticket_invoice()
# # ticket_invoice_B()
# # ticket_debit_note()
# # ticket_debit_note_B()
# # ticket_credit_note()
# # ticket_credit_note_B()
# print(" ")
# print(" ")
# print("----Test Close Day")
# # audit()
# # download()
| martin-ramos/epsonfiscalproxy | epsonproxy.py | epsonproxy.py | py | 21,977 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "platform.system",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ctypes.c_int",
"line_num... |
36222363296 | # -*- coding: utf-8 -*-
import typing as T
import polars as pl
from ..importer import (
TsvGzReader,
dataframe_to_list_table,
)
from ..images import icon_by_portal, image_by_map
from .go_cmd import with_teleport_command
if T.TYPE_CHECKING:
from rstobj import Image, ListTable
def lt_list_main_city_gps_and_label_and_image() -> T.List[
    T.Tuple['ListTable', str, 'Image']
]:
    """Build one (ListTable, city name, map Image) triple per main city.

    Reads main-city.tsv.gz, renames columns to their Chinese display names
    (城市=city, 图标=icon, 地点=location, 描述=description), attaches teleport
    commands, maps the icon column to portal icons, and groups rows by city.
    """
    reader = TsvGzReader(__file__)
    df = reader.read_df("main-city.tsv.gz")
    # Column names below are the rendered (Chinese) table headers — keep as-is.
    df = df.select([
        pl.col("zone").alias("城市"),
        pl.col("zone").alias("图标"),
        pl.col("loc_name").alias("地点"),
        pl.col("description").alias("描述"),
        pl.col("go_cmd").alias("go_cmd"),
    ])
    df1 = with_teleport_command(df, go_cmd_col="go_cmd")
    # Replace the icon column (initially a copy of zone) with portal icons.
    df2 = df1.with_column(pl.col("图标").apply(f=icon_by_portal))
    lst = list()
    for city in df2["城市"].unique(maintain_order=True):
        sub_df = df2.filter(df2["城市"] == city)
        image = image_by_map(city)
        image.height = 480
        lst.append(
            (
                dataframe_to_list_table(sub_df, title=f"{city}传送GM命令"),
                city,
                image,
            )
        )
    return lst
| MacHu-GWU/wotlkdoc-project | wotlkdoc/docs/gps/main_city.py | main_city.py | py | 1,226 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "importer.TsvGzReader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "polars.col",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "polars.col"... |
42368009926 | from django.contrib import admin
from sign.models import Event, Guest
# Register your models here.
# EventAdmin extends admin.ModelAdmin; ModelAdmin is the customization hook
# that controls how a model is presented in the Django admin site.
class EventAdmin(admin.ModelAdmin):
    # list_display: columns shown in the change list; each must be a field defined on Event
    list_display = ['id', 'name', 'status', 'address', 'start_time']
    # add a search box over these fields
    search_fields = ['name']
    # add a sidebar filter
    list_filter = ['status']
class GuestAdmin(admin.ModelAdmin):
    # columns shown in the guest change list
    list_display = ['realname', 'phone', 'email', 'sign', 'create_time', 'event']
    # search box over name and phone
    search_fields = ['realname', 'phone']
    # sidebar filter on sign-in status
    list_filter = ['sign']
# Attach the customized admin classes to their models.
admin.site.register(Event, EventAdmin)
admin.site.register(Guest, GuestAdmin)
| nhan118/learn | guest/sign/admin.py | admin.py | py | 786 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 13,
"usage_type": "attribute"
... |
21160846100 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:00:57 2018
@author: HP
"""
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional
from keras.layers import Embedding, Maximum, Merge, Input, concatenate
from collections import defaultdict
from nltk.corpus import brown,stopwords
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPooling2D
import random
import nltk
#brown.categories()
#brown.words(categories='news')
#brown.words(fileids=['cg22'])
#brown.sents(categories=['news', 'editorial', 'reviews'])
# --- Hyperparameters ---
# NOTE(review): batch_size, embedding_size, filters1-5, gru_output_size and
# lstm_output_size are defined but never referenced later in this script.
batch_size=30
embedding_size=128
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
# GRU
gru_output_size = 70
#LSTM
lstm_output_size = 70
# Build (words, category) pairs for every file in the Brown corpus.
dataset = [] # 500 samples
for category in brown.categories():
    for fileid in brown.fileids(category):
        dataset.append((brown.words(fileids = fileid),category))
# Lowercase every token.
dataset = [([w.lower() for w in text],category) for text,category in dataset]
# labels: category name for each document.
labels=[]
for sample in dataset:
    labels.append(sample[1])
# inputset: each document joined back into one space-separated string.
inputset=[]
for sample in dataset:
    inputset.append(' '.join(sample[0]))
# label_class: integer index of each category name.
categ=brown.categories()
label_class=[]
for x in labels:
    label_class.append(categ.index(x))
# len_finder: length of each joined document string.
# NOTE(review): these are *character* counts, not token counts; max(len_finder)
# is later used as the pad length -- confirm that is intended.
len_finder=[]
for dat in inputset:
    len_finder.append(len(dat))
# Build train/test splits: every 4th sample (1-based) is held out for test.
# Fix: the original compared small ints with `is` / `is not`, which tests
# object identity rather than value -- it only happens to work because
# CPython interns small ints, and it raises a SyntaxWarning on Python 3.8+.
# Value comparison (== / !=) is the correct operator here.
input_train=[]
j=0;
for zz in inputset:
    j=j+1
    if (j%4 != 0):
        input_train.append(zz)
input_test=[]
j=0;
for zz in inputset:
    j=j+1
    if (j%4 == 0):
        input_test.append(zz)
label_train=[]
j=0;
for zz in label_class:
    j=j+1
    if (j%4 != 0):
        label_train.append(zz)
label_test=[]
j=0;
for zz in label_class:
    j=j+1
    if (j%4 == 0):
        label_test.append(zz)
#one hot encoding
# y: one-hot matrix over the full label set (not referenced again below --
# only y_train / y_test are used by the model).
i=0
y=np.zeros((len(label_class),max(label_class)+1))
for x in label_class:
    y[i][x]=1
    i=i+1
# y_train / y_test: one-hot targets for the two splits.
# NOTE(review): each matrix is sized by max() of its *own* label subset, so
# the widths can disagree if the highest-index class is missing from a split.
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
    y_train[i][x]=1
    i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
    y_test[i][x]=1
    i=i+1
# Fit the tokenizer on the training texts only.
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# Pad every document to a common length.
# NOTE(review): len_finder holds *character* counts of the joined strings, so
# max_length is far larger than any token sequence -- confirm this is intended.
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# Load the whole pre-trained GloVe embedding (100-d) into a word -> vector dict.
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
    values = line.split()
    word = values[0]
    coefs = asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs; rows stay zero for
# words without a GloVe vector.
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# --- Model definition: two parallel Conv1D towers over frozen GloVe embeddings ---
rows,cols = padded_docs.shape
# NOTE(review): Input(shape=(rows, cols)) declares each *sample* to be the
# whole (num_docs x max_length) matrix; for per-document input one would
# expect Input(shape=(max_length,)).  Left unchanged here -- confirm intent.
input_shape = Input(shape=(rows,cols))
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)(input_shape)
tower_1 = Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1)(e)
tower_1 = MaxPooling1D(pool_size=pool_size)(tower_1)
tower_2 = Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1)(e)
tower_2 = MaxPooling1D(pool_size=pool_size)(tower_2)
merged = concatenate([tower_1, tower_2])
out = Dense(200, activation='relu')(merged)
out = Dense(15, activation='softmax')(out)
model = Model(input_shape, out)
# compile the model
# Fix: use categorical cross-entropy for one-hot multi-class targets with a
# softmax output; 'binary_crossentropy' optimizes the wrong objective and
# inflates the reported accuracy on multi-class problems.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,y_train, epochs=3, verbose=0)
#Testing the model
# Fix: encode the held-out texts with the tokenizer fitted on the *training*
# data (t).  The original fitted a brand-new Tokenizer on the test set, which
# maps words to different integer ids than the ones the embedding matrix and
# the trained model expect, making the evaluation meaningless.
tencoded_docs = t.texts_to_sequences(input_test)
#print(tencoded_docs)
# Pad exactly like the training documents.
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
#print(tpadded_docs)
# evaluate the model
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
{
"api_name": "nltk.corpus.brown.categories",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.brown",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.brown.fileids",
"line_number": 46,
"usage_type": "call"
},
{
"api_name... |
19121772365 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.decorators import api_view, action
from rest_framework import viewsets, permissions, status
from django.http import Http404
from django.shortcuts import render
from leaderboard.models import leaderboard_user
from leaderboard.serializers import LeaderboardUserSerializer
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
@api_view(['GET'])
def api_root(request, format=None):
    """API entry point: lists the top-level endpoints of the browsable API."""
    return Response({
        'scoreboard_users': reverse('user_list', request=request, format=format)
    })
class UserViewSet(viewsets.ModelViewSet):
    """
    retrieve:
    Return the given user.

    list:
    Return a list of all the existing users.

    create:
    Create a new user instance.

    delete:
    Delete a user instance.

    update:
    Update a user instance.

    point_up:
    Adds a point to the given user.

    point_down:
    Removes a point from the given user.
    """
    queryset = leaderboard_user.objects.all()
    serializer_class = LeaderboardUserSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    @action(detail=True)
    def point_up(self, request, *args, **kwargs):
        """Increment the user's score and return 200 OK."""
        # The nested get_object(self, pk) helpers in the original were never
        # called: self.get_object() resolves to DRF's GenericAPIView.get_object
        # (which already raises 404 for missing objects), so the dead code has
        # been removed.
        user = self.get_object()
        user.point_up()
        return Response(status=status.HTTP_200_OK)

    @action(detail=True)
    def point_down(self, request, *args, **kwargs):
        """Decrement the user's score and return 200 OK."""
        user = self.get_object()
        user.point_down()
        return Response(status=status.HTTP_200_OK)

    def perform_create(self, serializer):
        """Persist a newly created user."""
        serializer.save()
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.reverse.reverse",
"line_number": 17,
"usage_type": "call"
},
... |
14876640371 | from django.db import models
from django.utils.text import slugify
class Pet(models.Model):
    """A pet with a unique, auto-generated slug of the form "<id>-<name>"."""

    MAX_LENGTH_NAME = 30

    name = models.CharField(
        max_length=MAX_LENGTH_NAME,
        null=False,
        blank=False,
    )

    personal_pet_photo = models.URLField(
        null=False,
        blank=False,
    )

    # Auto-generated on first save; never user-editable.
    pet_slug = models.SlugField(
        unique=True,
        editable=False,
        null=False,
        blank=True,
    )

    date_of_birth = models.DateField(
        null=True,
        blank=True,
    )

    def save(self, *args, **kwargs):
        """Persist the pet, generating the slug after the first save.

        The first save is needed so ``self.id`` exists before the slug is
        built from it.  Fix: the second save now only runs when the slug was
        just generated; the original unconditionally saved a second time,
        writing every instance to the database twice on every call.
        """
        result = super().save(*args, **kwargs)
        if not self.pet_slug:
            self.pet_slug = slugify(f"{self.id}-{self.name}")
            result = super().save(*args, **kwargs)
        return result

    def __str__(self):
        return f"Name={self.name} - ID={self.id}"
| Ivo2291/petstagram | petstagram/pets/models.py | models.py | py | 820 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
3013315354 | import time
import platform
import cpuinfo
# --- System information ---
os_version = platform.system()
# Query cpuinfo once: the call is expensive, and the original invoked it
# twice (once per printed field).
cpu_info = cpuinfo.get_cpu_info()
print('CPU: ' + cpu_info.get('brand_raw', "Unknown"))
print('Arch: ' + cpu_info.get('arch_string_raw', "Unknown"))
print(f'OS: {os_version}')

print('\nBenchmarking: \n')

# --- Benchmark configuration ---
# (the redundant `x = x` self-assignments from the original were removed)
start_benchmark = 10000  # iterations per attempt; change this if you like (sample: 1000, 5000, etc)
repeat_benchmark = 10    # attempts; change this if you like (sample: 3, 5, etc)

# Run the floating-point workload `repeat_benchmark` times and average the
# wall-clock durations.
average_benchmark = 0
for _ in range(repeat_benchmark):
    start = time.time()
    for _ in range(start_benchmark):
        for x in range(1, 1000):
            3.141592 * 2**x
        for x in range(1, 10000):
            float(x) / 3.141592
        for x in range(1, 10000):
            3.141592 / x
    duration = round(time.time() - start, 3)
    average_benchmark += duration
    print(f'Time: {duration}s')

average_benchmark = round(average_benchmark / repeat_benchmark, 3)
# Derive the repeat count in the message instead of hard-coding "10".
print(f'Average (from {repeat_benchmark} repeats): {average_benchmark}s')
{
"api_name": "platform.system",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cpuinfo.get_cpu_info",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cpuinfo.get_cpu_info",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
21990463619 | from flask import Flask, g, render_template,request,redirect,session,url_for,flash
import sqlite3
app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'
db_path = input("Enter database path: ")
# =============================================================================
# /Users/Eugen/Desktop/Final/blog.db
# =============================================================================
def connect_db():
    """Open a SQLite connection to the path entered at startup, with Row access."""
    sql = sqlite3.connect(db_path)
    # sqlite3.Row lets templates access columns by name as well as by index.
    sql.row_factory = sqlite3.Row
    return sql
def get_db():
    """Return the per-appcontext SQLite connection, creating it on first use.

    Bug fix: the attribute stored on ``g`` is ``sqlite3_db``, but the guard
    used to test ``hasattr(g, 'sqlite3')`` -- a name that is never set -- so
    a fresh connection was opened (and the previous one leaked) on every call.
    """
    if not hasattr(g, 'sqlite3_db'):
        g.sqlite3_db = connect_db()
    return g.sqlite3_db
@app.before_request
def before_request():
    """Attach a DB handle to ``g`` and ensure the session has a username key."""
    g.db = get_db()
    if 'username' not in session:
        session['username']=None
@app.teardown_appcontext
def close_db(error):
    """Close the request's SQLite connection when the app context tears down.

    Bug fix: the guard checked ``hasattr(g, 'sqlite_db')`` while the attribute
    is actually named ``sqlite3_db``, so the connection was never closed.
    """
    if hasattr(g, 'sqlite3_db'):
        g.sqlite3_db.close()
@app.route('/')
def index():
    """Front page: newest posts first; logged-in users get the richer template."""
    cur = g.db.execute('SELECT * FROM posts ORDER BY published_date DESC')
    data = cur.fetchall()
    username = session['username']
    if username is not None:
        return render_template('index.html', data=data, username=username)
    return render_template('index2.html', data=data)
@app.route('/login',methods=['GET','POST'])
def login():
    """Combined login/registration endpoint.

    POST: tries to INSERT the submitted credentials as a new user; if the
    INSERT fails (e.g. the username already exists), falls back to checking
    the submitted username/password against the existing rows.  Either way
    the request ends with a redirect to /dashboard.
    GET: renders the login form.

    NOTE(review): passwords are stored and compared in plain text, and a
    failed password check still redirects to /dashboard (without setting the
    session) -- both look worth revisiting.
    """
    if request.method =='POST':
        username = request.form['username']
        password = request.form['password']
        cur = g.db.execute('SELECT * from users')
        user_data = cur.fetchall()
        try:
            # New user: register and log them in.
            g.db.execute('INSERT into users (username,password) values (?,?)',[username,password])
            g.db.commit()
            session['username'] = request.form['username']
        except Exception as e:
            # INSERT failed: verify credentials against existing rows instead.
            for row in user_data:
                if(row[0] == username and row[1]==password ):
                    session['username'] = request.form['username']
            print(e)
        return redirect('/dashboard')
    else:
        return render_template('login.html')
@app.route('/logout',methods=['GET'])
def logout():
    """Clear the session username and return to the front page."""
    session['username']=None
    return redirect('/')
@app.route('/dashboard',methods=['GET'])
def dashboard():
    """Show the logged-in user's own posts; redirect anonymous users to /login."""
    username = session['username']
    if username != None:
        cur = g.db.execute("SELECT * FROM posts WHERE author=?",[username])
        data = cur.fetchall()
        return render_template('dashboard.html',data=data,username=username)
    else:
        return redirect('/login')
@app.route('/add',methods=['GET','POST'])
def add():
    """Create a blog post.

    GET renders the form; POST inserts the post, but only when the submitted
    author matches the logged-in user (or the user is 'admin').  A failing
    INSERT (duplicate title+author) flashes an error and returns to the form.
    Anonymous users are redirected to the front page.
    """
    username=session['username']
    if username != None:
        if request.method =='GET':
            return render_template('add.html',username=username)
        elif request.method == 'POST':
            try:
                if(username==request.form['author'] or username=='admin'):
                    g.db.execute('INSERT into posts (title,author,content,published_date) values (?,?,?,?) ',[request.form['title'],request.form['author'],request.form['content'],request.form['published_date']])
                    g.db.commit()
                    return redirect('/')
                else:
                    flash('You are not authorized to post to the blog hosted by {}'.format(request.form['author']))
                    return redirect('/add')
            except Exception as e:
                print(e)
                flash('Duplicate Title and Author!','error')
                return redirect('/add')
    else:
        return redirect('/')
@app.route('/delete',methods=['POST'])
def delete():
    """Delete the post identified by (title, author); anonymous users go home.

    NOTE(review): any logged-in user can delete any author's post -- there is
    no ownership check here.
    """
    username=session['username']
    if username != None:
        del_title = request.form['del_title']
        del_author = request.form['del_author']
        g.db.execute("DELETE FROM posts WHERE title=? AND author=?",[del_title,del_author])
        g.db.commit()
        return redirect('/dashboard')
    else:
        return redirect('/')
@app.route('/edit',methods=['GET','POST'])
def edit():
    """Redirect to the /update page for the selected (title, author) post.

    The original handler had two byte-identical branches for GET and POST;
    they are collapsed into a single path here (behavior unchanged).

    NOTE(review): ``request.form`` is empty for GET requests, so in practice
    this endpoint only works when reached via POST -- confirm and consider
    dropping GET from the allowed methods.
    """
    username = session['username']
    if username is None:
        return redirect('/')
    e_title = request.form['edit_title']
    e_author = request.form['edit_author']
    return redirect(url_for('update', e_title=e_title, e_author=e_author))
@app.route('/update/<e_title>/<e_author>',methods=['GET','POST'])
def update(e_title,e_author):
    """Edit an existing post identified by (e_title, e_author).

    GET shows the edit form pre-filled with the current row; POST rewrites
    the row (title, author, content, published_date) and returns to the
    dashboard.  Anonymous users are redirected to the front page.
    """
    username=session['username']
    if username != None:
        if request.method == 'GET':
            cur = g.db.execute("SELECT * FROM posts WHERE title=? AND author=?",[e_title,e_author])
            data = cur.fetchall()
            return render_template('update.html',data=data,username=username)
        elif request.method == 'POST':
            # Original identifiers come from hidden form fields; they locate
            # the row while the visible fields provide the new values.
            e_title=request.form['e_title']
            e_author=request.form['e_author']
            g.db.execute("UPDATE posts SET title=?,author=?,content=?,published_date=? WHERE title=? AND author=?",[request.form['title'],request.form['author'],request.form['content'],request.form['published_date'],e_title,e_author])
            g.db.commit()
            return redirect('/dashboard')
    else:
        return redirect('/')
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
| EugenMorarescu/IS211_Final | Final/final_project.py | final_project.py | py | 5,821 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_numbe... |
12689186127 | import matplotlib.pyplot as plt
import requests
import numpy as np
# Enter Spotify web API access token credentials below
# If you don't have them you can get them here:
# https://developer.spotify.com/dashboard/applications
# Spotify app credentials (placeholders -- fill in before running).
client_id = "YOUR_CLIENT_ID_HERE"
client_secret = "YOUR_SECRET_ID_HERE"
# The below code generates a temporary access token using the credentials
# entered above through the Spotify web API (client-credentials flow).
raw_response = requests.post('https://accounts.spotify.com/api/token', {
    'grant_type': 'client_credentials',
    'client_id': client_id,
    'client_secret': client_secret,
})
print(raw_response)
# Converts the response body to a dict ('access_token' is read from it below).
json_response = raw_response.json()
# Checks response code and runs search if connection made,
# otherwise tries to provide useful advice.
if raw_response.status_code == 200:
    print("Connection established and authorised.")
    # Asks user for an artist. The artists information is then retrieved from
    # the Spotify web API.
    artist_name = input("Please enter an artist: ")
    # Checks if nothing has been entered by user and provides default answer
    if artist_name == "":
        print("No artist entered, so you will be provided Justin Bieber instead.")
        artist_name = "Justin Bieber"
    artist_info = requests.get('https://api.spotify.com/v1/search',
                    headers={'authorization': "Bearer " + json_response['access_token']},
                    params={'q': artist_name, 'type': 'artist'})
    # Converts the artist_info to json
    artist_info = artist_info.json()
    # Prints artists name rating and a link to them on Spotify.
    # NOTE(review): ["items"][0] raises IndexError when the search returns no
    # match -- consider handling an empty result list.
    print("You have selected: {} \nThis artist has a popularity of {}%".format(artist_info["artists"]["items"][0]["name"], artist_info["artists"]["items"][0]["popularity"]) )
    print(artist_info["artists"]["items"][0]["external_urls"]["spotify"])
    # To see all json data uncomment the below...
    # print(artist_info)
    # Below draws a bar chart showing the artist and popularity
    fig, ax = plt.subplots()
    # Gets data from converted json file about the artist and uses some sample data
    # to make the results more interesting.
    names = (artist_info["artists"]["items"][0]["name"], "The Beatles", "Metallica", "Dido")
    y_pos = np.arange(len(names))
    popularities = (artist_info["artists"]["items"][0]["popularity"], 88, 84, 75)
    # Chart titles and specifics listed below
    ax.barh(y_pos, popularities, align='center')
    ax.set_yticks(y_pos, labels=names)
    ax.set_xlabel('Popularity %')
    ax.set_xlim([0, 100])
    ax.set_title('Artists Popularity')
    # Displays the chart once the below is ran
    plt.tight_layout()
    plt.show()
elif raw_response.status_code == 400:
    print("Unable to connect. This is most likely due to "
          "invalid 'client_id' or 'client_secret'.")
    print("For more information check the website: "
          "'https://developer.spotify.com/documentation/general/guides/authorization/'")
# Any other response code grouped here, can add more to this later.
else:
    print("Unable to connect. Error unknown.")
| Oliver343/ArtistSearchAPI | ArtistSearch.py | ArtistSearch.py | py | 3,149 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
72453729787 | from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Auto-discover each installed app's admin registrations.
admin.autodiscover()
# NOTE(review): this uses the pre-1.6 Django URL style -- patterns(),
# string view references and direct_to_template -- all removed in modern
# Django.  Documented as-is; migrating is a separate task.
urlpatterns = patterns('',
    # Example:
    # (r'^lsdesign/', include('lsdesign.foo.urls')),
    # Homepage: render a static template directly.
    url(r'^$',
        'django.views.generic.simple.direct_to_template',
        {'template': 'homepage.html'}),
    # Portfolio app handles everything under /work/.
    (r'^work/', include('lsdesign.portfolio.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/(.*)', admin.site.root),
)
# Serve media files from Django itself only when explicitly enabled
# (development convenience; a real web server should do this in production).
if getattr(settings, 'SERVE_STATIC_MEDIA', False):
    urlpatterns += patterns('django.views.static',
        (r'^%s(?P<path>.*)' % settings.MEDIA_URL, 'serve',
            {'document_root': settings.MEDIA_ROOT}),
    )
| cyndi/lacey-springs-designs | lsdesign/urls.py | urls.py | py | 955 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 22,
"usage_type": "attribute"
},
{
... |
25958786704 | from django.db import models
from django.forms import ModelForm
from django.utils import timezone
class Author(models.Model):
    """An article author; save() de-duplicates on name (get-or-create style)."""
    NATION_CHOICES = (
        (None, 'Nationality'),
        ('CH', 'China'),
        ('US', 'America'),
        ('UK', 'England'),
        ('GE', 'German'),
        ('CA', 'Canada'),
    )
    name = models.CharField(max_length=80, unique=False, verbose_name='Author name')
    nation = models.CharField(max_length=80, unique=False, verbose_name='Nationality', choices=NATION_CHOICES)
    def save(self, *args, **kwargs):
        """Save only when no author with this name exists yet.

        If one already exists, this instance is NOT persisted and the existing
        row is returned instead (its fields, e.g. nation, are left untouched).
        NOTE(review): name is not unique=True, so .get() can raise
        MultipleObjectsReturned, and two concurrent saves can still create
        duplicates -- consider a database-level unique constraint.
        """
        try:
            old_author = Author.objects.get(name=self.name)
        except Author.DoesNotExist:
            super().save(*args, **kwargs)
            return self
        else:
            return old_author
    def __str__(self):
        return self.name
# Create your models here.
class Article(models.Model):
    """A blog article with a unique title, written by an optional Author."""
    title = models.CharField(max_length=80, unique=True, verbose_name='Article name')
    pub_date = models.DateTimeField('date published')
    # Deleting the Author cascades to their articles.
    author = models.ForeignKey(Author, null=True, on_delete=models.CASCADE)
    context = models.TextField()
    def __str__(self):
        return self.title
class Comments(models.Model):
    """A reader comment on an Article; hidden (active=False) until approved."""
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    name = models.CharField(max_length=80, null=True)
    body = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    active = models.BooleanField(default=False)
    class Meta:
        # Oldest comments first.
        ordering = ['created_on']
    def __str__(self):
        return 'Comment {} by {}'.format(self.body, self.name)
class CommentsForm(ModelForm):
    """Form for posting a comment on an article."""
    class Meta:
        model = Comments
        fields = ['name', 'body', 'active']
class AuthorForm(ModelForm):
    """Form for creating/editing an Author (all model fields exposed)."""
    class Meta:
        model = Author
        fields = '__all__'
class ArticleForm(ModelForm):
    """Form for creating/editing an Article; the author field is not included."""
    class Meta:
        model = Article
        fields = ['title', 'pub_date', 'context']
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": ... |
42749263557 | #!/usr/bin/env python
# coding=utf-8
# wujian@2018
import os
import argparse
import numpy as np
from libs.utils import istft, get_logger
from libs.opts import StftParser
from libs.data_handler import SpectrogramReader, WaveWriter
from libs.beamformer import DSBeamformer
logger = get_logger(__name__)
def run(args):
    """Apply a delay-and-sum beamformer to each multi-channel utterance.

    Reads STFTs via SpectrogramReader, steers a DSBeamformer towards the
    given DoA, reconstructs time-domain samples with istft() and writes them
    under args.dst_dir.
    """
    stft_kwargs = {
        "frame_len": args.frame_len,
        "frame_hop": args.frame_hop,
        "window": args.window,
        "center": args.center,
        "transpose": False
    }
    # Comma-separated microphone positions of the linear array, as floats.
    topo = list(map(float, args.linear_topo.split(",")))
    # Fold negative angles into [0, 180]; anything outside is rejected.
    doa = args.doa if args.doa > 0 else 180 + args.doa
    if doa < 0 or doa > 180:
        raise RuntimeError(f"Illegal value for DoA: {args.doa:.2f}")
    spectrogram_reader = SpectrogramReader(
        args.wav_scp,
        round_power_of_two=args.round_power_of_two,
        **stft_kwargs)
    beamformer = DSBeamformer(topo)
    logger.info(f"Initialize {len(topo):d} channel DSBeamformer")
    with WaveWriter(args.dst_dir, fs=args.fs) as writer:
        for key, stft_src in spectrogram_reader:
            stft_enh = beamformer.run(doa,
                                      stft_src,
                                      c=args.speed,
                                      sample_rate=args.fs)
            # Per-utterance power, presumably used by istft() to scale the
            # reconstructed signal -- see libs.utils.istft.
            power = spectrogram_reader.power(key)
            samps = istft(stft_enh, **stft_kwargs, power=power)
            writer.write(key, samps)
    logger.info(f"Processed {len(spectrogram_reader):d} utterances")
if __name__ == "__main__":
    # Command-line front end: STFT options come from the shared StftParser.
    parser = argparse.ArgumentParser(
        description="Command to apply delay and sum beamformer.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    parser.add_argument("wav_scp",
                        type=str,
                        help="Rspecifier for multi-channel wave file")
    parser.add_argument("dst_dir",
                        type=str,
                        help="Directory to dump enhanced results")
    parser.add_argument("--fs",
                        type=int,
                        default=16000,
                        help="Sample frequency of input wave")
    # NOTE(review): the speed of sound in air is ~343 m/s; the default of 240
    # looks low -- confirm the intended units/medium.
    parser.add_argument("--speed",
                        type=float,
                        default=240,
                        help="Speed of sound")
    parser.add_argument("--linear-topo",
                        type=str,
                        required=True,
                        help="Topology of linear microphone arrays")
    parser.add_argument("--doa",
                        type=float,
                        default=90,
                        help="Given DoA for DS beamformer, in degrees")
    args = parser.parse_args()
    run(args)
{
"api_name": "libs.utils.get_logger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "libs.data_handler.SpectrogramReader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "libs.beamformer.DSBeamformer",
"line_number": 35,
"usage_type": "call"
},
{... |
40407441083 | import sqlite3 as sql
def CreateDatabase():
coneccion = sql.connect("./Database/datos.db")
coneccion.commit()
coneccion.close()
CreateInitialTables()
print("Se ha creado la base de datos")
def SendQuery(query):
query = query
coneccion = sql.connect("./Database/datos.db")
cursor = coneccion.cursor()
cursor.execute(query)
data = cursor.fetchall()
coneccion.commit()
coneccion.close()
return data
def CreateInitialTables():
SendQuery("CREATE TABLE Chicas (cid integer primary key, nombre text not null)")
SendQuery("CREATE TABLE Asistencia (cid integer, fecha text not null, asistencia text, hora text, PRIMARY KEY (cid, fecha))")
SendQuery(" CREATE TABLE Pagos (cid integer, fecha text not null, sueldo integer,bonus integer, fichas integer, extras integer,info text,PRIMARY KEY (cid, fecha))")
def test_data():
import random
asistencia = ['si', 'no']
for i in range(10):
SendQuery(f"INSERT INTO Pagos (cid, fecha, sueldo, bonus,fichas, extras, info) VALUES ('{i+1}',date('now'),0, {random.randint(1000,99999)}, {random.randint(1000,99999)}, {random.randint(1000,99999)}, '')")
SendQuery(f"INSERT INTO Chicas (nombre) VALUES ('chica{str(i + 10)}')")
SendQuery(f"INSERT INTO Asistencia (cid, fecha, asistencia, hora) VALUES ('{i+1}',date('now'), '{random.choice(asistencia)}', '16:00')")
if __name__ == "__main__":
    # Manual smoke test: create the database and fill it with random rows.
    CreateDatabase()
    test_data()
    #SendQuery("SELECT nombre, fecha, sueldo, fichas, extras FROM Pagos, Chicas WHERE Chicas.cid = Pagos.cid")
{
"api_name": "sqlite3.connect",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.choice",
"li... |
27391464453 | import logging
import os
from urllib.parse import urljoin, urlunparse
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import OWL, RDF, RDFS, XSD
from crunch_uml import const, db, util
from crunch_uml.excpetions import CrunchException
from crunch_uml.renderers.renderer import ModelRenderer, RendererRegistry
logger = logging.getLogger()
class LodRenderer(ModelRenderer):
    '''
    Renders all model packages as a Linked Data (OWL) ontology graph,
    where a model package is a package that includes at least 1 class.
    Subclasses implement writeToFile() to pick the serialization format.
    '''

    def writeToFile(self, graph, args):
        # Overridden by the format-specific subclasses (ttl/rdf/json-ld).
        pass

    def render(self, args, database: db.Database):
        """Build an OWL graph (classes, attributes, inheritance, associations)
        from the selected models and hand it to writeToFile()."""
        if args.linked_data_namespace is None:
            logger.warning(
                f'No namespace provided via parameter "linked_data_namespace", using default {const.DEFAULT_LOD_NS}'
            )
            args.linked_data_namespace = const.DEFAULT_LOD_NS
        elif not isinstance(args.linked_data_namespace, str):
            # Accept a parsed URL tuple and flatten it back to a string.
            args.linked_data_namespace = urlunparse(args.linked_data_namespace)

        # sourcery skip: raise-specific-error
        MYNS = Namespace(args.linked_data_namespace)  # noqa: F841
        schema = Namespace("http://schema.org/")  # noqa: F841

        # Create graph
        g = Graph()

        # Get list of packages that are to be rendered
        models = self.getModels(args, database)
        # Fix: the original guard was `if len(models) is None:`, which is never
        # true (len() always returns an int), so an empty selection silently
        # produced an empty graph instead of raising the intended error.
        if not models:
            msg = "Cannot render output: packages does not exist"
            logger.error(msg)
            raise CrunchException(msg)

        class_dict = {}  # maps class guid -> namespaced URI

        # First pass: add every class and its (primitive-typed) attributes.
        for model in models:
            modelname = util.remove_substring(model.name, 'model')
            ns = Namespace(urljoin(str(args.linked_data_namespace), f"/{modelname}/"))
            for cls in model.classes:
                # Update the lookup dict first.
                class_dict[cls.id] = ns[cls.id]
                # Add the class itself.
                g.add((ns[cls.id], RDF.type, OWL.Class))
                g.add((ns[cls.id], RDFS.label, Literal(cls.name)))
                if cls.definitie is not None:
                    g.add((ns[cls.id], RDFS.comment, Literal(cls.definitie)))

                for attribute in cls.attributes:
                    # Add each named, primitive-typed attribute as a datatype property.
                    if attribute.name is not None and attribute.primitive is not None:
                        g.add((ns[attribute.id], RDF.type, OWL.DatatypeProperty))
                        g.add((ns[attribute.id], RDFS.domain, ns[cls.id]))
                        g.add((ns[attribute.id], RDFS.label, Literal(attribute.name)))
                        g.add((ns[attribute.id], RDFS.range, XSD.string))
                        if attribute.definitie is not None:
                            g.add((ns[attribute.id], RDFS.comment, Literal(attribute.definitie)))

        # Second pass: add inheritance and associations.
        # NOTE(review): `ns` below still holds the namespace of the *last*
        # model from the first pass, so association URIs always use that
        # namespace regardless of the owning model -- confirm this is intended.
        for model in models:
            for cls in model.classes:
                # First set inheritance
                for subclass in cls.subclasses:
                    super_cls = class_dict.get(cls.id)
                    if subclass.superclass is not None:
                        sub_cls = class_dict.get(subclass.superclass.id)
                        if super_cls is not None and sub_cls is not None:
                            g.add((sub_cls, RDFS.subClassOf, super_cls))

                # Then set associations
                for assoc in cls.uitgaande_associaties:
                    from_cls = class_dict.get(cls.id)
                    to_cls = class_dict.get(assoc.dst_class.id)
                    if from_cls is not None and to_cls is not None:
                        # Add the association as an object property.
                        g.add((ns[assoc.id], RDF.type, OWL.ObjectProperty))
                        g.add((ns[assoc.id], RDFS.domain, from_cls))
                        g.add((ns[assoc.id], RDFS.range, to_cls))
                        g.add((ns[assoc.id], RDFS.label, Literal(assoc.name)))
                        if assoc.definitie is not None:
                            g.add((ns[assoc.id], RDFS.comment, Literal(assoc.definitie)))

        self.writeToFile(g, args)
@RendererRegistry.register(
    "ttl",
    descr='Renderer that renders Linked Data ontology in turtle from the supplied models, '
    + 'where a model is a package that includes at least one Class. '
    + 'Needs parameter "output_lod_url".',
)
class TTLRenderer(LodRenderer):
    """Serializes the generated ontology graph as Turtle (<outputfile base>.ttl)."""

    def writeToFile(self, graph, args):
        # Swap whatever extension args.outputfile carries for ".ttl".
        target = os.path.splitext(args.outputfile)[0] + '.ttl'
        with open(target, 'w') as handle:
            handle.write(graph.serialize(format='turtle'))
@RendererRegistry.register(
    "rdf",
    descr='Renderer that renders Linked Data ontology in RDF from the supplied models, '
    + 'where a model is a package that includes at least one Class. '
    + ' Needs parameter "output_lod_url".',
)
class RDFRenderer(LodRenderer):
    """Serializes the generated ontology graph as RDF/XML (<outputfile base>.rdf)."""

    def writeToFile(self, graph, args):
        # Swap whatever extension args.outputfile carries for ".rdf".
        target = os.path.splitext(args.outputfile)[0] + '.rdf'
        with open(target, 'w') as handle:
            handle.write(graph.serialize(format='xml'))
@RendererRegistry.register(
    "json-ld",
    descr='Renderer that renders Linked Data ontology in JSON-LD from the supplied models, '
    + 'where a model is a package that includes at least one Class. '
    + ' Needs parameter "output_lod_url".',
)
class JSONLDRenderer(LodRenderer):
    """Serializes the generated ontology graph as JSON-LD (<outputfile base>.jsonld)."""

    def writeToFile(self, graph, args):
        # Swap whatever extension args.outputfile carries for ".jsonld".
        target = os.path.splitext(args.outputfile)[0] + '.jsonld'
        with open(target, 'w') as handle:
            handle.write(graph.serialize(format='json-ld'))
| brienen/crunch_uml | crunch_uml/renderers/lodrenderer.py | lodrenderer.py | py | 6,268 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "crunch_uml.renderers.renderer.ModelRenderer",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "crunch_uml.db.Database",
"line_number": 24,
"usage_type": "attribute"
},
... |
37710422808 | from azure.cognitiveservices.vision.customvision.training import training_api
from azure.cognitiveservices.vision.customvision.training.models import ImageUrlCreateEntry
from azure.cognitiveservices.vision.customvision.prediction import prediction_endpoint
from azure.cognitiveservices.vision.customvision.prediction.prediction_endpoint import models
import os
import requests
import string
def identify(image):
    """Classify an image file with the Azure Custom Vision prediction endpoint.

    Returns the tag of the highest-probability prediction above 0.5, or None
    when no prediction clears that threshold.

    Bug fix: the original never recorded the probability of the *first*
    accepted prediction (percent stayed 0), so any later prediction above the
    0.5 threshold replaced it even when its score was lower.

    NOTE(review): the prediction key and project id are hard-coded below;
    move them to configuration before publishing this code.
    """
    predictor = prediction_endpoint.PredictionEndpoint("15c7f6bd782b4fab887295c83a608f42")
    with open(image, mode="rb") as test_data:
        results = predictor.predict_image("ac4d0722-29ce-4116-b9d2-225b453a3df3", test_data.read())
    answer = None
    percent = 0
    for prediction in results.predictions:
        # Accept only predictions above the threshold that beat the best so far.
        if prediction.probability > .5 and prediction.probability > percent:
            answer = prediction.tag
            percent = prediction.probability
    return answer
#takes in a list of paths to photos including row/col index
def images2Circ(photos):
    """Build a 6x8 circuit grid from photo paths, classifying each with identify().

    NOTE(review): the row/column indices are read from fixed character
    offsets 8 and 10 of each path (rind/cind), which assumes a very specific
    filename layout -- int() raises ValueError if those characters are not
    digits.  Confirm against the actual filenames produced upstream.
    """
    #for p in photos:
    #    print(identify(p))
    rind = 8
    cind = 10
    rows = 6
    cols = 8
    # Grid of gate names; None marks an empty cell.
    circuit = [[None for i in range(cols)] for j in range(rows)]
    for pic in photos:
        row = int(pic[rind])
        col = int(pic[cind])
        print(row,col)
        gate = identify(pic)
        # "Corner" tags are treated as empty cells.
        if gate == "Corner":
            circuit[row][col] = None
        else:
            circuit[row][col] = gate
    print(circuit)
    return circuit
#print(images2Circ(["pic/t3/aaa2a3.jpg","pic/t3/bbb1b4.jpg","pic/t3/ccc5c2.jpg","pic/t3/ddd5d5.jpg","pic/t3/eee0e0.jpg"]))
| Guptacos/tartanhacks2018 | image_recognition.py | image_recognition.py | py | 1,718 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "azure.cognitiveservices.vision.customvision.prediction.prediction_endpoint.PredictionEndpoint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "azure.cognitiveservices.vision.customvision.prediction.prediction_endpoint",
"line_number": 16,
"usage_type": "name"
... |
15687075462 | """Module that contains reusable functions to interact with azure."""
import os
import yaml
import json
import shutil
from typing import Tuple, List, Dict, Union, Optional
from azureml.core import Workspace, Model, Dataset, Datastore
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.core.authentication import ServicePrincipalAuthentication, InteractiveLoginAuthentication
from sentence_transformers import SentenceTransformer, CrossEncoder
from azure.ai.ml import MLClient
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
def get_sp_auth():
    """
    Function that returns an authentication object that can be used to authenticate with the azure ml workspace.

    The service-principal credentials are resolved in this order:
    1. A local JSON file at .cloud/.azure/AZURE_SERVICE_PRINCIPAL.json (its
       content is copied into the AZURE_SERVICE_PRINCIPAL env var).
    2. The AZURE_SERVICE_PRINCIPAL environment variable itself.
    3. Plain-text tenant.txt / appid.txt / password.txt files in the cwd.
    If none of these yields credentials, interactive login is used.

    Returns:
        ServicePrincipalAuthentication|InteractiveLoginAuthentication: Authentication object that can be used to authenticate with the azure ml workspace.
    """
    # In case you're working on a local machine with the service principal
    # JSON located in the workfolder root: load its content into the env var.
    f_path = '.cloud/.azure/AZURE_SERVICE_PRINCIPAL.json'
    if os.path.exists(f_path):
        with open(f_path) as f:
            cred = f.read()
        os.environ['AZURE_SERVICE_PRINCIPAL'] = cred
    service_principle_str = os.environ.get('AZURE_SERVICE_PRINCIPAL')
    interactive_login = False
    # The sp file needs to exist or the env var must already be set
    # (with codespace secrets, for example).
    if service_principle_str is not None:
        service_principle_cred = json.loads(service_principle_str)
        if service_principle_cred:
            print("Authenticate with environment variable.")
            tenant_id = service_principle_cred["tenant"]
            sp_id = service_principle_cred["appId"]
            sp_pwd = service_principle_cred["password"]
        else:
            # Empty JSON payload: fall back to the plain-text credential files.
            if os.path.exists("tenant.txt") and os.path.exists("appid.txt") and os.path.exists("password.txt"):
                print("Authenticate with text files data.")
                tenant_id = open("tenant.txt").read()
                sp_id = open("appid.txt").read()
                sp_pwd = open("password.txt").read()
            else:
                print("Interactive login.")
                interactive_login = True
    else:
        interactive_login = True
    if interactive_login:
        # NOTE(review): the tenant id is hard-coded here -- confirm it
        # matches the target Azure AD tenant.
        return InteractiveLoginAuthentication(tenant_id="95101651-f23a-4239-a566-84eb874f75f4")
    else:
        sp_auth = ServicePrincipalAuthentication(
            tenant_id=tenant_id, service_principal_id=sp_id, service_principal_password=sp_pwd
        )
        return sp_auth
def get_ws(stage="dev") -> Workspace:
    """Return the AzureML Workspace for a deployment stage.

    Args:
        stage (str, optional): Deployment stage name; one of
            dev/uat/staging/prod. Defaults to "dev".

    Raises:
        ValueError: If *stage* is not a known deployment stage.

    Returns:
        Workspace: Workspace loaded from the stage's JSON config file.
    """
    valid = {"dev", "uat", "staging", "prod"}
    if stage not in valid:
        raise ValueError("Invalid stage for workspace: got %s, should be from %s" % (stage, valid))
    auth = get_sp_auth()
    cfg_path = ".cloud/.azure/{stage}_config.json".format(stage=stage)
    return Workspace.from_config(cfg_path, auth=auth)
def get_ml_client(stage: str = "dev"):
    """Return an azure.ai.ml MLClient for the given deployment stage.

    Args:
        stage (str, optional): Deployment stage name; one of
            dev/uat/staging/prod. Defaults to "dev".

    Raises:
        ValueError: If *stage* is not a known deployment stage.

    Returns:
        MLClient: Client configured from the stage's JSON config file,
        usable to interact with the ml workspace.
    """
    valid = {"dev", "uat", "staging", "prod"}
    if stage not in valid:
        raise ValueError("Invalid stage for workspace: got %s, should be from %s" % (stage, valid))
    credential = get_sp_auth()
    cfg_path = ".cloud/.azure/{stage}_config.json".format(stage=stage)
    return MLClient.from_config(credential=credential, path=cfg_path)
def get_secret_client(stage: str = "dev") -> SecretClient:
    """Return a Key Vault SecretClient for the given deployment stage.

    The vault name is looked up in .cloud/.azure/resources_info.json under
    the stage key; authentication uses DefaultAzureCredential.

    Args:
        stage (str, optional): Deployment stage; one of dev/uat/staging/prod.
            Defaults to "dev".

    Raises:
        ValueError: If *stage* is not a known deployment stage.

    Returns:
        SecretClient: Client that can set/get secrets in the stage's vault.
    """
    valid = {"dev", "uat", "staging", "prod"}
    if stage not in valid:
        raise ValueError("Invalid stage for workspace: got %s, should be from %s" % (stage, valid))
    # Read the vault name for this stage from the deployment config.
    with open(".cloud/.azure/resources_info.json") as fp:
        resources = json.load(fp)
    vault = resources[stage]["keyvault"]
    return SecretClient(
        vault_url=f"https://{vault}.vault.azure.net/",
        credential=DefaultAzureCredential(),
    )
def configure_computes(ws: Workspace, clusters: List[Tuple[str, str, int]]):
    '''
    Ensure every requested AML compute cluster exists in the workspace.

    clusters is a list consisting of the tuples (cluster_name, vm_size, max_nodes)
    e.g. cluster_names = [(cpu-cluster, STANDARD_D2_V2, 2), (gpu-cluster, Standard_NC6, 4)]

    Existing clusters are reused even when their vm_size differs from the
    requested one (only a warning is printed); missing clusters are
    provisioned. Returns the list of ComputeTarget objects, one per request.
    '''
    made_clusters = []
    print("making the clusters:", clusters)
    for cluster_name, vm_size, max_nodes in clusters:
        # Verify that cluster does not exist already
        try:
            cluster = ComputeTarget(workspace=ws, name=cluster_name)
            # Compare the existing VM size with the requested one (case-insensitive).
            vm_size_existing = cluster.serialize()['properties']['properties']['vmSize']
            if vm_size_existing.lower() != vm_size.lower():
                print(
                    f'WARNING: cluster {cluster_name} exists but with vm_size {vm_size_existing} instead of requested {vm_size} \nWe will still use the existing cluster'
                )
            else:
                print(f'Found existing cluster {cluster_name}, use it.')
        except ComputeTargetException:
            # Cluster does not exist yet -- provision it.
            # To use a different region for the compute, add a location='<region>' parameter
            compute_config = AmlCompute.provisioning_configuration(
                vm_size=vm_size,
                max_nodes=max_nodes,
                idle_seconds_before_scaledown=300,
            )
            cluster = ComputeTarget.create(ws, cluster_name, compute_config)
            print(f"Creating new cluster {cluster_name} of type {vm_size} with {max_nodes} nodes")
            cluster.wait_for_completion(show_output=False)
        made_clusters.append(cluster)
    return made_clusters
def download_model(workspace: Workspace, model: Dict[str, Union[str, int]], model_type):
    """Download a registered model and copy it into models/<model_type>.

    Args:
        workspace: Workspace the model is registered in.
        model: Dictionary that contains the ``name`` and ``version`` of the
            model that needs to be downloaded.
        model_type: Subfolder name under ``models/`` (e.g. "bi_encoder").

    Returns:
        str: The local cache path the model was downloaded to.
    """
    # Bug fix: the message previously said "bi_encoder" even when a
    # cross-encoder (or any other model_type) was being downloaded, and
    # lacked a separating space before the model name.
    print(f'download {model_type} {model["name"]}:{model["version"]}')
    model_path = Model.get_model_path(model_name=model['name'], version=model['version'], _workspace=workspace)
    shutil.copytree(src=model_path, dst=f"models/{model_type}")
    return model_path
def combine_models(
    config_file: str = "configs/deployment_config.yaml",
    language_config: str = "configs/model_languages.yaml",
    bi_encoder: Tuple[str, int] = None,
):
    """
    Combines 2 models that are on the model registry into 1 model and registers it again, so it can be used for inference.

    :param config_file: Location of a config yaml file that contains info about the deployment
    :param language_config: Location of a config yaml file that contains info about what model to use for which language
    :param bi_encoder: (model_name, model_version) as stated in the model registry for the first model;
        if empty the standard untrained model for the configured language is used
    """
    ws = get_ws("dev")
    with open(config_file, 'r') as file:
        config = yaml.safe_load(stream=file)
    with open(language_config, 'r') as file:
        language_models = yaml.safe_load(stream=file)
    # Pick the default model names for the configured corpus language.
    language = config['corpus_language']
    bi = language_models[language.lower()]['bi_encoder']
    cross = language_models[language.lower()]['cross_encoder']
    if bi_encoder:
        # A fine-tuned bi-encoder from the registry takes precedence.
        registry_model = {"name": bi_encoder[0], "version": bi_encoder[1]}
        _ = download_model(ws, registry_model, model_type="bi_encoder")
    else:
        # Otherwise fall back to the pretrained sentence-transformers model.
        bi_model = SentenceTransformer(bi)
        bi_model.save("models/bi_encoder")
    model = CrossEncoder(cross)
    model.save("models/cross_encoder")
    # Register the combined folder as a single model, then clean up locally.
    Model.register(
        ws,
        model_path="models",
        model_name="bi_cross_encoders",
        description="Combination of a bi- and cross-encoder that is needed to do inference"
    )
    shutil.rmtree('models')
def upload_folder_to_datastore(path_on_datastore, local_data_folder, stage='dev'):
    """Upload a local folder to the default datastore of an environment.

    Args:
        path_on_datastore (str): Target path on the datastore where the
            folder is uploaded to.
        local_data_folder (str): Path to the local folder to upload.
        stage (str, optional): Deployment stage whose workspace receives
            the data. Defaults to 'dev'.
    """
    workspace = get_ws(stage)
    # Files land on the workspace's default datastore under the given path.
    default_store = workspace.get_default_datastore()
    Dataset.File.upload_directory(
        local_data_folder,
        (default_store, path_on_datastore),
        overwrite=True,
    )
| ReBatch-ML/AnswerSearch | packages/azureml_functions.py | azureml_functions.py | py | 9,949 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"lin... |
33504817634 | from .models.user_tokens import UserTokens
from .models.sources_enabled import SourcesEnabled
from .searchers.constants import DEFAULT_PAGE_SIZE
from .models.results import Results, SourceResult
from .searchers import available_searchers
from .decorators import immutable
import logging
from collections import defaultdict
import grequests
logger = logging.getLogger(__name__)
class Link(object):
    """Core entry point for cross-source search; build with the fluent
    page_size()/query()/user_only() setters, then call fetch().
    """

    def __init__(self, user_tokens: UserTokens, sources_enabled: SourcesEnabled = None):
        """sources_enabled set to None implies all integrations for which a token is set will be searched."""
        self.__sources_enabled = sources_enabled
        self.__user_tokens = user_tokens
        if self.__sources_enabled is None:
            # Default: enable every source the user has a token for.
            self.__sources_enabled = SourcesEnabled(
                list(self.__user_tokens.tokens.keys()))
        super().__init__()
        # __page is the *next* page to serve (1-based); __pages caches
        # already-served pages so previous()/re-fetch never re-query.
        self.__page = 1
        self.__pages = []
        self.__results = Results()
        self.__source_results = {}
        self.__fetchers_modules = {}
        self.__fetchers = defaultdict(list)
        self.__reset()

    @staticmethod
    def builder(user_tokens: UserTokens, sources_enabled: SourcesEnabled = None):
        # Convenience factory mirroring the builder pattern.
        return Link(user_tokens, sources_enabled)

    def fetch(self):
        """Return the next page of results, querying sources only when the
        local cache cannot satisfy the request."""
        self.__validate()
        # Case 1: page was already served once -- return the cached copy.
        if len(self.__pages) >= self.__page:
            logger.info(
                "We don't have to load another page as its already been loaded")
            self.__page += 1
            return self.__pages[self.__page-2]
        # Case 2: enough unserved results are buffered -- no network needed.
        if self.__results.unfetched_results() >= self.__page_size:
            self.__page += 1
            output = self.__results.topk(self.__page_size)
            self.__pages.append(output)
            return output
        # Case 3: query all enabled sources concurrently via grequests;
        # fetchers write their results into self.__results as a side effect.
        if not self.__fetchers:
            self.initialize_fetchers()
        requests = []
        for source in self.__sources_enabled.tokens:
            for fetcher in self.__fetchers[source]:
                request = fetcher.construct_request(
                    self.__page)
                if request is not None:
                    requests.append(request)
        grequests.map(requests)
        self.__page += 1
        output = self.__results.topk(self.__page_size)
        self.__pages.append(output)
        return output

    def initialize_fetchers(self):
        """Instantiate one fetcher per registered searcher module for every
        enabled source (plus a user-scoped twin for user-priority modules)."""
        for source in self.__sources_enabled.tokens:
            source_result = SourceResult(source)
            for module in available_searchers[source]:
                logger.debug(
                    f"Creating fetcher for {source} with name {module.name}")
                self.__source_results[source] = source_result
                self.__results.add_source_result(source_result)
                self.__fetchers[source].append(
                    module(self.__user_tokens.tokens[source], self.__query, self.__page_size, source_result, self.__user_only))
                # User-priority modules additionally get a user-only fetcher.
                if not self.__user_only and module.user_priority:
                    self.__fetchers[source].append(
                        module(self.__user_tokens.tokens[source], self.__query, self.__page_size, source_result, True))

    def previous(self):
        """Return the previously served page, or [] when there is none.

        __page is the next page to serve, so a previous page only exists
        once at least two pages have been served (__page >= 3).
        """
        if self.__page < 3:
            logger.info("Went too far back, this page doesn't exist")
            return []
        logger.info("Fetching a previous page")
        self.__page -= 1
        return self.__pages[self.__page-2]

    # The @immutable decorator makes each setter one-shot; see decorators.py.
    @immutable("page_size", DEFAULT_PAGE_SIZE)
    def page_size(self, page_size):
        self.__page_size = page_size
        return self

    @immutable("query")
    def query(self, query):
        self.__query = query
        return self

    @immutable("user_only", False)
    def user_only(self, user_only=False):
        self.__user_only = user_only
        return self

    def __disable_all_sources(self):
        # NOTE(review): this replaces the SourcesEnabled object with a plain
        # list, unlike __init__ -- confirm callers expect that.
        self.__sources_enabled = []

    def __validate(self):
        # Fail fast with a clear message before any network work happens.
        assert(self.__query != None), "Query cant be None"
        assert(self.__query != ""), "Query cant be empty"
        assert(self.__user_tokens != None), "User Tokens cant be none"
        assert(len(self.__sources_enabled.tokens) > 0), "No source enabled"
        assert(set(self.__sources_enabled.tokens).issubset(
            self.__user_tokens.tokens.keys())), "More sources enabled than tokens provided"

    def __reset(self):
        # Restore the fluent-setter defaults.
        self.__page_size = DEFAULT_PAGE_SIZE
        self.__query = None
        self.__user_only = False
| h4ck3rk3y/link | link/core.py | core.py | py | 4,506 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.user_tokens.UserTokens",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.sources_enabled.SourcesEnabled",
"line_number": 18,
"usage_type": "name"
},
{
... |
70680774589 | from django import forms
from crispy_forms.helper import FormHelper
from .models import Category
from crispy_forms.layout import Submit, Layout, Div, HTML, Field
class CategoryForm(forms.ModelForm):
    """ModelForm for Category exposing only the ``kind`` field, rendered
    with a crispy-forms layout (field row plus Save/Back button row)."""

    def __init__(self, *args, **kwargs):
        super(CategoryForm, self).__init__(*args, **kwargs)
        # Row holding the single editable field.
        field_row = Div(
            Div(Field('kind'), css_class='col-md-12'),
            css_class='row')
        # Row with the submit button and a back link to the dashboard.
        button_row = Div(
            Submit('submit', 'Salvar', css_class="btn btn-info btn-lg"),
            HTML('<a href="{% url "dashboard" %}" class="btn btn-outline-secondary btn-lg">Voltar</a>'),
            css_class='row btn-group col-md-12 d-flex justify-content-end')
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Div(
                Div(field_row, button_row, css_class='col-md-12'),
                css_class='row mt-5 w-100')
        )

    class Meta:
        model = Category
        fields = ('kind',)
| ifcassianasl/movie-list | category/forms.py | forms.py | py | 965 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "crispy_forms.helper.FormHelper",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
40920706579 | from django.urls import path
from lanarce_portfolio.images.api.views import ImagesCreateListAPI, ImageUpdateDeleteAPI, CommentListAPI
# Namespace used for reversing these URLs (e.g. "images:comment-list").
app_name = "images"

urlpatterns = [
    # List all images / create a new one.
    path(
        "", ImagesCreateListAPI.as_view(), name="images-create-list"
    ),
    # Update or delete a single image identified by its UUID.
    path(
        "<uuid:image_id>/", ImageUpdateDeleteAPI.as_view(), name="image-update-delete"
    ),
    # List the comments attached to a single image.
    path(
        "<uuid:image_id>/comment/", CommentListAPI.as_view(), name="comment-list"
    ),
]
| Ari100telll/lanarce_portfolio | lanarce_portfolio/images/urls.py | urls.py | py | 465 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "lanarce_portfolio.images.api.views.ImagesCreateListAPI.as_view",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "lanarce_portfolio.images.api.views.ImagesCreateListAPI",
"line_... |
72592445948 | import yaml
from .defaults import METADETECT_CONFIG
def load_config(config_path):
    """Load a config file and return it.

    Parameters
    ----------
    config_path : str, optional
        The path to the config file.

    Returns
    -------
    sim_config : dict
        A dictionary of the sim config options.
    run_config : dict
        A dictionary of the run config options (defaults filled in).
    shear_meas_config : dict
        A dictionary of the shear measurement options, overlaid with
        METADETECT_CONFIG.
    swap12 : bool
        If True, swap the role of the 1- and 2-axes in the shear measurement.
    cut_interp : bool
        If True, cut objects with too much interpolation.
    """
    # NOTE(review): yaml.Loader can construct arbitrary Python objects;
    # consider yaml.safe_load if configs may come from untrusted sources.
    with open(config_path, 'r') as fp:
        config = yaml.load(fp, Loader=yaml.Loader)

    # These two flags live at the top level of the config; pop them so the
    # remaining mapping only holds the sectioned options.
    swap12 = config.pop('swap12', False)
    cut_interp = config.pop('cut_interp', False)

    run_config = {
        'n_patches_per_job': 200,
        'n_patches': 10_000_000,
        'n_jobs_per_script': 500}
    run_config.update(config.get('run', {}))

    shear_meas_config = config.get('shear_meas', {})
    # NOTE(review): METADETECT_CONFIG is applied last, so the defaults
    # override any file-supplied shear_meas values -- confirm intentional.
    shear_meas_config.update(METADETECT_CONFIG)

    return config['sim'], run_config, shear_meas_config, swap12, cut_interp
| beckermr/metadetect-coadding-sims | coadd_mdetsims/config.py | config.py | py | 1,186 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "yaml.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "yaml.Loader",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "defaults.METADETECT_CONFIG",
"line_number": 41,
"usage_type": "argument"
}
] |
21011206894 | import argparse
import pandas as pd
import cv2
import mediapipe as mp
mp_pose = mp.solutions.pose
from pose_embedder import FullBodyPoseEmbedder
from pose_classifier import PoseClassifier
import numpy as np
classifiers = {}
def run_classify(csv_path):
    """Classify every frame listed in the CSV and write predictions back.

    Reads rows of (filepaths, exercise, groundtruth) from *csv_path*, scores
    each image with a single shared MediaPipe pose estimator, and saves the
    CSV in place with a new 'prediction' column.
    """
    # One pose estimator is shared across the whole video's frames.
    estimator = mp_pose.Pose(
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5
    )
    print(f'Reading from {csv_path}')
    table = pd.read_csv(csv_path)
    predictions = []
    for fname, exercise, gt in zip(table['filepaths'], table['exercise'], table['groundtruth']):
        predictions.append(classify(fname, exercise, gt, estimator))
    table['prediction'] = predictions
    table.to_csv(csv_path, header=True, index=None)
def classify(fname, exercise, groundtruth, pose):
    """Score one image against the KNN pose database for its exercise.

    Returns P(groundtruth label | image) in [0, 1], or 0.0 when no pose is
    detected or the ground-truth label is absent from the classifier output.
    Classifiers are cached per exercise in the module-level ``classifiers``
    dict so the sample database is only loaded once.
    """
    classifier_samples_folder = f'{exercise}_csvs_out'
    # Transforms pose landmarks into embedding.
    pose_embedder = FullBodyPoseEmbedder()
    # Classifies a given pose against the database of poses; reuse a cached
    # classifier for this exercise when one exists.
    if classifier_samples_folder in classifiers:
        pose_classifier = classifiers[classifier_samples_folder]
    else:
        pose_classifier = PoseClassifier(
            pose_samples_folder=classifier_samples_folder,
            pose_embedder=pose_embedder,
            top_n_by_max_distance=30,
            top_n_by_mean_distance=10)
        classifiers[classifier_samples_folder] = pose_classifier
    print(fname)
    print(exercise)
    img = cv2.imread(fname)
    classification_result = 0.0
    results = pose.process(img)
    pose_landmarks = results.pose_landmarks
    if pose_landmarks:
        # Scale normalized landmarks back into pixel coordinates.
        frame_height, frame_width = img.shape[0], img.shape[1]
        pose_landmarks = np.array(
            [[lmk.x * frame_width, lmk.y * frame_height, lmk.z * frame_width]
             for lmk in pose_landmarks.landmark],
            dtype=np.float32)
        assert pose_landmarks.shape == (33, 3), 'Unexpected landmarks shape: {}'.format(pose_landmarks.shape)
        # KNN votes (out of 10 neighbours) normalised to probabilities,
        # sorted most-probable first.
        p_w_bar_x = {k:v/10. for k,v in sorted(pose_classifier(pose_landmarks).items(), key=lambda item: item[1], reverse=True)}
        print(f'P(w|x): {p_w_bar_x}')
        print(groundtruth)
        gt_label = f'{exercise}_{groundtruth}'
        if gt_label in p_w_bar_x:
            classification_result = float(p_w_bar_x[gt_label])
    return classification_result
def parse_args():
    """Parse command-line arguments: a single positional CSV path."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('csv_path')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Script entry point: classify every row of the CSV named on the CLI.
    args = parse_args()
    print(args)
    run_classify(args.csv_path)
| insidedctm/pose_knn_classifier | classify.py | classify.py | py | 2,471 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mediapipe.solutions",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pose_embedder.FullBodyPoseEmbedder",
"line_number": 34,
"usage_type": "call"
},
{
"api_na... |
40319564097 | from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler import \
module_dependency_error, MODULE_EXCEPTIONS
try:
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.helper.main import \
diff_remove_empty
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import Session
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.defaults.main import \
OPN_MOD_ARGS, STATE_MOD_ARG
except MODULE_EXCEPTIONS:
module_dependency_error()
DOCUMENTATION = 'https://opnsense.ansibleguy.net/en/latest/modules/_tmpl.html'
EXAMPLES = 'https://opnsense.ansibleguy.net/en/latest/modules/_tmpl.html'
def run_module():
    """Template entry point for an OPNsense Ansible module.

    Declares the argument spec, opens an API session, and walks the usual
    present/absent state machine: delete when state=absent and the item
    exists, update when it exists with different values, create otherwise.
    The actual API calls are left commented out for the concrete module to
    fill in.
    """
    module_args = dict(
        name=dict(type='str', required=True),
        description=dict(type='str', required=False, default='', aliases=['desc']),
        content=dict(type='list', required=False, default=[], elements='str'),
        type=dict(type='str', required=False, choices=['1', '2'], default='1'),
        **STATE_MOD_ARG,
        **OPN_MOD_ARGS,
    )

    # 'diff' feeds Ansible's --diff output; before/after are filled below.
    result = dict(
        changed=False,
        diff={
            'before': {},
            'after': {},
        }
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
    )

    session = Session(module=module)

    # do api interactions here
    exists = True  # check via api if the item already exists
    # session.get(cnf={
    #     'module': 'API-Module',
    #     'controller': 'API-Controller',
    #     'command': 'API-info-command',
    #     'data': {'tests'}
    # })

    if exists:
        result['diff']['before'] = 'test'  # set to current value for diff-mode

    if module.params['state'] == 'absent':
        if exists:
            result['changed'] = True
            if not module.check_mode:
                # remove item via api if not in check-mode
                # session.post(cnf={
                #     'module': 'API-Module',
                #     'controller': 'API-Controller',
                #     'command': 'API-delete-command',
                #     'params': ['uuid'],
                # })
                pass
    else:
        if exists:
            value_changed = True  # compare existing item config with configured one
            if value_changed:
                result['diff']['after'] = 'tests'  # set to configured value(s)
                if not module.check_mode:
                    # update item via api if not in check-mode
                    # session.post(cnf={
                    #     'module': 'API-Module',
                    #     'controller': 'API-Controller',
                    #     'command': 'API-update-command',
                    #     'data': {'tests'},
                    #     'params': ['uuid'],
                    # })
                    pass
        else:
            result['diff']['after'] = 'tests'  # set to configured value(s)
            if not module.check_mode:
                # create item via api if not in check-mode
                # session.post(cnf={
                #     'module': 'API-Module',
                #     'controller': 'API-Controller',
                #     'command': 'API-add-command',
                #     'data': {'tests'},
                # })
                pass

    # don't forget to call the 'reload' endpoint to activate the changes (if available/needed)

    # cleanup and exit
    session.close()
    result['diff'] = diff_remove_empty(result['diff'])
    module.exit_json(**result)
def main():
    # Thin wrapper so the module can also be invoked programmatically.
    run_module()


if __name__ == '__main__':
    main()
| ansibleguy/collection_opnsense | plugins/modules/_tmpl_direct.py | _tmpl_direct.py | py | 3,679 | python | en | code | 158 | github-code | 6 | [
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler.MODULE_EXCEPTIONS",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler.module_dependency_error",
"line_number": 14,
"u... |
3774639154 | __author__ = 'shixk'
import datetime
from SearchFiles import SearchFiles
class GetData(object):
    """Dispatches filter queries to the file-search backend."""

    def loadfilterdata(self, query, conf):
        # Only the "time" filter method is supported; anything else errors.
        if query['method'] == "time":
            return self.filterbydate(query, conf)
        else:
            return {'ERROR': 'no method'}

    def filterbydate(self, query, conf):
        """Return the file list matching the query's date range / file types.

        Supported query keys: optional 'filetype' (comma-separated list of
        extensions without dots), optional 'start' (YYYY-MM-DD) and optional
        'end'; a missing 'end' defaults to 24h after 'start'.
        """
        sf = SearchFiles(conf)
        # NOTE(review): module-level mutable state -- presumably a cache of
        # the last result; verify this sharing is intentional.
        global file_list
        if 'filetype' in query.keys():
            # Normalise "jpg,png" into ['.jpg', '.png'].
            query['filetype'] = ['.' + q for q in query['filetype'].split(',')]
            if 'start' not in query.keys():
                file_list = sf.getfilenotime(query['filetype'])
                return file_list
            elif 'end' not in query.keys():
                # Default the end of the range to 24h after the start date.
                query['end'] = datetime.datetime.strptime(query['start'], "%Y-%m-%d") + datetime.timedelta(hours=24)
            file_list = sf.getfilelist(query['start'], query['end'], query['filetype'])
        else:
            if 'start' not in query.keys():
                file_list = sf.getfileno2t()
                return file_list
            elif 'end' not in query.keys():
                query['end'] = datetime.datetime.strptime(query['start'], "%Y-%m-%d") + datetime.timedelta(hours=24)
            file_list = sf.getfileno_type(query['start'], query['end'])
        return file_list
{
"api_name": "SearchFiles.SearchFiles",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_nam... |
5001311387 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Question
class AnswerFrom(forms.Form):
    """Answer form: a single textarea plus a crispy-forms submit helper."""

    content = forms.CharField(widget=forms.Textarea(attrs={'rows': 6}), label='Trả lời')

    def __init__(self, *args, **kwargs):
        super(AnswerFrom, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_id = 'answer'
        helper.form_method = 'post'
        helper.form_action = 'answer/'
        helper.add_input(Submit('submit', 'Trả lời'))
        self.helper = helper
class QuestionForm(forms.Form):
    """Form for posting a question with comma-separated topic tags."""

    title = forms.CharField(required=True, label='Question')
    content = forms.CharField(label='Content', widget=forms.Textarea(), required=False)
    tags = forms.CharField(label='Topics')

    def clean_title(self):
        """Normalise the title so it always ends with a question mark."""
        title = self.cleaned_data['title'].strip()
        # Bug fix: indexing title[len(title) - 1] raised IndexError on an
        # empty/whitespace-only title; endswith() is safe for any input.
        if title and not title.endswith('?'):
            title += '?'
        return title

    def clean_tags(self):
        """Split the comma-separated topics into title-cased tag names."""
        raw = self.cleaned_data['tags'].strip().strip(',')
        return [tag.lower().title() for tag in raw.split(',')]

    def __init__(self, *args, **kwargs):
        super(QuestionForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_id = 'question'
        self.helper.form_method = 'post'
        self.helper.form_action = 'question:add'
        self.helper.add_input(Submit('submit', 'Submit'))
{
"api_name": "django.forms.Form",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.forms"... |
21002262598 | import os
import pandas as pd
from natsort import natsorted
from openpyxl import load_workbook
dirName = './pozyxAPI_dane_pomiarowe'
def parse_learn_data():
    """Collect all measurement spreadsheets into a single dataset.csv.

    Reads every .xlsx in the measurement directory (in natural filename
    order) plus the test/distribution workbook, keeps the position columns,
    and concatenates everything into ./dataset.csv.
    """
    columns = ['0/timestamp', 't', 'no', 'measurement x', 'measurement y', 'reference x', 'reference y']
    frames = []
    for filename in natsorted(os.listdir(dirName)):
        if filename.endswith(".xlsx"):
            # Bug fix: the per-file read must use the loop's filename,
            # not a hard-coded placeholder path.
            df = pd.read_excel(f"{dirName}/{filename}")
            frames.append(df[columns])
    df = pd.read_excel("./pozyxAPI_only_localization_dane_testowe_i_dystrybuanta.xlsx")
    frames.append(df[columns])
    # DataFrame.append was removed in pandas 2.x; concat also builds the
    # result in one pass instead of re-copying on every iteration.
    data = pd.concat(frames, ignore_index=True)
    data.to_csv("./dataset.csv")
def add_results_to_main_excel(predict_test):
    """Write predicted (x, y) pairs into columns O/P of the results workbook.

    The predictions are round-tripped through a temporary xlsx so openpyxl
    can read them back, then copied row by row (starting at row 2, below the
    header) into a copy of the localization workbook saved as ./Results.xlsx.
    """
    df_to_export = pd.DataFrame(predict_test)
    with pd.ExcelWriter('./tmp.xlsx') as writer:
        df_to_export.to_excel(writer, sheet_name='Results', index=False)

    export_workbook = load_workbook(filename='./tmp.xlsx')
    export_sheet = export_workbook.active
    target_workbook = load_workbook(filename='./pozyxAPI_only_localization_dane_testowe_i_dystrybuanta.xlsx')
    target_sheet = target_workbook.active

    # min_row=2 skips the header row written by to_excel.
    export_values = list(export_sheet.iter_rows(min_row=2, max_col=2, values_only=True))
    os.remove('tmp.xlsx')

    # enumerate(start=2) replaces the original __len__()/index arithmetic;
    # target rows start at 2 to stay below the header.
    for row_no, (x_val, y_val) in enumerate(export_values, start=2):
        target_sheet[f'O{row_no}'] = x_val
        target_sheet[f'P{row_no}'] = y_val
    target_workbook.save('./Results.xlsx')
| precel120/SISE | Task 2/excel.py | excel.py | py | 1,618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "natsort.natsorted",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.